# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import numbers
import re
import semantic_version
import six
import glance.common.exception as exc
from glance.common.glare import declarative
from glance.i18n import _
class Text(declarative.PropertyDefinition):
"""A text metadata property of arbitrary length
Maps to TEXT columns in database, does not support sorting or filtering
"""
ALLOWED_TYPES = (six.string_types,)
DB_TYPE = 'text'
# noinspection PyAttributeOutsideInit
class String(Text):
"""A string metadata property of limited length
Maps to VARCHAR columns in database, supports filtering and sorting.
May have constraints on length and regexp patterns.
The maximum length is limited to 255 characters
"""
DB_TYPE = 'string'
def __init__(self, max_length=255, min_length=0, pattern=None, **kwargs):
"""Defines a String metadata property.
:param max_length: maximum value length
:param min_length: minimum value length
:param pattern: regexp pattern to match
"""
super(String, self).__init__(**kwargs)
self.max_length(max_length)
self.min_length(min_length)
if pattern:
self.pattern(pattern)
# if default and/or allowed_values are specified (in base classes)
# then we need to validate them against the newly added validators
self._check_definition()
def max_length(self, value):
"""Sets the maximum value length"""
self._max_length = value
if value is not None:
if value > 255:
raise exc.InvalidArtifactTypePropertyDefinition(
_('Max string length may not exceed 255 characters'))
self._add_validator('max_length',
lambda v: len(v) <= self._max_length,
_('Length is greater than maximum'))
else:
self._remove_validator('max_length')
self._check_definition()
def min_length(self, value):
"""Sets the minimum value length"""
self._min_length = value
if value is not None:
if value < 0:
raise exc.InvalidArtifactTypePropertyDefinition(
_('Min string length may not be negative'))
self._add_validator('min_length',
lambda v: len(v) >= self._min_length,
_('Length is less than minimum'))
else:
self._remove_validator('min_length')
self._check_definition()
def pattern(self, value):
"""Sets the regexp pattern to match"""
self._pattern = value
if value is not None:
self._add_validator('pattern',
lambda v: re.match(self._pattern,
v) is not None,
_('Does not match pattern'))
else:
self._remove_validator('pattern')
self._check_definition()
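# Illustrative sketch (editor's addition, not part of the original module):
# how a constrained String property might be declared on an artifact type.
# The attribute name 'slug' and the pattern are hypothetical.
#
#     class ExampleArtifact(ArtifactType):
#         slug = String(min_length=3, max_length=64,
#                       pattern=r'^[a-z0-9][a-z0-9-]*$')
#
# Values shorter than 3 or longer than 64 characters, or values that do not
# match the pattern, are rejected by the validators registered above.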
class SemVerString(String):
"""A String metadata property matching semver pattern"""
def __init__(self, **kwargs):
def validate(value):
try:
semantic_version.Version(value, partial=True)
except ValueError:
return False
return True
super(SemVerString,
self).__init__(validators=[(validate,
"Invalid semver string")],
**kwargs)
# noinspection PyAttributeOutsideInit
class Integer(declarative.PropertyDefinition):
"""An Integer metadata property
Maps to INT columns in Database, supports filtering and sorting.
May have constraints on value
"""
ALLOWED_TYPES = (six.integer_types,)
DB_TYPE = 'int'
def __init__(self, min_value=None, max_value=None, **kwargs):
"""Defines an Integer metadata property
:param min_value: minimum allowed value
:param max_value: maximum allowed value
"""
super(Integer, self).__init__(**kwargs)
if min_value is not None:
self.min_value(min_value)
if max_value is not None:
self.max_value(max_value)
# if default and/or allowed_values are specified (in base classes)
# then we need to validate them against the newly added validators
self._check_definition()
def min_value(self, value):
"""Sets the minimum allowed value"""
self._min_value = value
if value is not None:
self._add_validator('min_value',
lambda v: v >= self._min_value,
_('Value is less than minimum'))
else:
self._remove_validator('min_value')
self._check_definition()
def max_value(self, value):
"""Sets the maximum allowed value"""
self._max_value = value
if value is not None:
self._add_validator('max_value',
lambda v: v <= self._max_value,
_('Value is greater than maximum'))
else:
self._remove_validator('max_value')
self._check_definition()
# noinspection PyAttributeOutsideInit
class DateTime(declarative.PropertyDefinition):
"""A DateTime metadata property
Maps to DATETIME columns in the database.
Is not supported as a Type-Specific property; may be used only as a Generic one.
May have constraints on value
"""
ALLOWED_TYPES = (datetime.datetime,)
DB_TYPE = 'datetime'
def __init__(self, min_value=None, max_value=None, **kwargs):
"""Defines a DateTime metadata property
:param min_value: minimum allowed value
:param max_value: maximum allowed value
"""
super(DateTime, self).__init__(**kwargs)
if min_value is not None:
self.min_value(min_value)
if max_value is not None:
self.max_value(max_value)
# if default and/or allowed_values are specified (in base classes)
# then we need to validate them against the newly added validators
self._check_definition()
def min_value(self, value):
"""Sets the minimum allowed value"""
self._min_value = value
if value is not None:
self._add_validator('min_value',
lambda v: v >= self._min_value,
_('Value is less than minimum'))
else:
self._remove_validator('min_value')
self._check_definition()
def max_value(self, value):
"""Sets the maximum allowed value"""
self._max_value = value
if value is not None:
self._add_validator('max_value',
lambda v: v <= self._max_value,
_('Value is greater than maximum'))
else:
self._remove_validator('max_value')
self._check_definition()
# noinspection PyAttributeOutsideInit
class Numeric(declarative.PropertyDefinition):
"""A Numeric metadata property
Maps to floating point number columns in Database, supports filtering and
sorting. May have constraints on value
"""
ALLOWED_TYPES = numbers.Number
DB_TYPE = 'numeric'
def __init__(self, min_value=None, max_value=None, **kwargs):
"""Defines a Numeric metadata property
:param min_value: minimum allowed value
:param max_value: maximum allowed value
"""
super(Numeric, self).__init__(**kwargs)
if min_value is not None:
self.min_value(min_value)
if max_value is not None:
self.max_value(max_value)
# if default and/or allowed_values are specified (in base classes)
# then we need to validate them against the newly added validators
self._check_definition()
def min_value(self, value):
"""Sets the minimum allowed value"""
self._min_value = value
if value is not None:
self._add_validator('min_value',
lambda v: v >= self._min_value,
_('Value is less than minimum'))
else:
self._remove_validator('min_value')
self._check_definition()
def max_value(self, value):
"""Sets the maximum allowed value"""
self._max_value = value
if value is not None:
self._add_validator('max_value',
lambda v: v <= self._max_value,
_('Value is greater than maximum'))
else:
self._remove_validator('max_value')
self._check_definition()
class Boolean(declarative.PropertyDefinition):
"""A Boolean metadata property
Maps to Boolean columns in database. Supports filtering and sorting.
"""
ALLOWED_TYPES = (bool,)
DB_TYPE = 'bool'
class Array(declarative.ListAttributeDefinition,
declarative.PropertyDefinition, list):
"""An array metadata property
May contain elements of any other PropertyDefinition types except Dict and
Array. Each element maps to the appropriate column type in the database.
Preserves order. Allows filtering based on "Array contains Value" semantics.
May specify constraints on the types of elements, their amount, and uniqueness.
"""
ALLOWED_ITEM_TYPES = (declarative.PropertyDefinition,)
def __init__(self, item_type=String(), min_size=0, max_size=None,
unique=False, extra_items=True, **kwargs):
"""Defines an Array metadata property
:param item_type: defines the types of elements in Array. If set to an
instance of PropertyDefinition then all the elements have to be of that
type. If set to a list of such instances, then the elements at the
corresponding positions have to be of the appropriate type.
:param min_size: minimum size of the Array
:param max_size: maximum size of the Array
:param unique: if set to true, all the elements in the Array have to be
unique
"""
if isinstance(item_type, Array):
msg = _("Array property can't have item_type=Array")
raise exc.InvalidArtifactTypePropertyDefinition(msg)
declarative.ListAttributeDefinition.__init__(self,
item_type=item_type,
min_size=min_size,
max_size=max_size,
unique=unique)
declarative.PropertyDefinition.__init__(self, **kwargs)
class Dict(declarative.DictAttributeDefinition,
declarative.PropertyDefinition, dict):
"""A dictionary metadata property
May contain elements of any other PropertyDefinition types except Dict.
Each element maps to the appropriate column type in the database. Allows
filtering and sorting by the values of each key, except keys that map to
Text fields.
May specify constraints on the types of elements and their amount.
"""
ALLOWED_PROPERTY_TYPES = (declarative.PropertyDefinition,)
def __init__(self, properties=String(), min_properties=0,
max_properties=None, **kwargs):
"""Defines a dictionary metadata property
:param properties: defines the types of dictionary values. If set to an
instance of PropertyDefinition then all the values have to be of that
type. If set to a dictionary with string keys and values of
PropertyDefinition type, then the elements mapped by the corresponding
keys have to be of the appropriate type.
:param min_properties: minimum allowed amount of properties in the dict
:param max_properties: maximum allowed amount of properties in the dict
"""
declarative.DictAttributeDefinition.__init__(
self,
properties=properties,
min_properties=min_properties,
max_properties=max_properties)
declarative.PropertyDefinition.__init__(self, **kwargs)
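# Illustrative sketch (editor's addition, not part of the original module):
# composite properties built from Array and Dict. The attribute names are
# hypothetical.
#
#     class ExampleArtifact(ArtifactType):
#         # homogeneous list of unique strings, at most ten entries
#         keywords = Array(item_type=String(max_length=32),
#                          unique=True, max_size=10)
#         # dictionary with string keys and integer values in [0, 100]
#         scores = Dict(properties=Integer(min_value=0, max_value=100))
#
# Array(item_type=Array(...)) is rejected at definition time, as enforced in
# Array.__init__ above.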
class ArtifactType(declarative.get_declarative_base()): # noqa
"""A base class for all the Artifact Type definitions
Defines the Generic metadata properties as attributes.
"""
id = String(required=True, readonly=True)
type_name = String(required=True, readonly=True)
type_version = SemVerString(required=True, readonly=True)
name = String(required=True, mutable=False)
version = SemVerString(required=True, mutable=False)
description = Text()
tags = Array(unique=True, default=[])
visibility = String(required=True,
allowed_values=["private", "public", "shared",
"community"],
default="private")
state = String(required=True, readonly=True, allowed_values=["creating",
"active",
"deactivated",
"deleted"])
owner = String(required=True, readonly=True)
created_at = DateTime(required=True, readonly=True)
updated_at = DateTime(required=True, readonly=True)
published_at = DateTime(readonly=True)
deleted_at = DateTime(readonly=True)
def __init__(self, **kwargs):
if "type_name" in kwargs:
raise exc.InvalidArtifactPropertyValue(
_("Unable to specify artifact type explicitly"))
if "type_version" in kwargs:
raise exc.InvalidArtifactPropertyValue(
_("Unable to specify artifact type version explicitly"))
super(ArtifactType,
self).__init__(type_name=self.metadata.type_name,
type_version=self.metadata.type_version, **kwargs)
def __eq__(self, other):
if not isinstance(other, ArtifactType):
return False
return self.id == other.id
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.id)
def __is_mutable__(self):
return self.state == "creating"
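# Illustrative sketch (editor's addition, not part of the original module):
# a minimal artifact type built on the generic properties defined above.
# The type and field names are hypothetical.
#
#     class ImageBundle(ArtifactType):
#         os_family = String(allowed_values=["linux", "windows"],
#                            default="linux")
#         release_date = DateTime()
#
# 'id', 'name', 'version', 'visibility', 'state' and the timestamp fields are
# inherited from ArtifactType; 'type_name' and 'type_version' are filled in
# automatically and may not be passed to the constructor (see __init__ above).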
class ArtifactReference(declarative.RelationDefinition):
"""An artifact reference definition
Allows defining constraints by the type name and version of the target artifact
"""
ALLOWED_TYPES = ArtifactType
def __init__(self, type_name=None, type_version=None, **kwargs):
"""Defines an artifact reference
:param type_name: type name of the target artifact
:param type_version: type version of the target artifact
"""
super(ArtifactReference, self).__init__(**kwargs)
if type_name is not None:
if isinstance(type_name, list):
type_names = list(type_name)
if type_version is not None:
raise exc.InvalidArtifactTypePropertyDefinition(
_('Unable to specify version '
'if multiple types are possible'))
else:
type_names = [type_name]
def validate_reference(artifact):
if artifact.type_name not in type_names:
return False
if (type_version is not None and
artifact.type_version != type_version):
return False
return True
self._add_validator('referenced_type',
validate_reference,
_("Invalid referenced type"))
elif type_version is not None:
raise exc.InvalidArtifactTypePropertyDefinition(
_('Unable to specify version '
'if type is not specified'))
self._check_definition()
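# Illustrative sketch (editor's addition, not part of the original module):
# constraining references to artifacts of particular types. The names are
# hypothetical.
#
#     class ImageBundle(ArtifactType):
#         # may only point to artifacts whose type_name is "BaseImage"
#         base = ArtifactReference(type_name="BaseImage", type_version="1.0")
#         # may point to either of two types; no version constraint is
#         # allowed when multiple type names are given (see __init__ above)
#         extras = ArtifactReferenceList(
#             references=ArtifactReference(type_name=["Addon", "Plugin"]))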
class ArtifactReferenceList(declarative.ListAttributeDefinition,
declarative.RelationDefinition, list):
"""A list of Artifact References
Allows defining a collection of references to other artifacts, each
optionally constrained by type name and type version.
"""
ALLOWED_ITEM_TYPES = (ArtifactReference,)
def __init__(self, references=ArtifactReference(), min_size=0,
max_size=None, **kwargs):
if isinstance(references, list):
raise exc.InvalidArtifactTypePropertyDefinition(
_("Invalid reference list specification"))
declarative.RelationDefinition.__init__(self, **kwargs)
declarative.ListAttributeDefinition.__init__(self,
item_type=references,
min_size=min_size,
max_size=max_size,
unique=True,
default=[]
if min_size == 0 else
None)
class Blob(object):
"""A Binary object being part of the Artifact"""
def __init__(self, size=0, locations=None, checksum=None, item_key=None):
"""Initializes a new Binary Object for an Artifact
:param size: the size of Binary Data
:param locations: a list of data locations in backing stores
:param checksum: a checksum for the data
"""
if locations is None:
locations = []
self.size = size
self.checksum = checksum
self.locations = locations
self.item_key = item_key
def to_dict(self):
return {
"size": self.size,
"checksum": self.checksum,
}
class BinaryObject(declarative.BlobDefinition, Blob):
"""A definition of BinaryObject binding
Adds a BinaryObject to an Artifact Type, optionally constrained by file
size and amount of locations
"""
ALLOWED_TYPES = (Blob,)
def __init__(self,
max_file_size=None,
min_file_size=None,
min_locations=None,
max_locations=None,
**kwargs):
"""Defines a binary object as part of Artifact Type
:param max_file_size: maximum size of the associated Blob
:param min_file_size: minimum size of the associated Blob
:param min_locations: minimum number of locations in the associated
Blob
:param max_locations: maximum number of locations in the associated
Blob
"""
mutable = kwargs.pop('mutable', False)
if mutable:
raise exc.InvalidArtifactTypePropertyDefinition(
_("BinaryObject property cannot be declared mutable"))
super(BinaryObject, self).__init__(default=None, readonly=False,
mutable=mutable, **kwargs)
self._max_file_size = max_file_size
self._min_file_size = min_file_size
self._min_locations = min_locations
self._max_locations = max_locations
self._add_validator('size_not_empty',
lambda v: v.size is not None,
_('Blob size is not set'))
if max_file_size:
self._add_validator('max_size',
lambda v: v.size <= self._max_file_size,
_("File too large"))
if min_file_size:
self._add_validator('min_size',
lambda v: v.size >= self._min_file_size,
_("File too small"))
if min_locations:
self._add_validator('min_locations',
lambda v: len(
v.locations) >= self._min_locations,
_("Too few locations"))
if max_locations:
self._add_validator(
'max_locations',
lambda v: len(v.locations) <= self._max_locations,
_("Too many locations"))
class BinaryObjectList(declarative.ListAttributeDefinition,
declarative.BlobDefinition, list):
"""A definition of binding to the list of BinaryObject
Adds a list of BinaryObject's to an artifact type, optionally constrained
by the number of objects in the list and their uniqueness
"""
ALLOWED_ITEM_TYPES = (BinaryObject,)
def __init__(self, objects=BinaryObject(), min_count=0, max_count=None,
**kwargs):
declarative.BlobDefinition.__init__(self, **kwargs)
declarative.ListAttributeDefinition.__init__(self,
item_type=objects,
min_size=min_count,
max_size=max_count,
unique=True)
self.default = [] if min_count == 0 else None
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils import timezone
from exam import before
from sentry.models import GroupSeen, Group
from sentry.constants import MAX_JSON_RESULTS
from sentry.testutils import TestCase, fixture
class GroupDetailsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_simple(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/details.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert resp.context['group'] == self.group
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
# ensure we've marked the group as seen
assert GroupSeen.objects.filter(
group=self.group, user=self.user).exists()
class GroupListTest(TestCase):
@fixture
def path(self):
return reverse('sentry-stream', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
})
@before
def create_a_couple_events(self):
later = timezone.now()
now = later - timedelta(hours=1)
past = now - timedelta(hours=1)
self.group1 = Group.objects.create(
project=self.project,
checksum='a' * 32,
last_seen=now,
first_seen=now,
times_seen=5,
)
self.group2 = Group.objects.create(
project=self.project,
checksum='b' * 32,
last_seen=later,
first_seen=past,
times_seen=50,
)
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
assert 'project' in resp.context
assert 'team' in resp.context
assert 'event_list' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
def test_date_sort(self):
self.login()
resp = self.client.get(self.path + '?sort=date')
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
assert list(resp.context['event_list']) == [self.group2, self.group1]
def test_new_sort(self):
self.login()
resp = self.client.get(self.path + '?sort=new')
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
print self.group1.score, self.group2.score
assert list(resp.context['event_list']) == [self.group1, self.group2]
def test_freq_sort(self):
self.login()
resp = self.client.get(self.path + '?sort=freq')
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
assert list(resp.context['event_list']) == [self.group2, self.group1]
class GroupEventListTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-events', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
event = self.create_event(
event_id='a' * 32, datetime=timezone.now() - timedelta(minutes=1))
event2 = self.create_event(
event_id='b' * 32, datetime=timezone.now())
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/event_list.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert 'event_list' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
event_list = resp.context['event_list']
assert len(event_list) == 2
assert event_list[0] == event2
assert event_list[1] == event
class GroupTagListTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-tags', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/tag_list.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert 'tag_list' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
class GroupEventDetailsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-event', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
'event_id': self.event.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/details.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert 'event' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
assert resp.context['event'] == self.event
class GroupEventListJsonTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-events-json', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
self.login()
event = self.create_event(
event_id='a' * 32, datetime=timezone.now() - timedelta(minutes=1))
event2 = self.create_event(
event_id='b' * 32, datetime=timezone.now())
resp = self.client.get(self.path)
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
data = json.loads(resp.content)
assert len(data) == 2
assert data[0]['id'] == str(event2.event_id)
assert data[1]['id'] == str(event.event_id)
def test_does_not_allow_beyond_limit(self):
self.login()
resp = self.client.get(self.path, {'limit': MAX_JSON_RESULTS + 1})
assert resp.status_code == 400
class GroupEventJsonTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-event-json', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
'event_id_or_latest': self.event.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
data = json.loads(resp.content)
assert data['id'] == self.event.event_id
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
distributions = tf.contrib.distributions
def softplus(x):
return np.log(1 + np.exp(x))
class OperatorPDCholeskyTest(tf.test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def _random_cholesky_array(self, shape):
mat = self._rng.rand(*shape)
chol = distribution_util.matrix_diag_transform(
mat, transform=tf.nn.softplus)
# Zero the upper triangle because we're using this as a true Cholesky factor
# in our tests.
return tf.matrix_band_part(chol, -1, 0).eval()
def test_log_det(self):
with self.test_session():
batch_shape = ()
for k in [1, 4]:
chol_shape = batch_shape + (k, k)
chol = self._random_cholesky_array(chol_shape)
operator = operator_pd_cholesky.OperatorPDCholesky(chol)
log_det = operator.log_det()
expected_log_det = np.log(np.prod(np.diag(chol))**2)
self.assertEqual(batch_shape, log_det.get_shape())
self.assertAllClose(expected_log_det, log_det.eval())
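# Note on the expected value above (editor's addition): for a Cholesky factor
# C, det(C C^T) = det(C)^2 = (prod of diag(C))^2, so
# log det = 2 * sum(log(diag(C))). A quick NumPy check of the identity:
#
#     c = np.tril(np.random.rand(4, 4)) + np.eye(4)
#     np.testing.assert_allclose(np.log(np.linalg.det(c.dot(c.T))),
#                                2 * np.sum(np.log(np.diag(c))))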
def test_log_det_batch_matrix(self):
with self.test_session():
batch_shape = (2, 3)
for k in [1, 4]:
chol_shape = batch_shape + (k, k)
chol = self._random_cholesky_array(chol_shape)
operator = operator_pd_cholesky.OperatorPDCholesky(chol)
log_det = operator.log_det()
self.assertEqual(batch_shape, log_det.get_shape())
# Test the log-determinant of the [1, 1] matrix.
chol_11 = chol[1, 1, :, :]
expected_log_det = np.log(np.prod(np.diag(chol_11))**2)
self.assertAllClose(expected_log_det, log_det.eval()[1, 1])
def test_sqrt_matmul_single_matrix(self):
with self.test_session():
batch_shape = ()
for k in [1, 4]:
x_shape = batch_shape + (k, 3)
x = self._rng.rand(*x_shape)
chol_shape = batch_shape + (k, k)
chol = self._random_cholesky_array(chol_shape)
operator = operator_pd_cholesky.OperatorPDCholesky(chol)
sqrt_operator_times_x = operator.sqrt_matmul(x)
expected = tf.batch_matmul(chol, x)
self.assertEqual(expected.get_shape(),
sqrt_operator_times_x.get_shape())
self.assertAllClose(expected.eval(), sqrt_operator_times_x.eval())
def test_sqrt_matmul_batch_matrix(self):
with self.test_session():
batch_shape = (2, 3)
for k in [1, 4]:
x_shape = batch_shape + (k, 5)
x = self._rng.rand(*x_shape)
chol_shape = batch_shape + (k, k)
chol = self._random_cholesky_array(chol_shape)
operator = operator_pd_cholesky.OperatorPDCholesky(chol)
sqrt_operator_times_x = operator.sqrt_matmul(x)
expected = tf.batch_matmul(chol, x)
self.assertEqual(expected.get_shape(),
sqrt_operator_times_x.get_shape())
self.assertAllClose(expected.eval(), sqrt_operator_times_x.eval())
def test_sqrt_matmul_batch_matrix_with_transpose(self):
with self.test_session():
batch_shape = (2, 3)
for k in [1, 4]:
x_shape = batch_shape + (5, k)
x = self._rng.rand(*x_shape)
chol_shape = batch_shape + (k, k)
chol = self._random_cholesky_array(chol_shape)
operator = operator_pd_cholesky.OperatorPDCholesky(chol)
sqrt_operator_times_x = operator.sqrt_matmul(x, transpose_x=True)
# tf.batch_matmul is defined x * y, so "y" is on the right, not "x".
expected = tf.batch_matmul(chol, x, adj_y=True)
self.assertEqual(expected.get_shape(),
sqrt_operator_times_x.get_shape())
self.assertAllClose(expected.eval(), sqrt_operator_times_x.eval())
def test_matmul_single_matrix(self):
with self.test_session():
batch_shape = ()
for k in [1, 4]:
x_shape = batch_shape + (k, 5)
x = self._rng.rand(*x_shape)
chol_shape = batch_shape + (k, k)
chol = self._random_cholesky_array(chol_shape)
matrix = tf.batch_matmul(chol, chol, adj_y=True)
operator = operator_pd_cholesky.OperatorPDCholesky(chol)
expected = tf.batch_matmul(matrix, x)
self.assertEqual(expected.get_shape(), operator.matmul(x).get_shape())
self.assertAllClose(expected.eval(), operator.matmul(x).eval())
def test_matmul_batch_matrix(self):
with self.test_session():
batch_shape = (2, 3)
for k in [1, 4]:
x_shape = batch_shape + (k, 5)
x = self._rng.rand(*x_shape)
chol_shape = batch_shape + (k, k)
chol = self._random_cholesky_array(chol_shape)
matrix = tf.batch_matmul(chol, chol, adj_y=True)
operator = operator_pd_cholesky.OperatorPDCholesky(chol)
expected = tf.batch_matmul(matrix, x)
self.assertEqual(expected.get_shape(), operator.matmul(x).get_shape())
self.assertAllClose(expected.eval(), operator.matmul(x).eval())
def test_matmul_batch_matrix_with_transpose(self):
with self.test_session():
batch_shape = (2, 3)
for k in [1, 4]:
x_shape = batch_shape + (5, k)
x = self._rng.rand(*x_shape)
chol_shape = batch_shape + (k, k)
chol = self._random_cholesky_array(chol_shape)
matrix = tf.batch_matmul(chol, chol, adj_y=True)
operator = operator_pd_cholesky.OperatorPDCholesky(chol)
operator_times_x = operator.matmul(x, transpose_x=True)
# tf.batch_matmul is defined x * y, so "y" is on the right, not "x".
expected = tf.batch_matmul(matrix, x, adj_y=True)
self.assertEqual(expected.get_shape(), operator_times_x.get_shape())
self.assertAllClose(expected.eval(), operator_times_x.eval())
def test_shape(self):
# All other shapes are defined by the abstractmethod shape, so we only need
# to test this.
with self.test_session():
for shape in [(3, 3), (2, 3, 3), (1, 2, 3, 3)]:
chol = self._random_cholesky_array(shape)
operator = operator_pd_cholesky.OperatorPDCholesky(chol)
self.assertAllEqual(shape, operator.shape().eval())
def test_to_dense(self):
with self.test_session():
chol = self._random_cholesky_array((3, 3))
chol_2 = chol.copy()
chol_2[0, 2] = 1000 # Make sure upper triangular part makes no diff.
operator = operator_pd_cholesky.OperatorPDCholesky(chol_2)
self.assertAllClose(chol.dot(chol.T), operator.to_dense().eval())
def test_sqrt_to_dense(self):
with self.test_session():
chol = self._random_cholesky_array((2, 3, 3))
chol_2 = chol.copy()
chol_2[0, 0, 2] = 1000 # Make sure upper triangular part makes no diff.
operator = operator_pd_cholesky.OperatorPDCholesky(chol_2)
self.assertAllClose(chol, operator.sqrt_to_dense().eval())
def test_non_positive_definite_matrix_raises(self):
# Singular matrix with one positive eigenvalue and one zero eigenvalue.
with self.test_session():
lower_mat = [[1.0, 0.0], [2.0, 0.0]]
operator = operator_pd_cholesky.OperatorPDCholesky(lower_mat)
with self.assertRaisesOpError("x > 0 did not hold"):
operator.to_dense().eval()
def test_non_positive_definite_matrix_does_not_raise_if_not_verify_pd(self):
# Singular matrix with one positive eigenvalue and one zero eigenvalue.
with self.test_session():
lower_mat = [[1.0, 0.0], [2.0, 0.0]]
operator = operator_pd_cholesky.OperatorPDCholesky(
lower_mat, verify_pd=False)
operator.to_dense().eval() # Should not raise.
def test_not_having_two_identical_last_dims_raises(self):
# Unless the last two dims are equal, this cannot represent a matrix, and it
# should raise.
with self.test_session():
batch_vec = [[1.0], [2.0]] # shape 2 x 1
with self.assertRaisesRegexp(ValueError, ".*Dimensions.*"):
operator = operator_pd_cholesky.OperatorPDCholesky(batch_vec)
operator.to_dense().eval()
class MatrixDiagTransformTest(tf.test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(0)
def check_off_diagonal_same(self, m1, m2):
"""Check the lower triangular part, not upper or diag."""
self.assertAllClose(np.tril(m1, k=-1), np.tril(m2, k=-1))
self.assertAllClose(np.triu(m1, k=1), np.triu(m2, k=1))
def test_non_batch_matrix_with_transform(self):
mat = self._rng.rand(4, 4)
with self.test_session():
chol = distributions.matrix_diag_transform(mat, transform=tf.nn.softplus)
self.assertEqual((4, 4), chol.get_shape())
self.check_off_diagonal_same(mat, chol.eval())
self.assertAllClose(softplus(np.diag(mat)), np.diag(chol.eval()))
def test_non_batch_matrix_no_transform(self):
mat = self._rng.rand(4, 4)
with self.test_session():
# Default is no transform.
chol = distributions.matrix_diag_transform(mat)
self.assertEqual((4, 4), chol.get_shape())
self.assertAllClose(mat, chol.eval())
def test_batch_matrix_with_transform(self):
mat = self._rng.rand(2, 4, 4)
mat_0 = mat[0, :, :]
with self.test_session():
chol = distributions.matrix_diag_transform(mat, transform=tf.nn.softplus)
self.assertEqual((2, 4, 4), chol.get_shape())
chol_0 = chol.eval()[0, :, :]
self.check_off_diagonal_same(mat_0, chol_0)
self.assertAllClose(softplus(np.diag(mat_0)), np.diag(chol_0))
self.check_off_diagonal_same(mat_0, chol_0)
self.assertAllClose(softplus(np.diag(mat_0)), np.diag(chol_0))
def test_batch_matrix_no_transform(self):
mat = self._rng.rand(2, 4, 4)
with self.test_session():
# Default is no transform.
chol = distributions.matrix_diag_transform(mat)
self.assertEqual((2, 4, 4), chol.get_shape())
self.assertAllClose(mat, chol.eval())
if __name__ == "__main__":
tf.test.main()
# Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from ..neighbors import NearestNeighbors
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..decomposition import PCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..externals.six import string_types
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = distances.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
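# Editor's note (illustrative, not part of scikit-learn): the symmetrization
# above computes p_ij proportional to (p_{j|i} + p_{i|j}), normalised so that
# the symmetric matrix sums to one; the factor 2.0 later used in
# _kl_divergence accounts for each unordered pair being stored only once.
# squareform() converts between the square (n_samples, n_samples) form and
# the condensed upper-triangular vector that pdist produces, e.g.:
#
#     from scipy.spatial.distance import squareform
#     square = np.array([[0., 1., 2.], [1., 0., 3.], [2., 3., 0.]])
#     squareform(square)                   # -> array([1., 2., 3.])
#     squareform(np.array([1., 2., 3.]))   # -> back to the 3x3 square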
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
is O(N^2) in the number of samples, but limiting the joint probabilities to
nearest neighbors reduces this substantially to O(uN), where u is the number
of nearest neighbors.
Parameters
----------
distances : array, shape (n_samples, k)
Distances of samples to its k nearest neighbors.
neighbors : array, shape (n_samples, k)
Indices of the k nearest-neighbors for each samples.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : csr sparse matrix, shape (n_samples, n_samples)
Condensed joint probability matrix with only nearest neighbors.
"""
t0 = time()
# Compute conditional probabilities such that they approximately match
# the desired perplexity
n_samples, k = neighbors.shape
distances = distances.astype(np.float32, copy=False)
neighbors = neighbors.astype(np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
assert np.all(np.isfinite(conditional_P)), \
"All probabilities should be finite"
# Symmetrize the joint probability distribution using sparse operations
P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
range(0, n_samples * k + 1, k)),
shape=(n_samples, n_samples))
P = P + P.T
# Normalize the joint probability distribution
sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
P /= sum_P
assert np.all(np.abs(P.data) <= 1.0)
if verbose >= 2:
duration = time() - t0
print("[t-SNE] Computed conditional probabilities in {:.3f}s"
.format(duration))
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
dist = pdist(X_embedded, "sqeuclidean")
dist += 1.
dist /= degrees_of_freedom
dist **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
# Gradient: dC/dY
# pdist always returns double precision distances. Thus we need to take
# care to allocate the gradient with the dtype of ``params`` rather than
# the dtype of the distances.
grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
PQd = squareform((P - Q) * dist)
for i in range(skip_num_points, n_samples):
grad[i] = np.dot(np.ravel(PQd[i], order='K'),
X_embedded[i] - X_embedded)
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
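# Editor's note (illustrative, not part of scikit-learn): with
# nu = degrees_of_freedom and d_ij^2 the squared embedding distance, the code
# above computes the unnormalised kernel
#     w_ij = ((1 + d_ij^2) / nu) ** (-(nu + 1) / 2)
# and q_ij as w_ij normalised over all pairs. The gradient accumulated in the
# loop, after the final scaling by c = 2 (nu + 1) / nu, is
#     dC/dy_i = c * sum_j (p_ij - q_ij) * w_ij * (y_i - y_j)
# For the usual 2-D embedding nu = 1, so w_ij reduces to the familiar
# Student-t kernel 1 / (1 + d_ij^2) of the original t-SNE paper.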
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
angle=0.5, skip_num_points=0, verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : csr sparse matrix, shape (n_samples, n_sample)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = params.astype(np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
val_P = P.data.astype(np.float32, copy=False)
neighbors = P.indices.astype(np.int64, copy=False)
indptr = P.indptr.astype(np.int64, copy=False)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
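# Editor's note (illustrative, not part of scikit-learn): the Barnes-Hut
# gradient consumes the sparse joint probabilities through their raw CSR
# components; for a scipy.sparse.csr_matrix P these are simply
#
#     P.data      # nonzero p_ij values (cast to float32 above)
#     P.indices   # column index j of each stored value
#     P.indptr    # row pointers: P.data[indptr[i]:indptr[i+1]] belongs to row i
#
# which is exactly what _barnes_hut_tsne.gradient receives above.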
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
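# Editor's note: a minimal, illustrative use of _gradient_descent on a toy
# objective (not part of scikit-learn), showing the calling convention only.
# The objective must return a (error, gradient) tuple as described above;
# the quadratic below has its optimum at zero.
#
#     def _toy_objective(p):
#         return np.sum(p ** 2), 2.0 * p
#
#     p_opt, err, last_it = _gradient_descent(
#         _toy_objective, np.array([3.0, -2.0]), it=0, n_iter=250,
#         learning_rate=0.01, verbose=0)
#     # p_opt moves toward [0, 0]; the exact trajectory depends on the
#     # momentum and adaptive-gain schedule implemented above.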
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
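# Editor's note: a small, illustrative call of trustworthiness (not part of
# scikit-learn). Embedding the data onto itself keeps every neighborhood
# intact, so the score is exactly 1.0:
#
#     X = np.random.RandomState(0).rand(20, 5)
#     assert trustworthiness(X, X, n_neighbors=3) == 1.0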
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 12.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 250.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be stopped.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string or numpy array, optional (default: "random")
Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, n_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default: None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Note that different initializations might result in
different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> X_embedded = TSNE(n_components=2).fit_transform(X)
>>> X_embedded.shape
(4, 2)
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
# Control the number of exploration iterations with early_exaggeration on
_EXPLORATION_N_ITER = 250
# Control the number of iterations between progress checks
_N_ITER_CHECK = 50
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
n_iter_without_progress=300, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. Note that when
method='barnes_hut', X cannot be a sparse array and, if need be, will be
converted to a 32-bit float array. Method='exact' allows sparse arrays
and 64-bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.metric == "precomputed":
if isinstance(self.init, string_types) and self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
if np.any(X < 0):
raise ValueError("All distances should be positive, the "
"precomputed distances given as X is not "
"correct")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=[np.float32, np.float64])
if self.method == 'barnes_hut' and self.n_components > 3:
raise ValueError("'n_components' should be inferior to 4 for the "
"barnes_hut algorithm as it relies on "
"quad-tree or oct-tree.")
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is {}"
.format(self.early_exaggeration))
if self.n_iter < 250:
raise ValueError("n_iter should be at least 250")
n_samples = X.shape[0]
neighbors_nn = None
if self.method == "exact":
# Retrieve the distance matrix, either using the precomputed one or
# computing it.
if self.metric == "precomputed":
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if np.any(distances < 0):
raise ValueError("All distances should be positive, the "
"metric given is not correct")
# compute the joint probability distribution for the input space
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be non-negative"
assert np.all(P <= 1), ("All probabilities should be less "
"than or equal to one")
else:
# Compute the number of nearest neighbors to find.
# LvdM uses 3 * perplexity as the number of neighbors.
# In the event that we have very small # of points
# set the neighbors to n - 1.
k = min(n_samples - 1, int(3. * self.perplexity + 1))
if self.verbose:
print("[t-SNE] Computing {} nearest neighbors...".format(k))
# Find the nearest neighbors for every point
neighbors_method = 'ball_tree'
if (self.metric == 'precomputed'):
neighbors_method = 'brute'
knn = NearestNeighbors(algorithm=neighbors_method, n_neighbors=k,
metric=self.metric)
t0 = time()
knn.fit(X)
duration = time() - t0
if self.verbose:
print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
n_samples, duration))
t0 = time()
distances_nn, neighbors_nn = knn.kneighbors(
None, n_neighbors=k)
duration = time() - t0
if self.verbose:
print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
.format(n_samples, duration))
# Free the memory used by the ball_tree
del knn
if self.metric == "euclidean":
# knn returns the euclidean distance but we need it squared
# to be consistent with the 'exact' method. Note that the
# method was derived using the euclidean metric in the input
# space; the implications of using a different metric are unclear.
distances_nn **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities_nn(distances_nn, neighbors_nn,
self.perplexity, self.verbose)
if isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'pca':
pca = PCA(n_components=self.n_components, svd_solver='randomized',
random_state=random_state)
X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
elif self.init == 'random':
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
X_embedded = 1e-4 * random_state.randn(
n_samples, self.n_components).astype(np.float32)
else:
raise ValueError("'init' must be 'pca', 'random', or "
"a numpy array")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
@property
@deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
"will be removed in 0.21. Use ``n_iter_`` instead")
def n_iter_final(self):
return self.n_iter_
def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()
opt_args = {
"it": 0,
"n_iter_check": self._N_ITER_CHECK,
"min_grad_norm": self.min_grad_norm,
"learning_rate": self.learning_rate,
"verbose": self.verbose,
"kwargs": dict(skip_num_points=skip_num_points),
"args": [P, degrees_of_freedom, n_samples, self.n_components],
"n_iter_without_progress": self._EXPLORATION_N_ITER,
"n_iter": self._EXPLORATION_N_ITER,
"momentum": 0.5,
}
if self.method == 'barnes_hut':
obj_func = _kl_divergence_bh
opt_args['kwargs']['angle'] = self.angle
# Repeat verbose argument for _kl_divergence_bh
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
        # Learning schedule (part 1): do 250 iterations with lower momentum but
        # a higher learning rate controlled via the early exaggeration parameter
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
opt_args['momentum'] = 0.8
opt_args['n_iter_without_progress'] = self.n_iter_without_progress
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
# Save the final number of iterations
self.n_iter_ = it
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
y : Ignored
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
y : Ignored
"""
self.fit_transform(X)
return self
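# A minimal usage sketch for the estimator defined above (assuming it is
# exposed as sklearn.manifold.TSNE); the data and parameter values are
# illustrative only.
if __name__ == '__main__':
    import numpy as np
    from sklearn.manifold import TSNE
    X_demo = np.random.RandomState(0).rand(100, 20)
    X_2d = TSNE(n_components=2, perplexity=30.0,
                random_state=0).fit_transform(X_demo)
    print(X_2d.shape)  # expected: (100, 2)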
|
|
# utils.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import os
import re
import sys
import time
import stat
import shutil
import tempfile
import platform
from gitdb.util import (
make_sha,
LockedFD,
file_contents_ro,
LazyMixin,
to_hex_sha,
to_bin_sha
)
__all__ = ( "stream_copy", "join_path", "to_native_path_windows", "to_native_path_linux",
"join_path_native", "Stats", "IndexFileSHA1Writer", "Iterable", "IterableList",
"BlockingLockFile", "LockFile", 'Actor', 'get_user_id', 'assure_directory_exists',
'RemoteProgress', 'rmtree')
#{ Utility Methods
def rmtree(path):
"""Remove the given recursively.
:note: we use shutil rmtree but adjust its behaviour to see whether files that
couldn't be deleted are read-only. Windows will not remove them in that case"""
def onerror(func, path, exc_info):
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
# END end onerror
return shutil.rmtree(path, False, onerror)
def stream_copy(source, destination, chunk_size=512*1024):
"""Copy all data from the source stream into the destination stream in chunks
of size chunk_size
:return: amount of bytes written"""
br = 0
while True:
chunk = source.read(chunk_size)
destination.write(chunk)
br += len(chunk)
if len(chunk) < chunk_size:
break
# END reading output stream
return br
def join_path(a, *p):
"""Join path tokens together similar to os.path.join, but always use
'/' instead of possibly '\' on windows."""
path = a
for b in p:
if len(b) == 0:
continue
if b.startswith('/'):
path += b[1:]
elif path == '' or path.endswith('/'):
path += b
else:
path += '/' + b
# END for each path token to add
return path
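def _join_path_example():
    """Hedged usage sketch for join_path (illustrative only, not exported)."""
    # tokens are always joined with '/', independent of the platform
    assert join_path('refs', 'heads', 'master') == 'refs/heads/master'
    # a leading '/' on later tokens and a trailing '/' on the base are collapsed
    assert join_path('refs/', '/heads') == 'refs/heads'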
def to_native_path_windows(path):
return path.replace('/','\\')
def to_native_path_linux(path):
return path.replace('\\','/')
if sys.platform.startswith('win'):
to_native_path = to_native_path_windows
else:
# no need for any work on linux
def to_native_path_linux(path):
return path
to_native_path = to_native_path_linux
def join_path_native(a, *p):
"""
    As join_path, but makes sure an OS native path is returned. This is only
needed to play it safe on my dear windows and to assure nice paths that only
use '\'"""
return to_native_path(join_path(a, *p))
def assure_directory_exists(path, is_file=False):
"""Assure that the directory pointed to by path exists.
:param is_file: If True, path is assumed to be a file and handled correctly.
Otherwise it must be a directory
:return: True if the directory was created, False if it already existed"""
if is_file:
path = os.path.dirname(path)
#END handle file
if not os.path.isdir(path):
os.makedirs(path)
return True
return False
def get_user_id():
""":return: string identifying the currently active system user as name@node
    :note: the name is read from the 'USER' environment variable, falling back to
    'USERNAME' (usually set on windows) and finally os.getlogin()"""
ukn = 'UNKNOWN'
username = os.environ.get('USER', os.environ.get('USERNAME', ukn))
if username == ukn and hasattr(os, 'getlogin'):
username = os.getlogin()
# END get username from login
return "%s@%s" % (username, platform.node())
#} END utilities
#{ Classes
class RemoteProgress(object):
"""
Handler providing an interface to parse progress information emitted by git-push
and git-fetch and to dispatch callbacks allowing subclasses to react to the progress.
"""
_num_op_codes = 7
BEGIN, END, COUNTING, COMPRESSING, WRITING, RECEIVING, RESOLVING = [1 << x for x in range(_num_op_codes)]
STAGE_MASK = BEGIN|END
OP_MASK = ~STAGE_MASK
__slots__ = ("_cur_line", "_seen_ops")
re_op_absolute = re.compile("(remote: )?([\w\s]+):\s+()(\d+)()(.*)")
re_op_relative = re.compile("(remote: )?([\w\s]+):\s+(\d+)% \((\d+)/(\d+)\)(.*)")
def __init__(self):
self._seen_ops = list()
def _parse_progress_line(self, line):
"""Parse progress information from the given line as retrieved by git-push
or git-fetch
:return: list(line, ...) list of lines that could not be processed"""
# handle
# Counting objects: 4, done.
# Compressing objects: 50% (1/2) \rCompressing objects: 100% (2/2) \rCompressing objects: 100% (2/2), done.
self._cur_line = line
sub_lines = line.split('\r')
failed_lines = list()
for sline in sub_lines:
            # find escape characters and cut them away - regex will not work with
            # them as they are non-ascii. As git might expect a tty, it will send them
last_valid_index = None
for i,c in enumerate(reversed(sline)):
if ord(c) < 32:
                    # it's a slice index
last_valid_index = -i-1
# END character was non-ascii
# END for each character in sline
if last_valid_index is not None:
sline = sline[:last_valid_index]
# END cut away invalid part
sline = sline.rstrip()
cur_count, max_count = None, None
match = self.re_op_relative.match(sline)
if match is None:
match = self.re_op_absolute.match(sline)
if not match:
self.line_dropped(sline)
failed_lines.append(sline)
continue
# END could not get match
op_code = 0
remote, op_name, percent, cur_count, max_count, message = match.groups()
# get operation id
if op_name == "Counting objects":
op_code |= self.COUNTING
elif op_name == "Compressing objects":
op_code |= self.COMPRESSING
elif op_name == "Writing objects":
op_code |= self.WRITING
elif op_name == 'Receiving objects':
op_code |= self.RECEIVING
elif op_name == 'Resolving deltas':
op_code |= self.RESOLVING
else:
# Note: On windows it can happen that partial lines are sent
# Hence we get something like "CompreReceiving objects", which is
# a blend of "Compressing objects" and "Receiving objects".
# This can't really be prevented, so we drop the line verbosely
# to make sure we get informed in case the process spits out new
# commands at some point.
self.line_dropped(sline)
sys.stderr.write("Operation name %r unknown - skipping line '%s'" % (op_name, sline))
# Note: Don't add this line to the failed lines, as we have to silently
# drop it
return failed_lines
# END handle op code
# figure out stage
if op_code not in self._seen_ops:
self._seen_ops.append(op_code)
op_code |= self.BEGIN
# END begin opcode
if message is None:
message = ''
# END message handling
message = message.strip()
done_token = ', done.'
if message.endswith(done_token):
op_code |= self.END
message = message[:-len(done_token)]
# END end message handling
self.update(op_code, cur_count, max_count, message)
# END for each sub line
return failed_lines
def line_dropped(self, line):
"""Called whenever a line could not be understood and was therefore dropped."""
pass
def update(self, op_code, cur_count, max_count=None, message=''):
"""Called whenever the progress changes
:param op_code:
Integer allowing to be compared against Operation IDs and stage IDs.
Stage IDs are BEGIN and END. BEGIN will only be set once for each Operation
ID as well as END. It may be that BEGIN and END are set at once in case only
one progress message was emitted due to the speed of the operation.
Between BEGIN and END, none of these flags will be set
Operation IDs are all held within the OP_MASK. Only one Operation ID will
be active per call.
:param cur_count: Current absolute count of items
:param max_count:
The maximum count of items we expect. It may be None in case there is
no maximum number of items or if it is (yet) unknown.
:param message:
In case of the 'WRITING' operation, it contains the amount of bytes
transferred. It may possibly be used for other purposes as well.
You may read the contents of the current line in self._cur_line"""
pass
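# A minimal sketch of how a consumer might subclass RemoteProgress; the class
# name and the plain print() reporting are illustrative only.
class _PrintingProgress(RemoteProgress):
    def update(self, op_code, cur_count, max_count=None, message=''):
        if max_count:
            print("%s / %s %s" % (cur_count, max_count, message))
        else:
            print("%s %s" % (cur_count, message))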
class Actor(object):
"""Actors hold information about a person acting on the repository. They
can be committers and authors or anything with a name and an email as
mentioned in the git log entries."""
# PRECOMPILED REGEX
name_only_regex = re.compile( r'<(.+)>' )
name_email_regex = re.compile( r'(.*) <(.+?)>' )
# ENVIRONMENT VARIABLES
# read when creating new commits
env_author_name = "GIT_AUTHOR_NAME"
env_author_email = "GIT_AUTHOR_EMAIL"
env_committer_name = "GIT_COMMITTER_NAME"
env_committer_email = "GIT_COMMITTER_EMAIL"
# CONFIGURATION KEYS
conf_name = 'name'
conf_email = 'email'
__slots__ = ('name', 'email')
def __init__(self, name, email):
self.name = name
self.email = email
def __eq__(self, other):
return self.name == other.name and self.email == other.email
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash((self.name, self.email))
def __str__(self):
return self.name
def __repr__(self):
return '<git.Actor "%s <%s>">' % (self.name, self.email)
@classmethod
def _from_string(cls, string):
"""Create an Actor from a string.
:param string: is the string, which is expected to be in regular git format
John Doe <[email protected]>
:return: Actor """
m = cls.name_email_regex.search(string)
if m:
name, email = m.groups()
return Actor(name, email)
else:
m = cls.name_only_regex.search(string)
if m:
return Actor(m.group(1), None)
else:
# assume best and use the whole string as name
return Actor(string, None)
# END special case name
# END handle name/email matching
@classmethod
def _main_actor(cls, env_name, env_email, config_reader=None):
actor = Actor('', '')
default_email = get_user_id()
default_name = default_email.split('@')[0]
for attr, evar, cvar, default in (('name', env_name, cls.conf_name, default_name),
('email', env_email, cls.conf_email, default_email)):
try:
setattr(actor, attr, os.environ[evar])
except KeyError:
if config_reader is not None:
setattr(actor, attr, config_reader.get_value('user', cvar, default))
#END config-reader handling
if not getattr(actor, attr):
setattr(actor, attr, default)
#END handle name
#END for each item to retrieve
return actor
@classmethod
def committer(cls, config_reader=None):
"""
:return: Actor instance corresponding to the configured committer. It behaves
similar to the git implementation, such that the environment will override
configuration values of config_reader. If no value is set at all, it will be
generated
:param config_reader: ConfigReader to use to retrieve the values from in case
they are not set in the environment"""
return cls._main_actor(cls.env_committer_name, cls.env_committer_email, config_reader)
@classmethod
def author(cls, config_reader=None):
"""Same as committer(), but defines the main author. It may be specified in the environment,
but defaults to the committer"""
return cls._main_actor(cls.env_author_name, cls.env_author_email, config_reader)
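def _actor_parsing_example():
    """Hedged sketch of Actor._from_string on the usual git log format."""
    a = Actor._from_string("John Doe <[email protected]>")
    assert (a.name, a.email) == ("John Doe", "[email protected]")
    # a bare name without an email still yields an Actor
    assert Actor._from_string("John Doe").email is None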
class Stats(object):
"""
Represents stat information as presented by git at the end of a merge. It is
created from the output of a diff operation.
``Example``::
c = Commit( sha1 )
s = c.stats
s.total # full-stat-dict
s.files # dict( filepath : stat-dict )
``stat-dict``
A dictionary with the following keys and values::
deletions = number of deleted lines as int
insertions = number of inserted lines as int
lines = total number of lines changed as int, or deletions + insertions
``full-stat-dict``
In addition to the items in the stat-dict, it features additional information::
files = number of changed files as int"""
__slots__ = ("total", "files")
def __init__(self, total, files):
self.total = total
self.files = files
@classmethod
def _list_from_string(cls, repo, text):
"""Create a Stat object from output retrieved by git-diff.
:return: git.Stat"""
hsh = {'total': {'insertions': 0, 'deletions': 0, 'lines': 0, 'files': 0}, 'files': dict()}
for line in text.splitlines():
(raw_insertions, raw_deletions, filename) = line.split("\t")
insertions = raw_insertions != '-' and int(raw_insertions) or 0
deletions = raw_deletions != '-' and int(raw_deletions) or 0
hsh['total']['insertions'] += insertions
hsh['total']['deletions'] += deletions
hsh['total']['lines'] += insertions + deletions
hsh['total']['files'] += 1
hsh['files'][filename.strip()] = {'insertions': insertions,
'deletions': deletions,
'lines': insertions + deletions}
return Stats(hsh['total'], hsh['files'])
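def _stats_example():
    """Hedged sketch of Stats._list_from_string on git-diff --numstat style text."""
    text = "10\t2\tREADME\n-\t-\tdata.bin"
    s = Stats._list_from_string(None, text)
    assert s.total['insertions'] == 10
    assert s.total['files'] == 2
    assert s.files['README']['lines'] == 12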
class IndexFileSHA1Writer(object):
"""Wrapper around a file-like object that remembers the SHA1 of
    the data written to it. It will write a sha when the stream is closed
    or when asked for explicitly using write_sha.
    Only useful for the index file.
:note: Based on the dulwich project"""
__slots__ = ("f", "sha1")
def __init__(self, f):
self.f = f
self.sha1 = make_sha("")
def write(self, data):
self.sha1.update(data)
return self.f.write(data)
def write_sha(self):
sha = self.sha1.digest()
self.f.write(sha)
return sha
def close(self):
sha = self.write_sha()
self.f.close()
return sha
def tell(self):
return self.f.tell()
class LockFile(object):
"""Provides methods to obtain, check for, and release a file based lock which
should be used to handle concurrent access to the same file.
As we are a utility class to be derived from, we only use protected methods.
Locks will automatically be released on destruction"""
__slots__ = ("_file_path", "_owns_lock")
def __init__(self, file_path):
self._file_path = file_path
self._owns_lock = False
def __del__(self):
self._release_lock()
def _lock_file_path(self):
""":return: Path to lockfile"""
return "%s.lock" % (self._file_path)
def _has_lock(self):
""":return: True if we have a lock and if the lockfile still exists
:raise AssertionError: if our lock-file does not exist"""
if not self._owns_lock:
return False
return True
def _obtain_lock_or_raise(self):
"""Create a lock file as flag for other instances, mark our instance as lock-holder
:raise IOError: if a lock was already present or a lock file could not be written"""
if self._has_lock():
return
lock_file = self._lock_file_path()
if os.path.isfile(lock_file):
raise IOError("Lock for file %r did already exist, delete %r in case the lock is illegal" % (self._file_path, lock_file))
try:
fd = os.open(lock_file, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0)
os.close(fd)
except OSError,e:
raise IOError(str(e))
self._owns_lock = True
def _obtain_lock(self):
"""The default implementation will raise if a lock cannot be obtained.
Subclasses may override this method to provide a different implementation"""
return self._obtain_lock_or_raise()
def _release_lock(self):
"""Release our lock if we have one"""
if not self._has_lock():
return
        # if someone removed our file beforehand, let's just flag this issue
# instead of failing, to make it more usable.
lfp = self._lock_file_path()
try:
# on bloody windows, the file needs write permissions to be removable.
# Why ...
if os.name == 'nt':
os.chmod(lfp, 0777)
# END handle win32
os.remove(lfp)
except OSError:
pass
self._owns_lock = False
class BlockingLockFile(LockFile):
"""The lock file will block until a lock could be obtained, or fail after
a specified timeout.
:note: If the directory containing the lock was removed, an exception will
be raised during the blocking period, preventing hangs as the lock
can never be obtained."""
__slots__ = ("_check_interval", "_max_block_time")
def __init__(self, file_path, check_interval_s=0.3, max_block_time_s=sys.maxint):
"""Configure the instance
        :param check_interval_s:
            Period of time to sleep until the lock is checked the next time.
        :param max_block_time_s:
            Maximum amount of seconds we may block waiting for the lock.
            By default, this is nearly unlimited."""
super(BlockingLockFile, self).__init__(file_path)
self._check_interval = check_interval_s
self._max_block_time = max_block_time_s
def _obtain_lock(self):
"""This method blocks until it obtained the lock, or raises IOError if
it ran out of time or if the parent directory was not available anymore.
If this method returns, you are guranteed to own the lock"""
starttime = time.time()
maxtime = starttime + float(self._max_block_time)
while True:
try:
super(BlockingLockFile, self)._obtain_lock()
except IOError:
                # sanity check: if the directory leading to the lockfile is not
                # readable anymore, raise an exception
curtime = time.time()
if not os.path.isdir(os.path.dirname(self._lock_file_path())):
msg = "Directory containing the lockfile %r was not readable anymore after waiting %g seconds" % (self._lock_file_path(), curtime - starttime)
raise IOError(msg)
# END handle missing directory
if curtime >= maxtime:
msg = "Waited %g seconds for lock at %r" % ( maxtime - starttime, self._lock_file_path())
raise IOError(msg)
# END abort if we wait too long
time.sleep(self._check_interval)
else:
break
# END endless loop
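# A hedged usage sketch for the lock classes above: a caller (normally a
# subclass) wraps its critical section in _obtain_lock()/_release_lock().
# The file path and timings are illustrative only.
def _lock_file_example(path='/tmp/example.cfg'):
    lock = BlockingLockFile(path, check_interval_s=0.1, max_block_time_s=5)
    lock._obtain_lock()
    try:
        pass  # read or write the file protected by the lock here
    finally:
        lock._release_lock()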
class IterableList(list):
"""
List of iterable objects allowing to query an object by id or by named index::
heads = repo.heads
heads.master
heads['master']
heads[0]
    It requires an id_attribute name to be set which will be queried from its
    contained items to serve as the key for lookups and comparisons.
    A prefix can be specified for the case where the ids returned by the items
    always contain a prefix that does not matter to the user, so it can be
    left out when querying."""
__slots__ = ('_id_attr', '_prefix')
def __new__(cls, id_attr, prefix=''):
return super(IterableList,cls).__new__(cls)
def __init__(self, id_attr, prefix=''):
self._id_attr = id_attr
self._prefix = prefix
if not isinstance(id_attr, basestring):
raise ValueError("First parameter must be a string identifying the name-property. Extend the list after initialization")
# END help debugging !
def __contains__(self, attr):
        # first try identity match for performance
rval = list.__contains__(self, attr)
if rval:
return rval
#END handle match
# otherwise make a full name search
try:
getattr(self, attr)
return True
except (AttributeError, TypeError):
return False
#END handle membership
def __getattr__(self, attr):
attr = self._prefix + attr
for item in self:
if getattr(item, self._id_attr) == attr:
return item
# END for each item
return list.__getattribute__(self, attr)
def __getitem__(self, index):
if isinstance(index, int):
return list.__getitem__(self,index)
try:
return getattr(self, index)
except AttributeError:
raise IndexError( "No item found with id %r" % (self._prefix + index) )
# END handle getattr
def __delitem__(self, index):
delindex = index
if not isinstance(index, int):
delindex = -1
name = self._prefix + index
for i, item in enumerate(self):
if getattr(item, self._id_attr) == name:
delindex = i
break
#END search index
#END for each item
if delindex == -1:
raise IndexError("Item with name %s not found" % name)
#END handle error
#END get index to delete
list.__delitem__(self, delindex)
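def _iterable_list_example():
    """Hedged usage sketch for IterableList (illustrative only)."""
    class _Ref(object):
        def __init__(self, name):
            self.name = name
    refs = IterableList('name')
    refs.extend([_Ref('master'), _Ref('develop')])
    assert refs.master is refs[0]          # attribute access by id
    assert refs['develop'] is refs[1]      # item access by id
    assert 'master' in refs                # membership by id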
class Iterable(object):
"""Defines an interface for iterable items which is to assure a uniform
way to retrieve and iterate items within the git repository"""
__slots__ = tuple()
_id_attribute_ = "attribute that most suitably identifies your instance"
@classmethod
def list_items(cls, repo, *args, **kwargs):
"""
        Find all items of this type - subclasses can specify args and kwargs differently.
        If no additional arguments are given, subclasses are obliged to return all items.
        :note: Favor the iter_items method as it will avoid materializing all
            items at once.
        :return: list(Item,...) list of item instances"""
out_list = IterableList( cls._id_attribute_ )
out_list.extend(cls.iter_items(repo, *args, **kwargs))
return out_list
@classmethod
def iter_items(cls, repo, *args, **kwargs):
"""For more information about the arguments, see list_items
:return: iterator yielding Items"""
raise NotImplementedError("To be implemented by Subclass")
#} END classes
|
|
from builtins import object
import numpy as _np
import astropy.units as _u
import astropy.constants as _const
import astropy.cosmology as _cosmology
import scipy.integrate as _integrate
class MakinoProfile(object):
def __init__(self,
halo_mass,
redshift,
T=None,
delta_vir=None,
mu=None,
f_gas=None,
omega_b=None,
gamma=None,
concentration_method=None,
cosmo=None):
"""Creates a Makino profile for the given halo mass and redshift"""
self.halo_mass = halo_mass
self.redshift = redshift
if delta_vir is None:
delta_vir = 200
self.delta_vir = delta_vir
if mu is None:
mu = 0.60364
self.mu = mu
if f_gas is None:
f_gas = 1.0
self.f_gas = f_gas
if cosmo is None:
cosmo = _cosmology.Planck15
self.cosmo = cosmo
if omega_b is None:
omega_b = self.cosmo.Ob(self.redshift) / self.cosmo.Om(
self.redshift)
self.omega_b = omega_b
if gamma is None:
gamma = 5.0 / 3.0
self.gamma = gamma
if concentration_method is None:
concentration_method = 'klypin-planck-relaxed'
self.concentration_method = concentration_method
self.critical_density = self.calculate_critical_density(
self.redshift, cosmo=self.cosmo)
self.virial_radius = self.calculate_virial_radius(
self.halo_mass, self.delta_vir, self.critical_density)
if T is None:
T = self.calculate_temperature(self.halo_mass, self.mu,
self.virial_radius)
self.T = T
self.concentration = self.calculate_concentration(
self.halo_mass,
redshift=self.redshift,
method=self.concentration_method,
cosmo=self.cosmo)
self.scale_radius = self.calculate_scale_radius(self.virial_radius,
self.concentration)
self.characteristic_density = self.calculate_characteristic_density(
self.delta_vir, self.concentration)
self.nfw_parameter = self.calculate_nfw_parameter(
self.scale_radius, self.characteristic_density,
self.critical_density, self.mu, self.T)
self.phi_nfw = self.calculate_phi_nfw(self.nfw_parameter, self.mu,
self.T)
self.central_density = self.calculate_central_density(
self.f_gas, self.omega_b, self.halo_mass, self.virial_radius,
self.nfw_parameter, self.scale_radius)
self.sound_speed = self.calculate_sound_speed(self.gamma, self.T,
self.mu)
def get_density(self, radius):
"""Calculates the density for this profile at the specified radius"""
return self.calculate_density_at_radius(self.central_density,
self.nfw_parameter,
self.scale_radius, radius)
@staticmethod
def calculate_temperature(halo_mass,
mu,
virial_radius,
gamma_isothermal=1.5):
"""Calculates the virial temperature using an isothermal approximation from Makino+98"""
t_vir = (gamma_isothermal / 3.0) * (mu * _const.m_p) * (
_const.G * halo_mass) / (virial_radius)
return t_vir.to(_u.keV)
@staticmethod
def calculate_critical_density(redshift, cosmo=_cosmology.Planck15):
"""Calculates the critical density parameter for the given redshift,
        using the Planck 2015 survey results for the Hubble constant by default"""
H_squared = cosmo.H(redshift)**2
critical_density = (3.0 * H_squared) / (8.0 * _np.pi * _const.G)
return critical_density.to(_u.g / _u.cm**3)
@staticmethod
def calculate_virial_radius(virial_mass, delta_vir, critical_density):
"""Calculates the virial radius for the given virial mass, delta_vir and critical density"""
r_vir = ((3.0 * virial_mass) /
(4.0 * _np.pi * delta_vir * critical_density))**(1.0 / 3.0)
return r_vir.to(_u.kpc)
@staticmethod
def calculate_concentration(virial_mass,
method='bullock',
redshift=0,
cosmo=_cosmology.Planck15):
"""Calculates the concentration parameter for a given virial mass and redshift"""
# parameters for Dolag+04, delta_vir is 200 using MEAN density. Cosmology closely matches WMAP9
c_0_dolag = 9.59
alpha_dolag = -0.102
# parameters for Klypin+16 Planck, all halos, delta_vir is 200 using critical density
c_0_klypin_planck_all = 7.40
gamma_klypin_planck_all = 0.120
m_0_klypin_planck_all = 5.5e5
# parameters for Klypin+16 Planck, relaxed halos, delta_vir is 200 using critical density
c_0_klypin_planck_relaxed = 7.75
gamma_klypin_planck_relaxed = 0.100
m_0_klypin_planck_relaxed = 4.5e5
# parameters for Klypin WMAP7 all halos, delta_vir is 200 using critical density
c_0_wmap_all = 6.60
gam_wmap_all = 0.110
m_0_wmap_all = 2e6
# parameters for Klypin WMAP7 relaxed halos, delta_vir is 200 using critical density
c_0_wmap_relaxed = 6.90
gam_wmap_relaxed = 0.090
m_0_wmap_relaxed = 5.5e5
# parameters for Dutton+14, NFW model, delta_vir = 200 using critical density, Planck cosmology
b_dutton = -0.101 + 0.026 * redshift
a_dutton = 0.520 + (0.905 - 0.520) * _np.exp(-0.617 * (redshift**1.21))
# parameters for Maccio+08, NFW model, WMAP5, delta_vir = 200 using critical density
zero_maccio = 0.787
slope_maccio = -0.110
c = 0.0
# Dolag+04 method
if method == 'dolag':
c = (c_0_dolag) / (1 + redshift) * ((virial_mass / (
10.0**14 * _u.M_sun * (1.0 / cosmo.h)))**alpha_dolag)
# Bullock+01 method, cosmological parameters agree with WMAP9, delta_vir = 180 using 337 using mean density (for LambdaCDM)
elif method == 'bullock':
c = (8.0 / (1 + redshift)) * (10**(
(-0.13) * (_np.log10(virial_mass.to(_u.M_sun).value) - 14.15)))
# Klypin+16 methods
elif method == 'klypin-planck-all':
c = (c_0_klypin_planck_all * ((virial_mass / (1e12 * _u.M_sun *
(cosmo.h**(-1))))
**-gamma_klypin_planck_all) *
(1 + (virial_mass / (m_0_klypin_planck_all *
(1e12 * _u.M_sun * (cosmo.h**
(-1)))))**0.4))
elif method == 'klypin-planck-relaxed':
c = (c_0_klypin_planck_relaxed * ((virial_mass / (1e12 * _u.M_sun *
(cosmo.h**(-1))))
**-gamma_klypin_planck_relaxed) *
(1 + (virial_mass / (m_0_klypin_planck_relaxed *
(1e12 * _u.M_sun * (cosmo.h**
(-1)))))**0.4))
elif method == 'klypin-wmap-all':
c = ((c_0_wmap_all *
((virial_mass / (1e12 * _u.M_sun * (cosmo.h**(-1))))
**-gam_wmap_all)) * (
(1 + (virial_mass / (m_0_wmap_all * (1e12 * _u.M_sun *
(cosmo.h**
(-1)))))**0.4)))
elif method == 'klypin-wmap-relaxed':
c = ((c_0_wmap_relaxed * (
(virial_mass /
(1e12 * _u.M_sun * (cosmo.h**(-1))))**-gam_wmap_relaxed)) * (
(1 + (virial_mass / (m_0_wmap_relaxed * (1e12 * _u.M_sun *
(cosmo.h**
(-1)))))**0.4)))
# Dutton+14 method
elif method == 'dutton':
logc = a_dutton + b_dutton * _np.log10(virial_mass /
(1e12 *
(cosmo.h**-1) * _u.M_sun))
c = 10**logc
# Maccio+08 method
elif method == 'maccio':
logc = zero_maccio + slope_maccio * (_np.log10(virial_mass / (
_u.M_sun * (1.0 / cosmo.h))) - 12)
c = 10**logc
else:
raise ValueError('Unknown concentration method chosen')
return c
@staticmethod
def calculate_scale_radius(virial_radius, concentration):
"""Calculates the scale radius for a given virial radius and concentration"""
r_s = virial_radius / concentration
return r_s
@staticmethod
def calculate_characteristic_density(delta_vir, concentration):
"""Calculates the characteristic density for a given Delta_vir and concentration"""
char_density = (delta_vir / 3.0) * (
(concentration**3.0) / (_np.log(1.0 + concentration) -
(concentration / (1.0 + concentration))))
return char_density
@staticmethod
def calculate_nfw_parameter(r_s, char_density, rho_crit, mu, T):
"""Calculates the nfw parameter for a given scale radius, characteristic density, critical density, mean molecular weight
and temperature (in keV)"""
delta_nfw = _const.G * 4 * _np.pi * char_density * rho_crit * (
r_s**2.0) * (mu * _const.m_p) / T
return delta_nfw.si
@staticmethod
def calculate_phi_nfw(delta_nfw, mu, T):
"""Calculates phi NFW for a given NFW parameter, mean molecular weight and temperature"""
phi_nfw = -delta_nfw * (T / (mu * _const.m_p))
return phi_nfw.si
@staticmethod
def calculate_central_density(f_gas, omega_b, virial_mass, virial_radius,
delta_nfw, r_s):
"""Calculates the central density for the NFW profile, given the hot gas fraction, baryonic matter percentage and
virial mass of the halo"""
integral = (_u.kpc**3) * _integrate.quad(
lambda r: 4 * _np.pi * (r**2) * _np.exp(-delta_nfw) * _np.power((1.0 + r / r_s.value), delta_nfw / (r / r_s.value)),
0, virial_radius.value)
denom = integral[0]
rho_0 = (f_gas * omega_b * virial_mass) / denom
return rho_0.to(_u.g * _u.cm**(-3))
@staticmethod
def calculate_sound_speed(gamma, T, mu):
return _np.sqrt((gamma * T) / (mu * _const.m_p)).to(_u.km / _u.s)
@staticmethod
def calculate_density_at_radius(central_density, delta_nfw, r_s, r):
return central_density * _np.exp(-delta_nfw) * (1 + r / r_s)**(
delta_nfw / (r / r_s))
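# A minimal usage sketch (values are illustrative only): halo_mass must carry an
# astropy mass unit and get_density expects a radius with a length unit.
if __name__ == '__main__':
    profile = MakinoProfile(1e13 * _u.M_sun, redshift=0.1)
    print(profile.virial_radius, profile.T, profile.concentration)
    print(profile.get_density(50 * _u.kpc))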
|
|
from __future__ import print_function, division
from builtins import range
from builtins import object
import numpy as np
from code_base import optim
from code_base.data_utils import sample_minibatch, one_hot_encoding
class SentimentAnalysisSolver(object):
"""
A SentimentAnalysisSolver encapsulates all the logic necessary for training
sentiment analysis models. The SentimentAnalysisSolver performs stochastic
gradient descent using different update rules defined in optim.py.
    The solver accepts both training and validation data and labels so it can
periodically check classification accuracy on both training and validation
data to watch out for overfitting.
To train a model, you will first construct a SentimentAnalysisSolver instance,
passing the model, dataset, and various options (learning rate, batch size,
etc) to the constructor. You will then call the train() method to run the
optimization procedure and train the model.
After the train() method returns, model.params will contain the parameters
that performed best on the validation set over the course of training.
In addition, the instance variable solver.loss_history will contain a list
of all losses encountered during training and the instance variables
solver.train_acc_history and solver.val_acc_history will be lists containing
the accuracies of the model on the training and validation set at each epoch.
Example usage might look something like this:
data = load_data()
model = MyAwesomeModel(hidden_dim=100)
solver = SentimentAnalysisSolver(model, data,
update_rule='sgd',
optim_config={
'learning_rate': 1e-3,
},
lr_decay=0.95,
num_epochs=10, batch_size=100,
print_every=100)
solver.train()
A SentimentAnalysisSolver works on a model object that must conform to the following
API:
- model.params must be a dictionary mapping string parameter names to numpy
arrays containing parameter values.
- model.loss(sentences, labels) must be a function that computes
training-time loss and gradients, with the following inputs and outputs:
Inputs:
- sentences: Array giving a minibatch of sentences, N strings
- labels: Array of sentiment classes for the sentences, N labels where
each element is 0 (negative) or 1 (positive).
Returns:
- loss: Scalar giving the loss
- grads: Dictionary with the same keys as self.params mapping parameter
names to gradients of the loss with respect to those parameters.
"""
def __init__(self, model, data, **kwargs):
"""
Construct a new SentimentAnalysisSolver instance.
Required arguments:
- model: A model object conforming to the API described above
- data: A dictionary of training and validation data/labels from load_data
Optional arguments:
- update_rule: A string giving the name of an update rule in optim.py.
Default is 'sgd'.
- optim_config: A dictionary containing hyperparameters that will be
passed to the chosen update rule. Each update rule requires different
hyperparameters (see optim.py) but all update rules require a
'learning_rate' parameter so that should always be present.
- lr_decay: A scalar for learning rate decay; after each epoch the learning
rate is multiplied by this value.
- batch_size: Size of minibatches used to compute loss and gradient during
training.
- num_epochs: The number of epochs to run for during training.
- print_every: Integer; training losses will be printed every print_every
iterations.
- verbose: Boolean; if set to false then no output will be printed during
training.
"""
self.model = model
self.data = data
# Unpack keyword arguments
self.update_rule = kwargs.pop('update_rule', 'sgd')
self.optim_config = kwargs.pop('optim_config', {})
self.lr_decay = kwargs.pop('lr_decay', 1.0)
self.batch_size = kwargs.pop('batch_size', 100)
self.num_epochs = kwargs.pop('num_epochs', 10)
self.print_every = kwargs.pop('print_every', 10)
self.verbose = kwargs.pop('verbose', True)
# Throw an error if there are extra keyword arguments
if len(kwargs) > 0:
extra = ', '.join('"%s"' % k for k in list(kwargs.keys()))
raise ValueError('Unrecognized arguments %s' % extra)
# Make sure the update rule exists, then replace the string
# name with the actual function
if not hasattr(optim, self.update_rule):
raise ValueError('Invalid update_rule "%s"' % self.update_rule)
self.update_rule = getattr(optim, self.update_rule)
self._reset()
def _reset(self):
"""
Set up some book-keeping variables for optimization. Don't call this
manually.
"""
# Set up some variables for book-keeping
self.epoch = 0
self.best_val_acc = 0
self.best_params = {}
self.loss_history = []
self.train_acc_history = []
self.val_acc_history = []
# Make a deep copy of the optim_config for each parameter
self.optim_configs = {}
for p in self.model.params:
d = {k: v for k, v in self.optim_config.items()}
self.optim_configs[p] = d
def _step(self):
"""
Make a single gradient update. This is called by train() and should not
be called manually.
"""
# Make a minibatch of training data
sentences, labels = sample_minibatch(self.data,
batch_size=self.batch_size,
split='train', random=False)
wordvecs, mask = one_hot_encoding(sentences, self.model.word_to_idx,
max_length=self.model.max_length)
# Compute loss and gradient
loss, grads = self.model.loss(wordvecs, labels, mask)
self.loss_history.append(loss)
# Perform a parameter update
for p, w in self.model.params.items():
dw = grads[p]
config = self.optim_configs[p]
next_w, next_config = self.update_rule(w, dw, config)
self.model.params[p] = next_w
self.optim_configs[p] = next_config
def train(self):
"""
Run optimization to train the model.
"""
num_train = self.data['train'].shape[0]
iterations_per_epoch = max(num_train // self.batch_size, 1)
num_iterations = self.num_epochs * iterations_per_epoch
for t in range(num_iterations):
self._step()
# Maybe print training loss
if self.verbose and t % self.print_every == 0:
print('(Iteration %d / %d) loss: %f' % (
t + 1, num_iterations, self.loss_history[-1]))
# At the end of every epoch, increment the epoch counter and decay the
# learning rate.
epoch_end = (t + 1) % iterations_per_epoch == 0
if epoch_end:
self.epoch += 1
for k in self.optim_configs:
self.optim_configs[k]['learning_rate'] *= self.lr_decay
# Check train and val accuracy on the first iteration, the last
# iteration, and at the end of each epoch.
# TODO: Implement some logic to check Bleu on validation set periodically
# At the end of training swap the best params into the model
# self.model.params = self.best_params
def test(self, split='test'):
"""
Generate the sentiments for the samples based on the trained model
Returns:
-preds: predicted sentiments, 1-D array where each element is either
0 (Negative) or 1 (Positive).
"""
# We use the first batch samples for inference
sentences, labels = sample_minibatch(self.data,
batch_size=self.batch_size,
split=split, random=False)
wordvecs, mask = one_hot_encoding(sentences, self.model.word_to_idx,
max_length=self.model.max_length)
preds = self.model.inference(wordvecs, mask)
return preds
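# Hedged sketch of the update-rule interface the solver expects from optim.py:
# rule(w, dw, config) -> (next_w, next_config). A plain SGD step would look
# roughly like this; the real rules live in code_base/optim.py.
def _sgd_sketch(w, dw, config=None):
    config = {} if config is None else config
    config.setdefault('learning_rate', 1e-2)
    next_w = w - config['learning_rate'] * dw
    return next_w, config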
|
|
#####################################################################
# vec3 - 3-dimensional vector
#
# Copyright (C) 2002, Matthias Baas ([email protected])
#
# You may distribute under the terms of the BSD license, as
# specified in the file license.txt.
####################################################################
import types, math, copy
import pdb
# vec3
class vec3:
"""Three-dimensional vector.
This class can be used to represent points, vectors, normals
or even colors. The usual vector operations are available.
"""
def __init__(self, *args):
"""Constructor.
There are several possibilities how to initialize a vector:
v = vec3() -> v = <0,0,0>
v = vec3(a) -> v = <a,a,a>
v = vec3(x,y) -> v = <x,y,0>
v = vec3(x,y,z) -> v = <x,y,z>
Note that specifying just one value sets all three components to
        that value (except when that single value is another vec3, in which
        case that vector is copied).
Additionally you can wrap those values in a list or a tuple or
specify them as a string:
v = vec3([1,2,3]) -> v = <1,2,3>
v = vec3("4,5") -> v = <4,5,0>
"""
if len(args)==0:
self.x, self.y, self.z = (0.0, 0.0, 0.0)
elif len(args)==1:
T = type(args[0])
# scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
self.x, self.y, self.z = (args[0], args[0], args[0])
# vec3
elif isinstance(args[0], vec3):
self.x, self.y, self.z = args[0]
# Tuple/List
elif T==types.TupleType or T==types.ListType:
if len(args[0])==0:
self.x = self.y = self.z = 0.0
elif len(args[0])==1:
self.x = self.y = self.z = args[0][0]
elif len(args[0])==2:
self.x, self.y = args[0]
self.z = 0.0
elif len(args[0])==3:
self.x, self.y, self.z = args[0]
else:
raise TypeError("vec3() takes at most 3 arguments")
# String
elif T==types.StringType:
                s=args[0].replace(","," ").replace("  "," ").strip().split(" ")
if s==[""]:
s=[]
f=map(lambda x: float(x), s)
dummy = vec3(f)
self.x, self.y, self.z = dummy
# error
else:
raise TypeError("vec3() arg can't be converted to vec3")
elif len(args)==2:
self.x, self.y, self.z = (args[0], args[1], 0.0)
elif len(args)==3:
self.x, self.y, self.z = args
else:
raise TypeError("vec3() takes at most 3 arguments")
def __repr__(self):
return 'vec3('+str(self.x)+', '+str(self.y)+', '+str(self.z)+')'
def __str__(self):
fmt="%1.4f"
return '('+fmt%self.x+', '+fmt%self.y+', '+fmt%self.z+')'
def __eq__(self, other):
"""== operator
>>> a=vec3(1.0, 0.5, -1.8)
>>> b=vec3(-0.3, 0.75, 0.5)
>>> c=vec3(-0.3, 0.75, 0.5)
>>> print(a==b)
0
>>> print(b==c)
1
>>> print(a==None)
0
"""
if isinstance(other, vec3):
return self.x==other.x and self.y==other.y and self.z==other.z
else:
return 0
def __ne__(self, other):
"""!= operator
>>> a=vec3(1.0, 0.5, -1.8)
>>> b=vec3(-0.3, 0.75, 0.5)
>>> c=vec3(-0.3, 0.75, 0.5)
>>> print(a!=b)
1
>>> print(b!=c)
0
>>> print(a!=None)
1
"""
if isinstance(other, vec3):
return self.x!=other.x or self.y!=other.y or self.z!=other.z
else:
return 1
def __add__(self, other):
"""Vector addition.
>>> a=vec3(1.0, 0.5, -1.8)
>>> b=vec3(-0.3, 0.75, 0.5)
>>> print(a+b)
(0.7000, 1.2500, -1.3000)
"""
if isinstance(other, vec3):
return vec3(self.x+other.x, self.y+other.y, self.z+other.z)
else:
raise TypeError("unsupported operand type for +")
def __sub__(self, other):
"""Vector subtraction.
>>> a=vec3(1.0, 0.5, -1.8)
>>> b=vec3(-0.3, 0.75, 0.5)
>>> print(a-b)
(1.3000, -0.2500, -2.3000)
"""
if isinstance(other, vec3):
return vec3(self.x-other.x, self.y-other.y, self.z-other.z)
else:
raise TypeError("unsupported operand type for -")
def __mul__(self, other):
"""Multiplication with a scalar or dot product.
>>> a=vec3(1.0, 0.5, -1.8)
>>> b=vec3(-0.3, 0.75, 0.5)
>>> print(a*2.0)
(2.0000, 1.0000, -3.6000)
>>> print(2.0*a)
(2.0000, 1.0000, -3.6000)
>>> print(a*b)
-0.825
"""
T = type(other)
# vec3*scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
return vec3(self.x*other, self.y*other, self.z*other)
# vec3*vec3
if isinstance(other, vec3):
return self.x*other.x + self.y*other.y + self.z*other.z
# unsupported
else:
# Try to delegate the operation to the other operand
if getattr(other,"__rmul__",None)!=None:
return other.__rmul__(self)
else:
raise TypeError("unsupported operand type for *")
__rmul__ = __mul__
def __div__(self, other):
"""Division by scalar
>>> a=vec3(1.0, 0.5, -1.8)
>>> print(a/2.0)
(0.5000, 0.2500, -0.9000)
"""
T = type(other)
# vec3/scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
return vec3(self.x/other, self.y/other, self.z/other)
# unsupported
else:
raise TypeError("unsupported operand type for /")
def __mod__(self, other):
"""Modulo (component wise)
>>> a=vec3(3.0, 2.5, -1.8)
>>> print(a%2.0)
(1.0000, 0.5000, 0.2000)
"""
T = type(other)
# vec3%scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
return vec3(self.x%other, self.y%other, self.z%other)
# unsupported
else:
raise TypeError("unsupported operand type for %")
def __iadd__(self, other):
"""Inline vector addition.
>>> a=vec3(1.0, 0.5, -1.8)
>>> b=vec3(-0.3, 0.75, 0.5)
>>> a+=b
>>> print(a)
(0.7000, 1.2500, -1.3000)
"""
if isinstance(other, vec3):
self.x+=other.x
self.y+=other.y
self.z+=other.z
return self
else:
raise TypeError("unsupported operand type for +=")
def __isub__(self, other):
"""Inline vector subtraction.
>>> a=vec3(1.0, 0.5, -1.8)
>>> b=vec3(-0.3, 0.75, 0.5)
>>> a-=b
>>> print(a)
(1.3000, -0.2500, -2.3000)
"""
if isinstance(other, vec3):
self.x-=other.x
self.y-=other.y
self.z-=other.z
return self
else:
raise TypeError("unsupported operand type for -=")
def __imul__(self, other):
"""Inline multiplication (only with scalar)
>>> a=vec3(1.0, 0.5, -1.8)
>>> a*=2.0
>>> print(a)
(2.0000, 1.0000, -3.6000)
"""
T = type(other)
# vec3*=scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
self.x*=other
self.y*=other
self.z*=other
return self
else:
raise TypeError("unsupported operand type for *=")
def __idiv__(self, other):
"""Inline division with scalar
>>> a=vec3(1.0, 0.5, -1.8)
>>> a/=2.0
>>> print(a)
(0.5000, 0.2500, -0.9000)
"""
T = type(other)
# vec3/=scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
self.x/=other
self.y/=other
self.z/=other
return self
else:
raise TypeError("unsupported operand type for /=")
def __imod__(self, other):
"""Inline modulo
>>> a=vec3(3.0, 2.5, -1.8)
>>> a%=2.0
>>> print(a)
(1.0000, 0.5000, 0.2000)
"""
T = type(other)
# vec3%=scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
self.x%=other
self.y%=other
self.z%=other
return self
else:
raise TypeError("unsupported operand type for %=")
def __neg__(self):
"""Negation
>>> a=vec3(3.0, 2.5, -1.8)
>>> print(-a)
(-3.0000, -2.5000, 1.8000)
"""
return vec3(-self.x, -self.y, -self.z)
def __pos__(self):
"""
>>> a=vec3(3.0, 2.5, -1.8)
>>> print(+a)
(3.0000, 2.5000, -1.8000)
"""
return vec3(+self.x, +self.y, +self.z)
def __abs__(self):
"""Return the length of the vector.
abs(v) is equivalent to v.length().
>>> a=vec3(1.0, 0.5, -1.8)
>>> print(abs(a))
2.11896201004
"""
return math.sqrt(self*self)
def __len__(self):
"""Length of the sequence (always 3)"""
return 3
def __getitem__(self, key):
"""Return a component by index (0-based)
>>> a=vec3(1.0, 0.5, -1.8)
>>> print(a[0])
1.0
>>> print(a[1])
0.5
>>> print(a[2])
-1.8
"""
T=type(key)
if T!=types.IntType and T!=types.LongType:
raise TypeError("index must be integer")
if key==0: return self.x
elif key==1: return self.y
elif key==2: return self.z
else:
raise IndexError("index out of range")
def __setitem__(self, key, value):
"""Set a component by index (0-based)
>>> a=vec3()
>>> a[0]=1.5; a[1]=0.7; a[2]=-0.3
>>> print(a)
(1.5000, 0.7000, -0.3000)
"""
T=type(key)
if T!=types.IntType and T!=types.LongType:
raise TypeError("index must be integer")
if key==0: self.x = value
elif key==1: self.y = value
elif key==2: self.z = value
else:
raise IndexError("index out of range")
def cross(self, other):
"""Cross product.
>>> a=vec3(1.0, 0.5, -1.8)
>>> b=vec3(-0.3, 0.75, 0.5)
>>> c=a.cross(b)
>>> print(c)
(1.6000, 0.0400, 0.9000)
"""
if isinstance(other, vec3):
return vec3(self.y*other.z-self.z*other.y,
self.z*other.x-self.x*other.z,
self.x*other.y-self.y*other.x)
else:
raise TypeError("unsupported operand type for cross()")
def length(self):
"""Return the length of the vector.
v.length() is equivalent to abs(v).
>>> a=vec3(1.0, 0.5, -1.8)
>>> print(a.length())
2.11896201004
"""
return math.sqrt(self*self)
def normalize(self):
"""Return normalized vector.
>>> a=vec3(1.0, 0.5, -1.8)
>>> print(a.normalize())
(0.4719, 0.2360, -0.8495)
"""
try:
nlen = 1.0/math.sqrt(self*self)
        except ZeroDivisionError:
nlen=1.
return vec3(self.x*nlen, self.y*nlen, self.z*nlen)
def angle(self, other):
"""Return angle (in radians) between self and other.
>>> a=vec3(1.0, 0.5, -1.8)
>>> b=vec3(-0.3, 0.75, 0.5)
>>> print(a.angle(b))
1.99306755584
"""
if isinstance(other, vec3):
return math.acos((self*other) / (abs(self)*abs(other)))
else:
raise TypeError("unsupported operand type for angle()")
def reflect(self, N):
"""Return the reflection vector.
N is the surface normal which has to be of unit length.
>>> a=vec3(1.0, 0.5, -1.8)
        >>> print(a.reflect(vec3(1,0,1)))
(2.6000, 0.5000, -0.2000)
"""
return self - 2.0*(self*N)*N
def refract(self, N, eta):
"""Return the transmitted vector.
N is the surface normal which has to be of unit length.
eta is the relative index of refraction. If the returned
vector is zero then there is no transmitted light because
of total internal reflection.
>>> a=vec3(1.0, -1.5, 0.8)
>>> print(a.refract(vec3(0,1,0), 1.33))
(1.3300, -1.7920, 1.0640)
"""
dot = self*N
k = 1.0 - eta*eta*(1.0 - dot*dot)
if k<0:
return vec3(0.0,0.0,0.0)
else:
return eta*self - (eta*dot + math.sqrt(k))*N
def ortho(self):
"""Returns an orthogonal vector.
Returns a vector that is orthogonal to self (where
self*self.ortho()==0).
>>> a=vec3(1.0, -1.5, 0.8)
>>> print(round(a*a.ortho(),8))
0.0
"""
x=abs(self.x)
y=abs(self.y)
z=abs(self.z)
# Is z the smallest element? Then use x and y
if z<=x and z<=y:
return vec3(-self.y, self.x, 0.0)
# Is y smallest element? Then use x and z
elif y<=x and y<=z:
return vec3(-self.z, 0.0, self.x)
# x is smallest
else:
return vec3(0.0, -self.z, self.y)
def ang0(self):
C = vec3(1.0,0.0,0.0)
try:
return self.angle(C)
except:
return 0.0
def vquarter(self):
a0 = copy.copy(self.normalize())
a1 = copy.copy(self.normalize())
a0.x = math.cos(self.ang0()+math.pi/6.0)*self.length()
a1.x = math.cos(self.ang0()-math.pi/6.0)*self.length()
a0.y = math.sin(self.ang0()+math.pi/6.0)*self.length()
a1.y = math.sin(self.ang0()-math.pi/6.0)*self.length()
return [a0,self,a1]
def tolist(self):
return [self.x, self.y, self.z]
######################################################################
def _test():
import doctest, vec3
failed, total = doctest.testmod(vec3)
print("%d/%d failed" % (failed, total))
if __name__=="__main__":
_test()
# a = vec3(1,2,3.03)
# b = vec3("-2,0.5,1E10")
# print a.angle(b)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating random numbers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_random_ops import *
# pylint: enable=wildcard-import
def _ShapeTensor(shape):
"""Convert to an int32 or int64 tensor, defaulting to int32 if empty."""
if isinstance(shape, (tuple, list)) and not shape:
dtype = dtypes.int32
else:
dtype = None
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
# pylint: disable=protected-access
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32,
seed=None, name=None):
"""Outputs random values from a normal distribution.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.op_scope([shape, mean, stddev], name, "random_normal") as name:
shape_tensor = _ShapeTensor(shape)
mean_tensor = ops.convert_to_tensor(
mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(
stddev, dtype=dtype, name="stddev")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._random_standard_normal(shape_tensor, dtype,
seed=seed1,
seed2=seed2)
mul = rnd * stddev_tensor
value = math_ops.add(mul, mean_tensor, name=name)
return value
ops.NoGradient("RandomStandardNormal")
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32,
seed=None, name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the truncated normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.op_scope([shape, mean, stddev], name, "truncated_normal") as name:
shape_tensor = _ShapeTensor(shape)
mean_tensor = ops.convert_to_tensor(
mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(
stddev, dtype=dtype, name="stddev")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._truncated_normal(shape_tensor, dtype,
seed=seed1,
seed2=seed2)
mul = rnd * stddev_tensor
value = math_ops.add(mul, mean_tensor, name=name)
return value
ops.NoGradient("TruncatedNormal")
def random_uniform(shape, minval=0.0, maxval=1.0,
dtype=dtypes.float32, seed=None,
name=None):
"""Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
range of random values to generate.
maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on
the range of random values to generate.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
"""
with ops.op_scope([shape, minval, maxval], name, "random_uniform") as name:
shape_tensor = _ShapeTensor(shape)
min_tensor = ops.convert_to_tensor(minval, dtype=dtype, name="min")
range_tensor = ops.convert_to_tensor(
maxval - minval, dtype=dtype, name="range")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._random_uniform(shape_tensor, dtype,
seed=seed1,
seed2=seed2)
mul = rnd * range_tensor
value = math_ops.add(mul, min_tensor, name=name)
return value
def random_shuffle(value, seed=None, name=None):
"""Randomly shuffles a tensor along its first dimension.
The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
to one and only one `output[i]`. For example, a mapping that might occur for a
3x2 tensor is:
```python
[[1, 2], [[5, 6],
[3, 4], ==> [1, 2],
[5, 6]] [3, 4]]
```
Args:
value: A Tensor to be shuffled.
seed: A Python integer. Used to create a random seed for the distribution.
See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of same shape and type as `value`, shuffled along its first
dimension.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops._random_shuffle(value, seed=seed1, seed2=seed2,
name=name)
ops.NoGradient("RandomUniform")
@ops.RegisterShape("TruncatedNormal")
@ops.RegisterShape("RandomStandardNormal")
@ops.RegisterShape("RandomUniform")
def _RandomShape(op):
shape_val = tensor_util.ConstantValue(op.inputs[0])
if shape_val is not None:
return [tensor_shape.TensorShape(shape_val.tolist())]
else:
shape_shape = op.inputs[0].get_shape().with_rank_at_most(1)
return [tensor_shape.unknown_shape(ndims=shape_shape.num_elements())]
ops.RegisterShape("RandomShuffle")(common_shapes.unchanged_shape)
|
|
"""Module with main classes related to Switches."""
import json
import logging
from pyof.v0x01.common.phy_port import PortFeatures
from kytos.core.constants import CONNECTION_TIMEOUT, FLOOD_TIMEOUT
from kytos.core.helpers import now
__all__ = ('Interface', 'Switch')
LOG = logging.getLogger(__name__)
class Interface(object):
"""Interface Class used to abstract the network interfaces."""
# pylint: disable=too-many-arguments
def __init__(self, name, port_number, switch, address=None, state=None,
features=None):
"""Assign the parameters to instance attributes.
Args:
            name (string): name of this interface.
            port_number (int): port number of this interface.
            switch (:class:`~.core.switch.Switch`): Switch that owns this
                interface.
            address (|hw_address|): Port address of this interface.
            state (|port_stats|): Port state of this interface.
            features (|port_features|): Port features used to calculate the
                link utilization of this interface.
"""
self.name = name
self.port_number = int(port_number)
self.switch = switch
self.address = address
self.state = state
self.features = features
self.nni = False
self.endpoints = []
def __eq__(self, other):
"""Compare Interface class with another instance."""
if isinstance(other, str):
return self.address == other
elif isinstance(other, Interface):
return self.port_number == other.port_number and \
self.name == other.name and \
self.address == other.address and \
self.switch.dpid == other.switch.dpid
return False
@property
def id(self): # pylint: disable=invalid-name
"""Return id from Interface intance.
Returns:
string: Interface id.
"""
return "{}:{}".format(self.switch.dpid, self.port_number)
@property
def uni(self):
"""Return if an interface is a user-to-network Interface."""
return not self.nni
def get_endpoint(self, endpoint):
"""Return a tuple with existent endpoint, None otherwise.
Args:
endpoint(|hw_address|, :class:`.Interface`): endpoint instance.
Returns:
tuple: A tuple with endpoint and time of last update.
"""
for item in self.endpoints:
if endpoint == item[0]:
return item
return None
def add_endpoint(self, endpoint):
"""Create a new endpoint to Interface instance.
Args:
endpoint(|hw_address|, :class:`.Interface`): A target endpoint.
"""
exists = self.get_endpoint(endpoint)
if not exists:
self.endpoints.append((endpoint, now()))
def delete_endpoint(self, endpoint):
"""Delete a existent endpoint in Interface instance.
Args:
endpoint (|hw_address|, :class:`.Interface`): A target endpoint.
"""
exists = self.get_endpoint(endpoint)
if exists:
self.endpoints.remove(exists)
def update_endpoint(self, endpoint):
"""Update or create new endpoint to Interface instance.
Args:
endpoint(|hw_address|, :class:`.Interface`): A target endpoint.
"""
exists = self.get_endpoint(endpoint)
if exists:
self.delete_endpoint(endpoint)
self.add_endpoint(endpoint)
def get_speed(self):
"""Return the link speed in bits per second, None otherwise.
Returns:
int: Link speed in bits per second.
"""
fts = self.features
pfts = PortFeatures
if fts and fts & pfts.OFPPF_10GB_FD:
return 10 * 10**9
elif fts and fts & (pfts.OFPPF_1GB_HD | pfts.OFPPF_1GB_FD):
return 10**9
elif fts and fts & (pfts.OFPPF_100MB_HD | pfts.OFPPF_100MB_FD):
return 100 * 10**6
elif fts and fts & (pfts.OFPPF_10MB_HD | pfts.OFPPF_10MB_FD):
return 10 * 10**6
else:
LOG.warning("No speed port %s, sw %s, feats %s", self.port_number,
self.switch.dpid[-3:], self.features)
return None
def get_hr_speed(self):
"""Return Human-Readable string for link speed.
Returns:
string: String with link speed. e.g: '350 Gbps' or '350 Mbps'.
"""
speed = self.get_speed()
if speed is None:
return ''
elif speed >= 10**9:
return '{} Gbps'.format(round(speed / 10**9))
return '{} Mbps'.format(round(speed / 10**6))
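    # Illustrative note (not part of the original code): for a hypothetical
    # interface whose features include PortFeatures.OFPPF_1GB_FD, get_speed()
    # returns 10**9 and get_hr_speed() renders it as '1 Gbps'; with
    # OFPPF_100MB_HD the values would be 100 * 10**6 and '100 Mbps'.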
def as_dict(self):
"""Return a dictionary with Interface attributes.
Example of output:
.. code-block:: python3
{'id': '00:00:00:00:00:00:00:01:2',
'name': 'eth01',
'port_number': 2,
'mac': '00:7e:04:3b:c2:a6',
'switch': '00:00:00:00:00:00:00:01',
'type': 'interface',
'nni': False,
'uni': True,
'speed': '350 Mbps'}
Returns:
dict: Dictionary filled with interface attributes.
"""
return {'id': self.id,
'name': self.name,
'port_number': self.port_number,
'mac': self.address,
'switch': self.switch.dpid,
'type': 'interface',
'nni': self.nni,
'uni': self.uni,
'speed': self.get_hr_speed()}
def as_json(self):
"""Return a json with Interfaces attributes.
Example of output:
.. code-block:: json
{"mac": "00:7e:04:3b:c2:a6",
"switch": "00:00:00:00:00:00:00:01",
"type": "interface",
"name": "eth01",
"id": "00:00:00:00:00:00:00:01:2",
"port_number": 2,
"speed": "350 Mbps"}
Returns:
            string: JSON string filled with interface attributes.
"""
return json.dumps(self.as_dict())
class Switch(object):
"""Switch class is a abstraction from switches.
A new Switch will be created every time the handshake process is done
(after receiving the first FeaturesReply). Considering this, the
:attr:`socket`, :attr:`connection_id`, :attr:`of_version` and
:attr:`features` need to be passed on init. But when the connection to the
    switch is lost, these attributes can be set to None (actually some of
    them must be).
The :attr:`dpid` attribute will be the unique identifier of a Switch.
    It is the :attr:`pyof.*.controller2switch.SwitchFeatures.datapath-id`
    defined by the OpenFlow Specification: a 64-bit field that should be
    thought of as analogous to an Ethernet switch's bridge MAC, i.e. a unique
    identifier for the specific packet processing pipeline being managed. One
    physical switch may have more than one datapath-id (think virtualization
    of the switch).
:attr:`socket` is the request from a TCP connection, it represents the
effective connection between the switch and the controller.
    :attr:`connection_id` is a tuple composed of the IP and port of the
    established connection (if any). It will be used to help map the
    connection to the Switch and vice versa.
    :attr:`ofp_version` is a string representing the agreed-upon version of
    python-openflow that will be used for the communication between the
    Controller and the Switch.
:attr:`features` is an instance of
:class:`pyof.*.controller2switch.FeaturesReply` representing the current
    features of the switch.
"""
def __init__(self, dpid, connection=None, ofp_version='0x01',
features=None):
"""Contructor of switches have the below parameters.
Args:
dpid (|DPID|): datapath_id of the switch
connection (:class:`~.Connection`): Connection used by switch.
            ofp_version (string): OpenFlow version currently in use.
features (|features_reply|): FeaturesReply instance.
"""
self.dpid = dpid
self.connection = connection
self.ofp_version = ofp_version
self.features = features
self.firstseen = now()
self.lastseen = now()
self.sent_xid = None
self.waiting_for_reply = False
self.request_timestamp = 0
#: Dict associating mac addresses to switch ports.
#: the key of this dict is a mac_address, and the value is a set
#: containing the ports of this switch in which that mac can be
#: found.
self.mac2port = {}
#: This flood_table will keep track of flood packets to avoid over
#: flooding on the network. Its key is a hash composed by
#: (eth_type, mac_src, mac_dst) and the value is the timestamp of
#: the last flood.
self.flood_table = {}
self.interfaces = {}
self.flows = []
self.description = {}
if connection:
connection.switch = self
def update_description(self, desc):
"""Update switch'descriptions from Switch instance.
Args:
desc (|desc_stats|):
Description Class with new values of switch's descriptions.
"""
self.description['manufacturer'] = desc.mfr_desc.value
self.description['hardware'] = desc.hw_desc.value
self.description['software'] = desc.sw_desc.value
self.description['serial'] = desc.serial_num.value
self.description['data_path'] = desc.dp_desc.value
@property
def id(self): # pylint: disable=invalid-name
"""Return id from Switch instance.
Returns:
string: the switch id is the data_path_id from switch.
"""
return "{}".format(self.dpid)
def disconnect(self):
"""Disconnect the switch instance."""
self.connection.close()
self.connection = None
LOG.info("Switch %s is disconnected", self.dpid)
def get_interface_by_port_no(self, port_no):
"""Get interface by port number from Switch instance.
Returns:
:class:`~.core.switch.Interface`: Interface from specific port.
"""
return self.interfaces.get(port_no)
def get_flow_by_id(self, flow_id):
"""Return a Flow using the flow_id given. None if not found in flows.
Args:
flow_id (int): identifier from specific flow stored.
"""
for flow in self.flows:
if flow_id == flow.id:
return flow
return None
def is_active(self):
"""Return true if the switch connection is alive."""
return (now() - self.lastseen).seconds <= CONNECTION_TIMEOUT
def is_connected(self):
"""Verify if the switch is connected to a socket."""
return (self.connection is not None and
self.connection.is_alive() and
self.connection.is_established() and self.is_active())
def update_connection(self, connection):
"""Update switch connection.
Args:
connection (:class:`~.core.switch.Connection`):
New connection to this instance of switch.
"""
self.connection = connection
self.connection.switch = self
def update_features(self, features):
"""Update :attr:`features` attribute."""
self.features = features
def send(self, buffer):
"""Send a buffer data to the real switch.
Args:
buffer (bytes): bytes to be sent to the switch throught its
connection.
"""
if self.connection:
self.connection.send(buffer)
def update_lastseen(self):
"""Update the lastseen attribute."""
self.lastseen = now()
def update_interface(self, interface):
"""Update a interface from switch instance.
Args:
interface (:class:`~kytos.core.switch.Interface`):
Interface object to be storeged.
"""
if interface.port_number not in self.interfaces:
self.interfaces[interface.port_number] = interface
def update_mac_table(self, mac, port_number):
"""Link the mac address with a port number.
Args:
            mac (|hw_address|): mac address from the switch.
            port_number (int): port to be linked to the mac address.
"""
if mac.value in self.mac2port:
self.mac2port[mac.value].add(port_number)
else:
self.mac2port[mac.value] = set([port_number])
def last_flood(self, ethernet_frame):
"""Return the timestamp when the ethernet_frame was flooded.
        This method is useful to check whether a frame has been flooded before.
Args:
ethernet_frame (|ethernet|): Ethernet instance to be verified.
Returns:
            datetime.datetime:
                Timestamp of the last time the ethernet_frame was flooded,
                or None if it has never been flooded.
"""
try:
return self.flood_table[ethernet_frame.get_hash()]
except KeyError:
return None
def should_flood(self, ethernet_frame):
"""Verify if the ethernet frame should flood.
Args:
ethernet_frame (|ethernet|): Ethernet instance to be verified.
Returns:
bool: True if the ethernet_frame should flood.
"""
last_flood = self.last_flood(ethernet_frame)
diff = (now() - last_flood).microseconds
return last_flood is None or diff > FLOOD_TIMEOUT
def update_flood_table(self, ethernet_frame):
"""Update a flood table using the given ethernet frame.
Args:
ethernet_frame (|ethernet|): Ethernet frame to be updated.
"""
self.flood_table[ethernet_frame.get_hash()] = now()
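    # Usage sketch (not part of the original code; `flood()` below is a
    # hypothetical helper): a NApp handling a PacketIn would typically combine
    # these methods as
    #
    #     if switch.should_flood(ethernet_frame):
    #         flood(packet)
    #         switch.update_flood_table(ethernet_frame)
    #
    # so the same (eth_type, mac_src, mac_dst) hash is not flooded again until
    # FLOOD_TIMEOUT has elapsed.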
def where_is_mac(self, mac):
"""Return all ports from specific mac address.
Args:
mac (|hw_address|): Mac address from switch.
Returns:
            :class:`list`: A list of ports, or None if the mac address is unknown.
"""
try:
return list(self.mac2port[mac.value])
except KeyError:
return None
def as_dict(self):
"""Return a dictionary with switch attributes.
Example of output:
.. code-block:: python3
{'id': '00:00:00:00:00:00:00:03:2',
'name': '00:00:00:00:00:00:00:03:2',
'dpid': '00:00:00:00:03',
'connection': connection,
'ofp_version': '0x01',
'type': 'switch',
'manufacturer': "",
'serial': "",
'hardware': "Open vSwitch",
'software': 2.5,
'data_path': ""
}
Returns:
            dict: Dictionary filled with switch attributes.
"""
connection = ""
if self.connection is not None:
address = self.connection.address
port = self.connection.port
connection = "{}:{}".format(address, port)
return {'id': self.id,
'name': self.id,
'dpid': self.dpid,
'connection': connection,
'ofp_version': self.ofp_version,
'type': 'switch',
'manufacturer': self.description.get('manufacturer', ''),
'serial': self.description.get('serial', ''),
'hardware': self.description.get('hardware', ''),
'software': self.description.get('software'),
'data_path': self.description.get('data_path', ''),
'interfaces': {i.id: i.as_dict()
for i in self.interfaces.values()}
}
def as_json(self):
"""Return a json with switch'attributes.
Example of output:
.. code-block:: json
{"data_path": "",
"hardware": "Open vSwitch",
"dpid": "00:00:00:00:03",
"name": "00:00:00:00:00:00:00:03:2",
"manufacturer": "",
"serial": "",
"software": 2.5,
"id": "00:00:00:00:00:00:00:03:2",
"ofp_version": "0x01",
"type": "switch",
"connection": ""}
Returns:
string: Json filled with switch'attributes.
"""
return json.dumps(self.as_dict())
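# Minimal usage sketch (not part of the original module; the dpid below is
# hypothetical). A Switch created without a connection is "offline" but can
# still hold interfaces and be serialized:
#
#     switch = Switch(dpid='00:00:00:00:00:00:00:01')
#     iface = Interface(name='eth0', port_number=1, switch=switch)
#     switch.update_interface(iface)
#     iface.id                 # '00:00:00:00:00:00:00:01:1'
#     switch.as_json()         # JSON string including the interface above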
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_sequence
short_description: Create, drop, or alter a PostgreSQL sequence
description:
- Allows you to create, drop, or change the definition of a sequence generator.
version_added: '2.9'
options:
sequence:
description:
- The name of the sequence.
required: true
type: str
aliases:
- name
state:
description:
- The sequence state.
    - If I(state=absent), other options will be ignored except I(name) and
      I(schema).
default: present
choices: [ absent, present ]
type: str
data_type:
description:
- Specifies the data type of the sequence. Valid types are bigint, integer,
and smallint. bigint is the default. The data type determines the default
minimum and maximum values of the sequence. For more info see the
documentation
U(https://www.postgresql.org/docs/current/sql-createsequence.html).
- Supported from PostgreSQL 10.
choices: [ bigint, integer, smallint ]
type: str
increment:
description:
- Increment specifies which value is added to the current sequence value
to create a new value.
- A positive value will make an ascending sequence, a negative one a
descending sequence. The default value is 1.
type: int
minvalue:
description:
- Minvalue determines the minimum value a sequence can generate. The
default for an ascending sequence is 1. The default for a descending
sequence is the minimum value of the data type.
type: int
aliases:
- min
maxvalue:
description:
- Maxvalue determines the maximum value for the sequence. The default for
an ascending sequence is the maximum
value of the data type. The default for a descending sequence is -1.
type: int
aliases:
- max
start:
description:
- Start allows the sequence to begin anywhere. The default starting value
is I(minvalue) for ascending sequences and I(maxvalue) for descending
ones.
type: int
cache:
description:
- Cache specifies how many sequence numbers are to be preallocated and
stored in memory for faster access. The minimum value is 1 (only one
value can be generated at a time, i.e., no cache), and this is also
the default.
type: int
cycle:
description:
- The cycle option allows the sequence to wrap around when the I(maxvalue)
or I(minvalue) has been reached by an ascending or descending sequence
respectively. If the limit is reached, the next number generated will be
the minvalue or maxvalue, respectively.
- If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence
has reached its maximum value will return an error. False (NO CYCLE) is
the default.
type: bool
default: no
cascade:
description:
- Automatically drop objects that depend on the sequence, and in turn all
objects that depend on those objects.
- Ignored if I(state=present).
- Only used with I(state=absent).
type: bool
default: no
rename_to:
description:
- The new name for the I(sequence).
- Works only for existing sequences.
type: str
owner:
description:
- Set the owner for the I(sequence).
type: str
schema:
description:
    - The schema of the I(sequence). This is used to create and relocate
      a I(sequence) in the given schema.
default: public
type: str
newschema:
description:
- The new schema for the I(sequence). Will be used for moving a
I(sequence) to another I(schema).
- Works only for existing sequences.
type: str
session_role:
description:
- Switch to session_role after connecting. The specified I(session_role)
must be a role that the current I(login_user) is a member of.
- Permissions checking for SQL commands is carried out as though
the I(session_role) were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- database
- login_db
notes:
- If you do not pass the I(db) parameter, the sequence will be created in the
  database named postgres.
seealso:
- module: postgresql_table
- module: postgresql_owner
- module: postgresql_privs
- module: postgresql_tablespace
- name: CREATE SEQUENCE reference
description: Complete reference of the CREATE SEQUENCE command documentation.
link: https://www.postgresql.org/docs/current/sql-createsequence.html
- name: ALTER SEQUENCE reference
description: Complete reference of the ALTER SEQUENCE command documentation.
link: https://www.postgresql.org/docs/current/sql-altersequence.html
- name: DROP SEQUENCE reference
description: Complete reference of the DROP SEQUENCE command documentation.
link: https://www.postgresql.org/docs/current/sql-dropsequence.html
author:
- Tobias Birkefeld (@tcraxs)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Create an ascending bigint sequence called foobar in the default
database
postgresql_sequence:
name: foobar
- name: Create an ascending integer sequence called foobar, starting at 101
postgresql_sequence:
name: foobar
data_type: integer
start: 101
- name: Create a descending sequence called foobar, starting at 101 and
preallocated 10 sequence numbers in cache
postgresql_sequence:
name: foobar
increment: -1
cache: 10
start: 101
- name: Create an ascending sequence called foobar, which cycles between 1 and 10
postgresql_sequence:
name: foobar
cycle: yes
min: 1
max: 10
- name: Create an ascending bigint sequence called foobar in the default
database with owner foobar
postgresql_sequence:
name: foobar
owner: foobar
- name: Rename an existing sequence named foo to bar
postgresql_sequence:
name: foo
rename_to: bar
- name: Change the schema of an existing sequence to foobar
postgresql_sequence:
name: foobar
newschema: foobar
- name: Change the owner of an existing sequence to foobar
postgresql_sequence:
name: foobar
owner: foobar
- name: Drop a sequence called foobar
postgresql_sequence:
name: foobar
state: absent
- name: Drop a sequence called foobar with cascade
postgresql_sequence:
name: foobar
cascade: yes
state: absent
'''
RETURN = r'''
state:
description: Sequence state at the end of execution.
returned: always
type: str
sample: 'present'
sequence:
description: Sequence name.
returned: always
type: str
sample: 'foobar'
queries:
  description: List of queries that were executed or attempted.
returned: always
type: str
sample: [ "CREATE SEQUENCE \"foo\"" ]
schema:
description: Name of the schema of the sequence
returned: always
type: str
sample: 'foo'
data_type:
description: Shows the current data type of the sequence.
returned: always
type: str
sample: 'bigint'
increment:
description: The value of increment of the sequence. A positive value will
make an ascending sequence, a negative one a descending
sequence.
returned: always
type: int
sample: '-1'
minvalue:
description: The value of minvalue of the sequence.
returned: always
type: int
sample: '1'
maxvalue:
description: The value of maxvalue of the sequence.
returned: always
type: int
sample: '9223372036854775807'
start:
description: The value of start of the sequence.
returned: always
type: int
sample: '12'
cycle:
  description: Shows whether the sequence cycles or not.
returned: always
type: str
sample: 'NO'
owner:
description: Shows the current owner of the sequence
after the successful run of the task.
returned: always
type: str
sample: 'postgres'
newname:
description: Shows the new sequence name after rename.
returned: on success
type: str
sample: 'barfoo'
newschema:
description: Shows the new schema of the sequence after schema change.
returned: on success
type: str
sample: 'foobar'
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import pg_quote_identifier
from ansible.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
class Sequence(object):
"""Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command.
Arguments:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
Attributes:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
changed (bool) -- something was changed after execution or not
executed_queries (list) -- executed queries
name (str) -- name of the sequence
owner (str) -- name of the owner of the sequence
schema (str) -- name of the schema (default: public)
data_type (str) -- data type of the sequence
start_value (int) -- value of the sequence start
minvalue (int) -- minimum value of the sequence
maxvalue (int) -- maximum value of the sequence
increment (int) -- increment value of the sequence
cycle (bool) -- sequence can cycle or not
new_name (str) -- name of the renamed sequence
new_schema (str) -- name of the new schema
exists (bool) -- sequence exists or not
"""
def __init__(self, module, cursor):
self.module = module
self.cursor = cursor
self.executed_queries = []
self.name = self.module.params['sequence']
self.owner = ''
self.schema = self.module.params['schema']
self.data_type = ''
self.start_value = ''
self.minvalue = ''
self.maxvalue = ''
self.increment = ''
self.cycle = ''
self.new_name = ''
self.new_schema = ''
self.exists = False
# Collect info
self.get_info()
def get_info(self):
"""Getter to refresh and get sequence info"""
query = ("SELECT "
"s.sequence_schema AS schemaname, "
"s.sequence_name AS sequencename, "
"pg_get_userbyid(c.relowner) AS sequenceowner, "
"s.data_type::regtype AS data_type, "
"s.start_value AS start_value, "
"s.minimum_value AS min_value, "
"s.maximum_value AS max_value, "
"s.increment AS increment_by, "
"s.cycle_option AS cycle "
"FROM information_schema.sequences s "
"JOIN pg_class c ON c.relname = s.sequence_name "
"LEFT JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE NOT pg_is_other_temp_schema(n.oid) "
"AND c.relkind = 'S'::\"char\" "
"AND sequence_name = '%s' "
"AND sequence_schema = '%s'" % (self.name,
self.schema))
res = exec_sql(self, query, add_to_executed=False)
if not res:
self.exists = False
return False
if res:
self.exists = True
self.schema = res[0]['schemaname']
self.name = res[0]['sequencename']
self.owner = res[0]['sequenceowner']
self.data_type = res[0]['data_type']
self.start_value = res[0]['start_value']
self.minvalue = res[0]['min_value']
self.maxvalue = res[0]['max_value']
self.increment = res[0]['increment_by']
self.cycle = res[0]['cycle']
def create(self):
"""Implements CREATE SEQUENCE command behavior."""
query = ['CREATE SEQUENCE']
query.append(self.__add_schema())
if self.module.params.get('data_type'):
query.append('AS %s' % self.module.params['data_type'])
if self.module.params.get('increment'):
query.append('INCREMENT BY %s' % self.module.params['increment'])
if self.module.params.get('minvalue'):
query.append('MINVALUE %s' % self.module.params['minvalue'])
if self.module.params.get('maxvalue'):
query.append('MAXVALUE %s' % self.module.params['maxvalue'])
if self.module.params.get('start'):
query.append('START WITH %s' % self.module.params['start'])
if self.module.params.get('cache'):
query.append('CACHE %s' % self.module.params['cache'])
if self.module.params.get('cycle'):
query.append('CYCLE')
return exec_sql(self, ' '.join(query), ddl=True)
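    # Illustrative note (not part of the original module): with hypothetical
    # parameters schema=public, sequence=foobar, increment=-1, start=101 and
    # cache=10, create() assembles and executes roughly:
    #
    #     CREATE SEQUENCE "public"."foobar" INCREMENT BY -1 START WITH 101 CACHE 10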
def drop(self):
"""Implements DROP SEQUENCE command behavior."""
query = ['DROP SEQUENCE']
query.append(self.__add_schema())
if self.module.params.get('cascade'):
query.append('CASCADE')
return exec_sql(self, ' '.join(query), ddl=True)
def rename(self):
"""Implements ALTER SEQUENCE RENAME TO command behavior."""
query = ['ALTER SEQUENCE']
query.append(self.__add_schema())
query.append('RENAME TO %s' % pg_quote_identifier(self.module.params['rename_to'], 'sequence'))
return exec_sql(self, ' '.join(query), ddl=True)
def set_owner(self):
"""Implements ALTER SEQUENCE OWNER TO command behavior."""
query = ['ALTER SEQUENCE']
query.append(self.__add_schema())
query.append('OWNER TO %s' % pg_quote_identifier(self.module.params['owner'], 'role'))
return exec_sql(self, ' '.join(query), ddl=True)
def set_schema(self):
"""Implements ALTER SEQUENCE SET SCHEMA command behavior."""
query = ['ALTER SEQUENCE']
query.append(self.__add_schema())
query.append('SET SCHEMA %s' % pg_quote_identifier(self.module.params['newschema'], 'schema'))
return exec_sql(self, ' '.join(query), ddl=True)
def __add_schema(self):
return '.'.join([pg_quote_identifier(self.schema, 'schema'),
pg_quote_identifier(self.name, 'sequence')])
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
sequence=dict(type='str', required=True, aliases=['name']),
state=dict(type='str', default='present', choices=['absent', 'present']),
data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']),
increment=dict(type='int'),
minvalue=dict(type='int', aliases=['min']),
maxvalue=dict(type='int', aliases=['max']),
start=dict(type='int'),
cache=dict(type='int'),
cycle=dict(type='bool', default=False),
schema=dict(type='str', default='public'),
cascade=dict(type='bool', default=False),
rename_to=dict(type='str'),
owner=dict(type='str'),
newschema=dict(type='str'),
db=dict(type='str', default='', aliases=['login_db', 'database']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['rename_to', 'data_type'],
['rename_to', 'increment'],
['rename_to', 'minvalue'],
['rename_to', 'maxvalue'],
['rename_to', 'start'],
['rename_to', 'cache'],
['rename_to', 'cycle'],
['rename_to', 'cascade'],
['rename_to', 'owner'],
['rename_to', 'newschema'],
['cascade', 'data_type'],
['cascade', 'increment'],
['cascade', 'minvalue'],
['cascade', 'maxvalue'],
['cascade', 'start'],
['cascade', 'cache'],
['cascade', 'cycle'],
['cascade', 'owner'],
['cascade', 'newschema'],
]
)
# Note: we don't need to check mutually exclusive params here, because they are
# checked automatically by AnsibleModule (mutually_exclusive=[] list above).
# Change autocommit to False if check_mode:
autocommit = not module.check_mode
# Connect to DB and make cursor object:
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
cursor = db_connection.cursor(cursor_factory=DictCursor)
##############
# Create the object and do main job:
data = Sequence(module, cursor)
# Set defaults:
changed = False
# Create new sequence
if not data.exists and module.params['state'] == 'present':
if module.params.get('rename_to'):
module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % module.params['sequence'])
if module.params.get('newschema'):
module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % module.params['sequence'])
changed = data.create()
# Drop non-existing sequence
elif not data.exists and module.params['state'] == 'absent':
# Nothing to do
changed = False
# Drop existing sequence
elif data.exists and module.params['state'] == 'absent':
changed = data.drop()
# Rename sequence
if data.exists and module.params.get('rename_to'):
if data.name != module.params['rename_to']:
changed = data.rename()
if changed:
data.new_name = module.params['rename_to']
# Refresh information
if module.params['state'] == 'present':
data.get_info()
# Change owner, schema and settings
if module.params['state'] == 'present' and data.exists:
# change owner
if module.params.get('owner'):
if data.owner != module.params['owner']:
changed = data.set_owner()
# Set schema
if module.params.get('newschema'):
if data.schema != module.params['newschema']:
changed = data.set_schema()
if changed:
data.new_schema = module.params['newschema']
# Rollback if it's possible and check_mode:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
# Make return values:
kw = dict(
changed=changed,
state='present',
sequence=data.name,
queries=data.executed_queries,
schema=data.schema,
data_type=data.data_type,
increment=data.increment,
minvalue=data.minvalue,
maxvalue=data.maxvalue,
start=data.start_value,
cycle=data.cycle,
owner=data.owner,
)
if module.params['state'] == 'present':
if data.new_name:
kw['newname'] = data.new_name
if data.new_schema:
kw['newschema'] = data.new_schema
elif module.params['state'] == 'absent':
kw['state'] = 'absent'
module.exit_json(**kw)
if __name__ == '__main__':
main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import http.client
from oslo_serialization import jsonutils
from keystone.common.policies import project as pp
from keystone.common import provider_api
import keystone.conf
from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import base_classes
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import temporaryfile
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
def _override_policy(policy_file):
# TODO(lbragstad): Remove this once the deprecated policies in
# keystone.common.policies.project have been removed. This is only
# here to make sure we test the new policies instead of the deprecated
# ones. Oslo.policy will OR deprecated policies with new policies to
# maintain compatibility and give operators a chance to update
# permissions or update policies without breaking users. This will
# cause these specific tests to fail since we're trying to correct this
# broken behavior with better scope checking.
with open(policy_file, 'w') as f:
overridden_policies = {
'identity:get_project_tag': (
pp.SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER
),
'identity:list_project_tags': (
pp.SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER
),
'identity:create_project_tag': (
pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN
),
'identity:update_project_tags': (
pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN
),
'identity:delete_project_tag': (
pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN
),
'identity:delete_project_tags': (
pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN
)
}
f.write(jsonutils.dumps(overridden_policies))
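# Illustrative note (not part of the original tests): the file written above is
# a plain JSON mapping of policy names to oslo.policy check strings taken from
# keystone.common.policies.project. The Policy fixtures in the test cases below
# point oslo.policy at this file, so the new scoped defaults are enforced
# instead of being ORed with the deprecated rules.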
class _SystemUserTests(object):
def test_user_can_get_project_tag(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.get(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.NO_CONTENT
)
def test_user_can_list_project_tags(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
r = c.get(
'/v3/projects/%s/tags' % project['id'], headers=self.headers
)
self.assertTrue(len(r.json['tags']) == 1)
self.assertEqual(tag, r.json['tags'][0])
class _SystemMemberAndReaderTagTests(object):
def test_user_cannot_create_project_tag(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_update_project_tag(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
update = {"tags": [uuid.uuid4().hex]}
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags' % project['id'], headers=self.headers,
json=update, expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_delete_project_tag(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
class _DomainAndProjectUserTagTests(object):
def test_user_cannot_create_project_tag(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_update_project_tag(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
update = {"tags": [uuid.uuid4().hex]}
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags' % project['id'], headers=self.headers,
json=update, expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_delete_project_tag(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
class SystemReaderTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_SystemUserTests,
_SystemMemberAndReaderTagTests):
def setUp(self):
super(SystemReaderTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
system_reader = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id
)
self.user_id = PROVIDERS.identity_api.create_user(
system_reader
)['id']
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.bootstrapper.reader_role_id
)
auth = self.build_authentication_request(
user_id=self.user_id, password=system_reader['password'],
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class SystemMemberTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_SystemUserTests,
_SystemMemberAndReaderTagTests):
def setUp(self):
super(SystemMemberTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
system_member = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id
)
self.user_id = PROVIDERS.identity_api.create_user(
system_member
)['id']
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.bootstrapper.member_role_id
)
auth = self.build_authentication_request(
user_id=self.user_id, password=system_member['password'],
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class SystemAdminTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_SystemUserTests):
def setUp(self):
super(SystemAdminTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
self.user_id = self.bootstrapper.admin_user_id
auth = self.build_authentication_request(
user_id=self.user_id,
password=self.bootstrapper.admin_password,
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
def test_user_can_create_project_tag(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.CREATED
)
def test_user_can_update_project_tag(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
update = {"tags": [uuid.uuid4().hex]}
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags' % project['id'], headers=self.headers,
json=update,
expected_status_code=http.client.OK
)
def test_user_can_delete_project_tag(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers
)
class _DomainUserTagTests(object):
def test_user_can_get_tag_for_project_in_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.get(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.NO_CONTENT
)
def test_user_can_list_tags_for_project_in_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
r = c.get(
'/v3/projects/%s/tags' % project['id'], headers=self.headers
)
self.assertTrue(len(r.json['tags']) == 1)
self.assertEqual(tag, r.json['tags'][0])
def test_user_cannot_create_project_tag_outside_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_update_project_tag_outside_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
new_tag = uuid.uuid4().hex
update = {"tags": [new_tag]}
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags' % project['id'], headers=self.headers,
json=update, expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_delete_project_tag_outside_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_get_tag_for_project_outside_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.get(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_list_tags_for_project_outside_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.get(
'/v3/projects/%s/tags' % project['id'],
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
class _DomainMemberAndReaderTagTests(object):
def test_user_cannot_create_project_tag_in_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
tag = uuid.uuid4().hex
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_update_project_tag_in_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
new_tag = uuid.uuid4().hex
update = {"tags": [new_tag]}
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags' % project['id'], headers=self.headers,
json=update, expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_delete_project_tag_in_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
class DomainAdminUserTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_DomainUserTagTests):
def setUp(self):
super(DomainAdminUserTests, self).setUp()
self.loadapp()
self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
self.policy_file_name = self.policy_file.file_name
self.useFixture(
ksfixtures.Policy(
self.config_fixture, policy_file=self.policy_file_name
)
)
_override_policy(self.policy_file_name)
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
domain = PROVIDERS.resource_api.create_domain(
uuid.uuid4().hex, unit.new_domain_ref()
)
self.domain_id = domain['id']
domain_admin = unit.new_user_ref(domain_id=self.domain_id)
self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id']
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.admin_role_id, user_id=self.user_id,
domain_id=self.domain_id
)
auth = self.build_authentication_request(
user_id=self.user_id, password=domain_admin['password'],
domain_id=self.domain_id,
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
def test_user_can_create_project_tag_in_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
tag = uuid.uuid4().hex
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers, expected_status_code=http.client.CREATED
)
def test_user_can_update_project_tag_in_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
new_tag = uuid.uuid4().hex
update = {"tags": [new_tag]}
with self.test_client() as c:
r = c.put(
'/v3/projects/%s/tags' % project['id'], headers=self.headers,
json=update, expected_status_code=http.client.OK
)
self.assertTrue(len(r.json['tags']) == 1)
self.assertEqual(new_tag, r.json['tags'][0])
def test_user_can_delete_project_tag_in_domain(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers
)
class DomainMemberUserTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_DomainUserTagTests,
_DomainMemberAndReaderTagTests):
def setUp(self):
super(DomainMemberUserTests, self).setUp()
self.loadapp()
self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
self.policy_file_name = self.policy_file.file_name
self.useFixture(
ksfixtures.Policy(
self.config_fixture, policy_file=self.policy_file_name
)
)
_override_policy(self.policy_file_name)
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
domain = PROVIDERS.resource_api.create_domain(
uuid.uuid4().hex, unit.new_domain_ref()
)
self.domain_id = domain['id']
domain_admin = unit.new_user_ref(domain_id=self.domain_id)
self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id']
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=self.user_id,
domain_id=self.domain_id
)
auth = self.build_authentication_request(
user_id=self.user_id, password=domain_admin['password'],
domain_id=self.domain_id,
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class DomainReaderUserTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_DomainUserTagTests,
_DomainMemberAndReaderTagTests):
def setUp(self):
super(DomainReaderUserTests, self).setUp()
self.loadapp()
self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
self.policy_file_name = self.policy_file.file_name
self.useFixture(
ksfixtures.Policy(
self.config_fixture, policy_file=self.policy_file_name
)
)
_override_policy(self.policy_file_name)
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
domain = PROVIDERS.resource_api.create_domain(
uuid.uuid4().hex, unit.new_domain_ref()
)
self.domain_id = domain['id']
domain_admin = unit.new_user_ref(domain_id=self.domain_id)
self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id']
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=self.user_id,
domain_id=self.domain_id
)
auth = self.build_authentication_request(
user_id=self.user_id, password=domain_admin['password'],
domain_id=self.domain_id,
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class _ProjectUserTagTests(object):
def test_user_can_get_tag_for_project(self):
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(self.project_id, tag)
with self.test_client() as c:
c.get(
'/v3/projects/%s/tags/%s' % (self.project_id, tag),
headers=self.headers,
expected_status_code=http.client.NO_CONTENT
)
def test_user_can_list_tags_for_project(self):
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(self.project_id, tag)
with self.test_client() as c:
r = c.get(
'/v3/projects/%s/tags' % self.project_id, headers=self.headers
)
self.assertTrue(len(r.json['tags']) == 1)
self.assertEqual(tag, r.json['tags'][0])
def test_user_cannot_create_tag_for_other_project(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_update_tag_for_other_project(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
update = {"tags": [uuid.uuid4().hex]}
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags' % project['id'], headers=self.headers,
json=update, expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_delete_tag_for_other_project(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_get_tag_for_other_project(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.get(
'/v3/projects/%s/tags/%s' % (project['id'], tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_list_tags_for_other_project(self):
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(project['id'], tag)
with self.test_client() as c:
c.get(
'/v3/projects/%s/tags' % project['id'],
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
class _ProjectMemberAndReaderTagTests(object):
def test_user_cannot_create_project_tag(self):
tag = uuid.uuid4().hex
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags/%s' % (self.project_id, tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_update_project_tag(self):
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(self.project_id, tag)
update = {"tags": [uuid.uuid4().hex]}
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags' % self.project_id, headers=self.headers,
json=update, expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_delete_project_tag(self):
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(self.project_id, tag)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/tags/%s' % (self.project_id, tag),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
class ProjectAdminTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_ProjectUserTagTests):
def setUp(self):
super(ProjectAdminTests, self).setUp()
self.loadapp()
self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
self.policy_file_name = self.policy_file.file_name
self.useFixture(
ksfixtures.Policy(
self.config_fixture, policy_file=self.policy_file_name
)
)
_override_policy(self.policy_file_name)
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
self.user_id = self.bootstrapper.admin_user_id
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.admin_role_id, user_id=self.user_id,
project_id=self.bootstrapper.project_id
)
self.project_id = self.bootstrapper.project_id
auth = self.build_authentication_request(
user_id=self.user_id, password=self.bootstrapper.admin_password,
project_id=self.bootstrapper.project_id
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
def test_user_can_create_project_tag(self):
tag = uuid.uuid4().hex
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags/%s' % (self.project_id, tag),
headers=self.headers, expected_status_code=http.client.CREATED
)
def test_user_can_update_project_tag(self):
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(self.project_id, tag)
update = {"tags": [uuid.uuid4().hex]}
with self.test_client() as c:
c.put(
'/v3/projects/%s/tags' % self.project_id, headers=self.headers,
json=update, expected_status_code=http.client.OK
)
def test_user_can_delete_project_tag(self):
tag = uuid.uuid4().hex
PROVIDERS.resource_api.create_project_tag(self.project_id, tag)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/tags/%s' % (self.project_id, tag),
headers=self.headers
)
class ProjectMemberTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_ProjectUserTagTests,
_ProjectMemberAndReaderTagTests):
def setUp(self):
super(ProjectMemberTests, self).setUp()
self.loadapp()
self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
self.policy_file_name = self.policy_file.file_name
self.useFixture(
ksfixtures.Policy(
self.config_fixture, policy_file=self.policy_file_name
)
)
_override_policy(self.policy_file_name)
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
self.project_id = project['id']
self.user_id = self.bootstrapper.admin_user_id
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=self.user_id,
project_id=self.project_id
)
auth = self.build_authentication_request(
user_id=self.user_id, password=self.bootstrapper.admin_password,
project_id=self.project_id
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class ProjectReaderTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_ProjectUserTagTests,
_ProjectMemberAndReaderTagTests):
def setUp(self):
super(ProjectReaderTests, self).setUp()
self.loadapp()
self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
self.policy_file_name = self.policy_file.file_name
self.useFixture(
ksfixtures.Policy(
self.config_fixture, policy_file=self.policy_file_name
)
)
_override_policy(self.policy_file_name)
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
)
self.project_id = project['id']
self.user_id = self.bootstrapper.admin_user_id
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=self.user_id,
project_id=self.project_id
)
auth = self.build_authentication_request(
user_id=self.user_id, password=self.bootstrapper.admin_password,
project_id=self.project_id
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
|
|
# -*- test-case-name: twisted.conch.test.test_knownhosts -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An implementation of the OpenSSH known_hosts database.
@since: 8.2
"""
from __future__ import absolute_import, division
import hmac
from binascii import Error as DecodeError, b2a_base64, a2b_base64
from contextlib import closing
from hashlib import sha1
import sys
from zope.interface import implementer
from twisted.python.randbytes import secureRandom
from twisted.internet import defer
from twisted.python import log
from twisted.python.util import FancyEqMixin
from twisted.conch.interfaces import IKnownHostEntry
from twisted.conch.error import HostKeyChanged, UserRejectedKey, InvalidEntry
from twisted.conch.ssh.keys import Key, BadKeyError
from twisted.python.compat import nativeString
def _b64encode(s):
"""
Encode a binary string as base64 with no trailing newline.
@param s: The string to encode.
@type s: L{bytes}
@return: The base64-encoded string.
@rtype: L{bytes}
"""
return b2a_base64(s).strip()
def _extractCommon(string):
"""
Extract common elements of base64 keys from an entry in a hosts file.
@param string: A known hosts file entry (a single line).
@type string: L{bytes}
@return: a 4-tuple of hostname data (L{bytes}), ssh key type (L{bytes}), key
(L{Key}), and comment (L{bytes} or L{None}). The hostname data is
simply the beginning of the line up to the first occurrence of
whitespace.
@rtype: L{tuple}
"""
elements = string.split(None, 2)
if len(elements) != 3:
raise InvalidEntry()
hostnames, keyType, keyAndComment = elements
splitkey = keyAndComment.split(None, 1)
if len(splitkey) == 2:
keyString, comment = splitkey
comment = comment.rstrip(b"\n")
else:
keyString = splitkey[0]
comment = None
key = Key.fromString(a2b_base64(keyString))
return hostnames, keyType, key, comment
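# Illustrative sketch (not part of the original module): _extractCommon splits a
# plain known_hosts line such as
#
#     b"example.com,93.184.216.34 ssh-rsa AAAAB3NzaC1yc2E... optional comment"
#
# into (b"example.com,93.184.216.34", b"ssh-rsa", <Key instance>,
# b"optional comment"); the addresses and base64 blob above are placeholders.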
class _BaseEntry(object):
"""
Abstract base of both hashed and non-hashed entry objects, since they
represent keys and key types the same way.
@ivar keyType: The type of the key; either ssh-dss or ssh-rsa.
@type keyType: L{bytes}
@ivar publicKey: The server public key indicated by this line.
@type publicKey: L{twisted.conch.ssh.keys.Key}
@ivar comment: Trailing garbage after the key line.
@type comment: L{bytes}
"""
def __init__(self, keyType, publicKey, comment):
self.keyType = keyType
self.publicKey = publicKey
self.comment = comment
def matchesKey(self, keyObject):
"""
Check to see if this entry matches a given key object.
@param keyObject: A public key object to check.
@type keyObject: L{Key}
@return: C{True} if this entry's key matches C{keyObject}, C{False}
otherwise.
@rtype: L{bool}
"""
return self.publicKey == keyObject
@implementer(IKnownHostEntry)
class PlainEntry(_BaseEntry):
"""
A L{PlainEntry} is a representation of a plain-text entry in a known_hosts
file.
@ivar _hostnames: the list of all host-names associated with this entry.
@type _hostnames: L{list} of L{bytes}
"""
def __init__(self, hostnames, keyType, publicKey, comment):
self._hostnames = hostnames
super(PlainEntry, self).__init__(keyType, publicKey, comment)
@classmethod
def fromString(cls, string):
"""
Parse a plain-text entry in a known_hosts file, and return a
corresponding L{PlainEntry}.
@param string: a space-separated string formatted like "hostname
key-type base64-key-data comment".
@type string: L{bytes}
        @raise DecodeError: if the key is not valid base64-encoded data.
@raise InvalidEntry: if the entry does not have the right number of
elements and is therefore invalid.
@raise BadKeyError: if the key, once decoded from base64, is not
actually an SSH key.
@return: an IKnownHostEntry representing the hostname and key in the
input line.
@rtype: L{PlainEntry}
"""
hostnames, keyType, key, comment = _extractCommon(string)
self = cls(hostnames.split(b","), keyType, key, comment)
return self
def matchesHost(self, hostname):
"""
Check to see if this entry matches a given hostname.
@param hostname: A hostname or IP address literal to check against this
entry.
@type hostname: L{bytes}
@return: C{True} if this entry is for the given hostname or IP address,
C{False} otherwise.
@rtype: L{bool}
"""
return hostname in self._hostnames
def toString(self):
"""
Implement L{IKnownHostEntry.toString} by recording the comma-separated
hostnames, key type, and base-64 encoded key.
@return: The string representation of this entry, with unhashed hostname
information.
@rtype: L{bytes}
"""
fields = [b','.join(self._hostnames),
self.keyType,
_b64encode(self.publicKey.blob())]
if self.comment is not None:
fields.append(self.comment)
return b' '.join(fields)
@implementer(IKnownHostEntry)
class UnparsedEntry(object):
"""
L{UnparsedEntry} is an entry in a L{KnownHostsFile} which can't actually be
parsed; therefore it matches no keys and no hosts.
"""
def __init__(self, string):
"""
Create an unparsed entry from a line in a known_hosts file which cannot
otherwise be parsed.
"""
self._string = string
def matchesHost(self, hostname):
"""
Always returns False.
"""
return False
def matchesKey(self, key):
"""
Always returns False.
"""
return False
def toString(self):
"""
Returns the input line, without its newline if one was given.
@return: The string representation of this entry, almost exactly as was
used to initialize this entry but without a trailing newline.
@rtype: L{bytes}
"""
return self._string.rstrip(b"\n")
def _hmacedString(key, string):
"""
Return the SHA-1 HMAC hash of the given key and string.
@param key: The HMAC key.
@type key: L{bytes}
@param string: The string to be hashed.
@type string: L{bytes}
@return: The keyed hash value.
@rtype: L{bytes}
"""
hash = hmac.HMAC(key, digestmod=sha1)
hash.update(string)
return hash.digest()
@implementer(IKnownHostEntry)
class HashedEntry(_BaseEntry, FancyEqMixin):
"""
A L{HashedEntry} is a representation of an entry in a known_hosts file
where the hostname has been hashed and salted.
@ivar _hostSalt: the salt to combine with a hostname for hashing.
@ivar _hostHash: the hashed representation of the hostname.
@cvar MAGIC: the 'hash magic' string used to identify a hashed line in a
known_hosts file as opposed to a plaintext one.
"""
MAGIC = b'|1|'
compareAttributes = (
"_hostSalt", "_hostHash", "keyType", "publicKey", "comment")
def __init__(self, hostSalt, hostHash, keyType, publicKey, comment):
self._hostSalt = hostSalt
self._hostHash = hostHash
super(HashedEntry, self).__init__(keyType, publicKey, comment)
@classmethod
def fromString(cls, string):
"""
Load a hashed entry from a string representing a line in a known_hosts
file.
@param string: A complete single line from a I{known_hosts} file,
formatted as defined by OpenSSH.
@type string: L{bytes}
        @raise DecodeError: if the key, the host salt, or the host hash is
            not valid base64-encoded data.
@raise InvalidEntry: if the entry does not have the right number of
elements and is therefore invalid, or the host/hash portion contains
more items than just the host and hash.
@raise BadKeyError: if the key, once decoded from base64, is not
actually an SSH key.
@return: The newly created L{HashedEntry} instance, initialized with the
information from C{string}.
"""
stuff, keyType, key, comment = _extractCommon(string)
saltAndHash = stuff[len(cls.MAGIC):].split(b"|")
if len(saltAndHash) != 2:
raise InvalidEntry()
hostSalt, hostHash = saltAndHash
self = cls(a2b_base64(hostSalt), a2b_base64(hostHash),
keyType, key, comment)
return self
def matchesHost(self, hostname):
"""
Implement L{IKnownHostEntry.matchesHost} to compare the hash of the
input to the stored hash.
@param hostname: A hostname or IP address literal to check against this
entry.
@type hostname: L{bytes}
@return: C{True} if this entry is for the given hostname or IP address,
C{False} otherwise.
@rtype: L{bool}
"""
return (_hmacedString(self._hostSalt, hostname) == self._hostHash)
def toString(self):
"""
Implement L{IKnownHostEntry.toString} by base64-encoding the salt, host
hash, and key.
@return: The string representation of this entry, with the hostname part
hashed.
@rtype: L{bytes}
"""
fields = [self.MAGIC + b'|'.join([_b64encode(self._hostSalt),
_b64encode(self._hostHash)]),
self.keyType,
_b64encode(self.publicKey.blob())]
if self.comment is not None:
fields.append(self.comment)
return b' '.join(fields)
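# Illustrative sketch (not part of the original module): build the hashed
# "|1|<salt>|<hash>" hostname prefix that HashedEntry.toString produces,
# using a fresh 20-byte salt exactly as KnownHostsFile.addHostKey does below.
def _exampleHashedHostPrefix(hostname=b"example.com"):
    salt = secureRandom(20)
    return HashedEntry.MAGIC + b"|".join(
        [_b64encode(salt), _b64encode(_hmacedString(salt, hostname))])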
class KnownHostsFile(object):
"""
A structured representation of an OpenSSH-format ~/.ssh/known_hosts file.
@ivar _added: A list of L{IKnownHostEntry} providers which have been added
to this instance in memory but not yet saved.
@ivar _clobber: A flag indicating whether the current contents of the save
path will be disregarded and potentially overwritten or not. If
C{True}, this will be done. If C{False}, entries in the save path will
be read and new entries will be saved by appending rather than
overwriting.
@type _clobber: L{bool}
@ivar _savePath: See C{savePath} parameter of L{__init__}.
"""
def __init__(self, savePath):
"""
Create a new, empty KnownHostsFile.
Unless you want to erase the current contents of C{savePath}, you want
to use L{KnownHostsFile.fromPath} instead.
@param savePath: The L{FilePath} to which to save new entries.
@type savePath: L{FilePath}
"""
self._added = []
self._savePath = savePath
self._clobber = True
@property
def savePath(self):
"""
@see: C{savePath} parameter of L{__init__}
"""
return self._savePath
def iterentries(self):
"""
Iterate over the host entries in this file.
@return: An iterable the elements of which provide L{IKnownHostEntry}.
There is an element for each entry in the file as well as an element
for each added but not yet saved entry.
@rtype: iterable of L{IKnownHostEntry} providers
"""
for entry in self._added:
yield entry
if self._clobber:
return
try:
fp = self._savePath.open()
except IOError:
return
with fp:
for line in fp:
try:
if line.startswith(HashedEntry.MAGIC):
entry = HashedEntry.fromString(line)
else:
entry = PlainEntry.fromString(line)
except (DecodeError, InvalidEntry, BadKeyError):
entry = UnparsedEntry(line)
yield entry
def hasHostKey(self, hostname, key):
"""
Check for an entry with matching hostname and key.
@param hostname: A hostname or IP address literal to check for.
@type hostname: L{bytes}
@param key: The public key to check for.
@type key: L{Key}
@return: C{True} if the given hostname and key are present in this file,
C{False} if they are not.
@rtype: L{bool}
@raise HostKeyChanged: if the host key found for the given hostname
does not match the given key.
"""
for lineidx, entry in enumerate(self.iterentries(), -len(self._added)):
if entry.matchesHost(hostname):
if entry.matchesKey(key):
return True
else:
# Notice that lineidx is 0-based but HostKeyChanged.lineno
# is 1-based.
if lineidx < 0:
line = None
path = None
else:
line = lineidx + 1
path = self._savePath
raise HostKeyChanged(entry, path, line)
return False
def verifyHostKey(self, ui, hostname, ip, key):
"""
Verify the given host key for the given IP and host, asking for
confirmation from, and notifying, the given UI about changes to this
file.
@param ui: The user interface to request an IP address from.
@param hostname: The hostname that the user requested to connect to.
@param ip: The string representation of the IP address that is actually
being connected to.
@param key: The public key of the server.
@return: a L{Deferred} that fires with True when the key has been
verified, or fires with an errback when the key either cannot be
verified or has changed.
@rtype: L{Deferred}
"""
hhk = defer.maybeDeferred(self.hasHostKey, hostname, key)
def gotHasKey(result):
if result:
if not self.hasHostKey(ip, key):
ui.warn("Warning: Permanently added the %s host key for "
"IP address '%s' to the list of known hosts." %
(key.type(), nativeString(ip)))
self.addHostKey(ip, key)
self.save()
return result
else:
def promptResponse(response):
if response:
self.addHostKey(hostname, key)
self.addHostKey(ip, key)
self.save()
return response
else:
raise UserRejectedKey()
prompt = (
"The authenticity of host '%s (%s)' "
"can't be established.\n"
"RSA key fingerprint is %s.\n"
"Are you sure you want to continue connecting (yes/no)? " %
(nativeString(hostname), nativeString(ip),
key.fingerprint()))
proceed = ui.prompt(prompt.encode(sys.getdefaultencoding()))
return proceed.addCallback(promptResponse)
return hhk.addCallback(gotHasKey)
def addHostKey(self, hostname, key):
"""
Add a new L{HashedEntry} to the key database.
Note that you still need to call L{KnownHostsFile.save} if you wish
these changes to be persisted.
@param hostname: A hostname or IP address literal to associate with the
new entry.
@type hostname: L{bytes}
@param key: The public key to associate with the new entry.
@type key: L{Key}
@return: The L{HashedEntry} that was added.
@rtype: L{HashedEntry}
"""
salt = secureRandom(20)
keyType = key.sshType()
entry = HashedEntry(salt, _hmacedString(salt, hostname),
keyType, key, None)
self._added.append(entry)
return entry
def save(self):
"""
Save this L{KnownHostsFile} to the path it was loaded from.
"""
p = self._savePath.parent()
if not p.isdir():
p.makedirs()
if self._clobber:
mode = "wb"
else:
mode = "ab"
with self._savePath.open(mode) as hostsFileObj:
if self._added:
hostsFileObj.write(
b"\n".join([entry.toString() for entry in self._added]) +
b"\n")
self._added = []
self._clobber = False
@classmethod
def fromPath(cls, path):
"""
Create a new L{KnownHostsFile}, potentially reading existing known
hosts information from the given file.
@param path: A path object to use for both reading contents from and
later saving to. If no file exists at this path, it is not an
error; a L{KnownHostsFile} with no entries is returned.
@type path: L{FilePath}
@return: A L{KnownHostsFile} initialized with entries from C{path}.
@rtype: L{KnownHostsFile}
"""
knownHosts = cls(path)
knownHosts._clobber = False
return knownHosts
class ConsoleUI(object):
"""
A UI object that can ask true/false questions and post notifications on the
console, to be used during key verification.
"""
def __init__(self, opener):
"""
@param opener: A no-argument callable which should open a console
binary-mode file-like object to be used for reading and writing.
This initializes the C{opener} attribute.
@type opener: callable taking no arguments and returning a read/write
file-like object
"""
self.opener = opener
def prompt(self, text):
"""
Write the given text as a prompt to the console output, then read a
result from the console input.
@param text: Something to present to a user to solicit a yes or no
response.
@type text: L{bytes}
@return: a L{Deferred} which fires with L{True} when the user answers
'yes' and L{False} when the user answers 'no'. It may errback if
there were any I/O errors.
"""
d = defer.succeed(None)
def body(ignored):
with closing(self.opener()) as f:
f.write(text)
while True:
answer = f.readline().strip().lower()
if answer == b'yes':
return True
elif answer == b'no':
return False
else:
f.write(b"Please type 'yes' or 'no': ")
return d.addCallback(body)
def warn(self, text):
"""
Notify the user (non-interactively) of the provided text, by writing it
to the console.
@param text: Some information the user is to be made aware of.
@type text: L{bytes}
"""
try:
with closing(self.opener()) as f:
f.write(text)
        except Exception:
log.err()
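# Illustrative usage sketch (not part of the original module): load a
# known_hosts database from disk, check a host key, and record it if it is
# missing.  The path, hostname, and already-parsed key object are assumptions.
def _exampleCheckAndRecord(path, hostname, key):
    from twisted.python.filepath import FilePath
    hostsFile = KnownHostsFile.fromPath(FilePath(path))
    # hasHostKey raises HostKeyChanged if a different key is already recorded.
    if not hostsFile.hasHostKey(hostname, key):
        hostsFile.addHostKey(hostname, key)
        hostsFile.save()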
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitAuthorizationsOperations:
"""ExpressRouteCircuitAuthorizationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitAuthorization":
"""Gets the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitAuthorization, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.ExpressRouteCircuitAuthorization
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
authorization_parameters: "_models.ExpressRouteCircuitAuthorization",
**kwargs: Any
) -> "_models.ExpressRouteCircuitAuthorization":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(authorization_parameters, 'ExpressRouteCircuitAuthorization')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
authorization_parameters: "_models.ExpressRouteCircuitAuthorization",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitAuthorization"]:
"""Creates or updates an authorization in the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param authorization_parameters: Parameters supplied to the create or update express route
circuit authorization operation.
:type authorization_parameters: ~azure.mgmt.network.v2019_06_01.models.ExpressRouteCircuitAuthorization
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitAuthorization or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.ExpressRouteCircuitAuthorization]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
authorization_parameters=authorization_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AuthorizationListResult"]:
"""Gets all authorizations in an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AuthorizationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.AuthorizationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AuthorizationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AuthorizationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations'} # type: ignore
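# Illustrative usage sketch (not generated code): driving these operations
# through an async management client.  The client wiring, resource names, and
# the empty authorization body are placeholder assumptions.
async def _example_usage(client):
    ops = client.express_route_circuit_authorizations
    # Create (or update) an authorization and wait for the LRO to finish.
    poller = await ops.begin_create_or_update(
        "my-rg", "my-circuit", "my-auth",
        _models.ExpressRouteCircuitAuthorization())
    authorization = await poller.result()
    print(authorization.name)
    # Enumerate every authorization on the circuit.
    async for item in ops.list("my-rg", "my-circuit"):
        print(item.name)
    # Delete the authorization and wait for completion.
    delete_poller = await ops.begin_delete("my-rg", "my-circuit", "my-auth")
    await delete_poller.result()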
|
|
# module 'pre' -- A collection of regular expression operations
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It's 8-bit clean: the strings being processed may
contain both null bytes and characters whose high bit is set. Regular
expression pattern strings may not contain null bytes, but can specify
the null byte using the \\number notation. Characters with the high
bit set may be included.
Regular expressions can contain both special and ordinary
characters. Most ordinary characters, like "A", "a", or "0", are the
simplest regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?iLmsx) Set the I, L, M, S, or X flag for the RE.
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\\number Matches the contents of the group of the same number.
\\A Matches only at the start of the string.
\\Z Matches only at the end of the string.
\\b Matches the empty string, but only at the start or end of a word.
\\B Matches the empty string, but not at the start or end of a word.
\\d Matches any decimal digit; equivalent to the set [0-9].
\\D Matches any non-digit character; equivalent to the set [^0-9].
\\s Matches any whitespace character; equivalent to [ \\t\\n\\r\\f\\v].
\\S Matches any non-whitespace character; equiv. to [^ \\t\\n\\r\\f\\v].
\\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\\W Matches the complement of \\w.
\\\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
compile Compile a pattern into a RegexObject.
escape Backslash all non-alphanumerics in a string.
This module exports the following classes:
RegexObject Holds a compiled regular expression pattern.
MatchObject Contains information about pattern matches.
Some of the functions in this module takes flags as optional parameters:
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines as well as the string.
"$" matches the end of lines as well as the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
This module also defines an exception 'error'.
"""
import sys
from pcre import *
# XXX This module is deprecated as of Python 2.3, and should be removed
# in the version that follows 2.3.
import warnings as _warnings
_warnings.warn("Please use the 're' module, not the 'pre' module",
DeprecationWarning)
__all__ = ["match","search","sub","subn","split","findall","escape","compile",
"I","L","M","S","X","IGNORECASE","LOCALE","MULTILINE","DOTALL",
"VERBOSE","error"]
#
# First, the public part of the interface:
#
# pcre.error and re.error should be the same, since exceptions can be
# raised from either module.
# compilation flags
I = IGNORECASE
L = LOCALE
M = MULTILINE
S = DOTALL
X = VERBOSE
#
#
#
_cache = {}
_MAXCACHE = 20
def _cachecompile(pattern, flags=0):
key = (pattern, flags)
try:
return _cache[key]
except KeyError:
pass
value = compile(pattern, flags)
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[key] = value
return value
def match(pattern, string, flags=0):
"""match (pattern, string[, flags]) -> MatchObject or None
If zero or more characters at the beginning of string match the
regular expression pattern, return a corresponding MatchObject
instance. Return None if the string does not match the pattern;
note that this is different from a zero-length match.
Note: If you want to locate a match anywhere in string, use
search() instead.
"""
return _cachecompile(pattern, flags).match(string)
def search(pattern, string, flags=0):
"""search (pattern, string[, flags]) -> MatchObject or None
Scan through string looking for a location where the regular
expression pattern produces a match, and return a corresponding
MatchObject instance. Return None if no position in the string
matches the pattern; note that this is different from finding a
zero-length match at some point in the string.
"""
return _cachecompile(pattern, flags).search(string)
def sub(pattern, repl, string, count=0):
"""sub(pattern, repl, string[, count=0]) -> string
Return the string obtained by replacing the leftmost
non-overlapping occurrences of pattern in string by the
replacement repl. If the pattern isn't found, string is returned
unchanged. repl can be a string or a function; if a function, it
is called for every non-overlapping occurrence of pattern. The
function takes a single match object argument, and returns the
replacement string.
The pattern may be a string or a regex object; if you need to
specify regular expression flags, you must use a regex object, or
use embedded modifiers in a pattern; e.g.
sub("(?i)b+", "x", "bbbb BBBB") returns 'x x'.
The optional argument count is the maximum number of pattern
occurrences to be replaced; count must be a non-negative integer,
and the default value of 0 means to replace all occurrences.
"""
if type(pattern) == type(''):
pattern = _cachecompile(pattern)
return pattern.sub(repl, string, count)
def subn(pattern, repl, string, count=0):
"""subn(pattern, repl, string[, count=0]) -> (string, num substitutions)
Perform the same operation as sub(), but return a tuple
(new_string, number_of_subs_made).
"""
if type(pattern) == type(''):
pattern = _cachecompile(pattern)
return pattern.subn(repl, string, count)
def split(pattern, string, maxsplit=0):
"""split(pattern, string[, maxsplit=0]) -> list of strings
Split string by the occurrences of pattern. If capturing
parentheses are used in pattern, then the text of all groups in
the pattern are also returned as part of the resulting list. If
maxsplit is nonzero, at most maxsplit splits occur, and the
remainder of the string is returned as the final element of the
list.
"""
if type(pattern) == type(''):
pattern = _cachecompile(pattern)
return pattern.split(string, maxsplit)
def findall(pattern, string):
"""findall(pattern, string) -> list
Return a list of all non-overlapping matches of pattern in
string. If one or more groups are present in the pattern, return a
list of groups; this will be a list of tuples if the pattern has
more than one group. Empty matches are included in the result.
"""
if type(pattern) == type(''):
pattern = _cachecompile(pattern)
return pattern.findall(string)
def escape(pattern):
"""escape(string) -> string
Return string with all non-alphanumerics backslashed; this is
useful if you want to match an arbitrary literal string that may
have regular expression metacharacters in it.
"""
result = list(pattern)
for i in range(len(pattern)):
char = pattern[i]
if not char.isalnum():
if char=='\000': result[i] = '\\000'
else: result[i] = '\\'+char
return ''.join(result)
def compile(pattern, flags=0):
"""compile(pattern[, flags]) -> RegexObject
Compile a regular expression pattern into a regular expression
object, which can be used for matching using its match() and
search() methods.
"""
groupindex={}
code=pcre_compile(pattern, flags, groupindex)
return RegexObject(pattern, flags, code, groupindex)
#
# Class definitions
#
class RegexObject:
"""Holds a compiled regular expression pattern.
Methods:
match Match the pattern to the beginning of a string.
search Search a string for the presence of the pattern.
sub Substitute occurrences of the pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of the pattern.
findall Find all occurrences of the pattern in a string.
"""
def __init__(self, pattern, flags, code, groupindex):
self.code = code
self.flags = flags
self.pattern = pattern
self.groupindex = groupindex
def search(self, string, pos=0, endpos=None):
"""search(string[, pos][, endpos]) -> MatchObject or None
Scan through string looking for a location where this regular
expression produces a match, and return a corresponding
MatchObject instance. Return None if no position in the string
matches the pattern; note that this is different from finding
a zero-length match at some point in the string. The optional
pos and endpos parameters have the same meaning as for the
match() method.
"""
if endpos is None or endpos>len(string):
endpos=len(string)
if endpos<pos: endpos=pos
regs = self.code.match(string, pos, endpos, 0)
if regs is None:
return None
self._num_regs=len(regs)
return MatchObject(self,
string,
pos, endpos,
regs)
def match(self, string, pos=0, endpos=None):
"""match(string[, pos][, endpos]) -> MatchObject or None
If zero or more characters at the beginning of string match
this regular expression, return a corresponding MatchObject
instance. Return None if the string does not match the
pattern; note that this is different from a zero-length match.
Note: If you want to locate a match anywhere in string, use
search() instead.
The optional second parameter pos gives an index in the string
where the search is to start; it defaults to 0. This is not
        completely equivalent to slicing the string; the '^' pattern
character matches at the real beginning of the string and at
positions just after a newline, but not necessarily at the
index where the search is to start.
The optional parameter endpos limits how far the string will
be searched; it will be as if the string is endpos characters
long, so only the characters from pos to endpos will be
searched for a match.
"""
if endpos is None or endpos>len(string):
endpos=len(string)
if endpos<pos: endpos=pos
regs = self.code.match(string, pos, endpos, ANCHORED)
if regs is None:
return None
self._num_regs=len(regs)
return MatchObject(self,
string,
pos, endpos,
regs)
def sub(self, repl, string, count=0):
"""sub(repl, string[, count=0]) -> string
Return the string obtained by replacing the leftmost
non-overlapping occurrences of the compiled pattern in string
by the replacement repl. If the pattern isn't found, string is
returned unchanged.
Identical to the sub() function, using the compiled pattern.
"""
return self.subn(repl, string, count)[0]
def subn(self, repl, source, count=0):
"""subn(repl, string[, count=0]) -> tuple
Perform the same operation as sub(), but return a tuple
(new_string, number_of_subs_made).
"""
if count < 0:
raise error, "negative substitution count"
if count == 0:
count = sys.maxint
n = 0 # Number of matches
pos = 0 # Where to start searching
lastmatch = -1 # End of last match
results = [] # Substrings making up the result
end = len(source)
if type(repl) is type(''):
# See if repl contains group references (if it does,
# pcre_expand will attempt to call _Dummy.group, which
# results in a TypeError)
try:
repl = pcre_expand(_Dummy, repl)
except (error, TypeError):
m = MatchObject(self, source, 0, end, [])
repl = lambda m, repl=repl, expand=pcre_expand: expand(m, repl)
else:
m = None
else:
m = MatchObject(self, source, 0, end, [])
match = self.code.match
append = results.append
while n < count and pos <= end:
regs = match(source, pos, end, 0)
if not regs:
break
self._num_regs = len(regs)
i, j = regs[0]
if i == j == lastmatch:
# Empty match adjacent to previous match
pos = pos + 1
append(source[lastmatch:pos])
continue
if pos < i:
append(source[pos:i])
if m:
m.pos = pos
m.regs = regs
append(repl(m))
else:
append(repl)
pos = lastmatch = j
if i == j:
# Last match was empty; don't try here again
pos = pos + 1
append(source[lastmatch:pos])
n = n + 1
append(source[pos:])
return (''.join(results), n)
def split(self, source, maxsplit=0):
"""split(source[, maxsplit=0]) -> list of strings
Split string by the occurrences of the compiled pattern. If
capturing parentheses are used in the pattern, then the text
of all groups in the pattern are also returned as part of the
resulting list. If maxsplit is nonzero, at most maxsplit
splits occur, and the remainder of the string is returned as
the final element of the list.
"""
if maxsplit < 0:
raise error, "negative split count"
if maxsplit == 0:
maxsplit = sys.maxint
n = 0
pos = 0
lastmatch = 0
results = []
end = len(source)
match = self.code.match
append = results.append
while n < maxsplit:
regs = match(source, pos, end, 0)
if not regs:
break
i, j = regs[0]
if i == j:
# Empty match
if pos >= end:
break
pos = pos+1
continue
append(source[lastmatch:i])
rest = regs[1:]
if rest:
for a, b in rest:
if a == -1 or b == -1:
group = None
else:
group = source[a:b]
append(group)
pos = lastmatch = j
n = n + 1
append(source[lastmatch:])
return results
def findall(self, source):
"""findall(source) -> list
Return a list of all non-overlapping matches of the compiled
pattern in string. If one or more groups are present in the
pattern, return a list of groups; this will be a list of
tuples if the pattern has more than one group. Empty matches
are included in the result.
"""
pos = 0
end = len(source)
results = []
match = self.code.match
append = results.append
while pos <= end:
regs = match(source, pos, end, 0)
if not regs:
break
i, j = regs[0]
rest = regs[1:]
if not rest:
gr = source[i:j]
elif len(rest) == 1:
a, b = rest[0]
gr = source[a:b]
else:
gr = []
for (a, b) in rest:
gr.append(source[a:b])
gr = tuple(gr)
append(gr)
pos = max(j, pos+1)
return results
# The following 3 functions were contributed by Mike Fletcher, and
# allow pickling and unpickling of RegexObject instances.
def __getinitargs__(self):
return (None,None,None,None) # any 4 elements, to work around
# problems with the
# pickle/cPickle modules not yet
# ignoring the __init__ function
def __getstate__(self):
return self.pattern, self.flags, self.groupindex
def __setstate__(self, statetuple):
self.pattern = statetuple[0]
self.flags = statetuple[1]
self.groupindex = statetuple[2]
self.code = pcre_compile(*statetuple)
class _Dummy:
    # Dummy class used by RegexObject.subn().  Has 'group' to avoid core dump.
group = None
class MatchObject:
"""Holds a compiled regular expression pattern.
Methods:
start Return the index of the start of a matched substring.
end Return the index of the end of a matched substring.
span Return a tuple of (start, end) of a matched substring.
groups Return a tuple of all the subgroups of the match.
group Return one or more subgroups of the match.
groupdict Return a dictionary of all the named subgroups of the match.
"""
def __init__(self, re, string, pos, endpos, regs):
self.re = re
self.string = string
self.pos = pos
self.endpos = endpos
self.regs = regs
def start(self, g = 0):
"""start([group=0]) -> int or None
Return the index of the start of the substring matched by
group; group defaults to zero (meaning the whole matched
substring). Return -1 if group exists but did not contribute
to the match.
"""
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, 'group %s is undefined' % `g`
return self.regs[g][0]
def end(self, g = 0):
"""end([group=0]) -> int or None
        Return the index of the end of the substring matched by
group; group defaults to zero (meaning the whole matched
substring). Return -1 if group exists but did not contribute
to the match.
"""
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, 'group %s is undefined' % `g`
return self.regs[g][1]
def span(self, g = 0):
"""span([group=0]) -> tuple
Return the 2-tuple (m.start(group), m.end(group)). Note that
if group did not contribute to the match, this is (-1,
-1). Group defaults to zero (meaning the whole matched
substring).
"""
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, 'group %s is undefined' % `g`
return self.regs[g]
def groups(self, default=None):
"""groups([default=None]) -> tuple
Return a tuple containing all the subgroups of the match, from
1 up to however many groups are in the pattern. The default
argument is used for groups that did not participate in the
match.
"""
result = []
for g in range(1, self.re._num_regs):
a, b = self.regs[g]
if a == -1 or b == -1:
result.append(default)
else:
result.append(self.string[a:b])
return tuple(result)
def group(self, *groups):
"""group([group1, group2, ...]) -> string or tuple
Return one or more subgroups of the match. If there is a
single argument, the result is a single string; if there are
multiple arguments, the result is a tuple with one item per
argument. Without arguments, group1 defaults to zero (i.e. the
whole match is returned). If a groupN argument is zero, the
corresponding return value is the entire matching string; if
it is in the inclusive range [1..99], it is the string
        matching the corresponding parenthesized group. If a group
number is negative or larger than the number of groups defined
in the pattern, an IndexError exception is raised. If a group
is contained in a part of the pattern that did not match, the
corresponding result is None. If a group is contained in a
part of the pattern that matched multiple times, the last
match is returned.
If the regular expression uses the (?P<name>...) syntax, the
groupN arguments may also be strings identifying groups by
their group name. If a string argument is not used as a group
name in the pattern, an IndexError exception is raised.
"""
if len(groups) == 0:
groups = (0,)
result = []
for g in groups:
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, 'group %s is undefined' % `g`
if g >= len(self.regs):
raise IndexError, 'group %s is undefined' % `g`
a, b = self.regs[g]
if a == -1 or b == -1:
result.append(None)
else:
result.append(self.string[a:b])
if len(result) > 1:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return ()
def groupdict(self, default=None):
"""groupdict([default=None]) -> dictionary
Return a dictionary containing all the named subgroups of the
match, keyed by the subgroup name. The default argument is
used for groups that did not participate in the match.
"""
dict = {}
for name, index in self.re.groupindex.items():
a, b = self.regs[index]
if a == -1 or b == -1:
dict[name] = default
else:
dict[name] = self.string[a:b]
return dict
|
|
#
# ColorBar.py -- color bar widget
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import math
import numpy
from ginga.gtkw import gtksel
import gtk, gobject, cairo
from ginga.misc import Callback
from ginga import RGBMap
class ColorBarError(Exception):
pass
# Create a GTK+ widget on which we will draw using Cairo
class ColorBar(gtk.DrawingArea, Callback.Callbacks):
# Draw in response to an expose-event
#__gsignals__ = { "expose-event": "override" }
def __init__(self, logger, rgbmap=None, link=False):
gtk.DrawingArea.__init__(self)
Callback.Callbacks.__init__(self)
self.surface = None
self.logger = logger
self.link_rgbmap = link
if not rgbmap:
rgbmap = RGBMap.RGBMapper(logger)
self.set_rgbmap(rgbmap)
self._start_x = 0
# for drawing range
self.t_showrange = True
self.t_font = 'Sans Serif'
self.t_fontsize = 10
self.t_spacing = 40
self.loval = 0
self.hival = 0
self._interval = {}
self._avg_pixels_per_range_num = 70
self.mark_pos = None
# For callbacks
for name in ('motion', 'scroll'):
self.enable_callback(name)
if not gtksel.have_gtk3:
self.connect("expose_event", self.expose_event)
else:
self.connect("draw", self.draw_event)
self.connect("configure_event", self.configure_event)
self.connect("size-request", self.size_request)
self.connect("motion_notify_event", self.motion_notify_event)
self.connect("button_press_event", self.button_press_event)
self.connect("button_release_event", self.button_release_event)
self.connect("scroll_event", self.scroll_event)
mask = self.get_events()
self.set_events(mask
| gtk.gdk.EXPOSURE_MASK
| gtk.gdk.BUTTON_PRESS_MASK
| gtk.gdk.BUTTON_RELEASE_MASK
| gtk.gdk.POINTER_MOTION_MASK
| gtk.gdk.POINTER_MOTION_HINT_MASK
| gtk.gdk.SCROLL_MASK)
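    # Illustrative usage sketch (not part of the original widget); assumes an
    # existing logger and a gtk.VBox named `vbox`:
    #
    #     cbar = ColorBar(logger)
    #     cbar.set_range(0.0, 1000.0)
    #     cbar.set_current_value(250.0)
    #     vbox.pack_start(cbar, expand=False, fill=True)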
def get_rgbmap(self):
return self.rgbmap
def set_rgbmap(self, rgbmap):
self.rgbmap = rgbmap
# TODO: figure out if we can get rid of this link option
if self.link_rgbmap:
rgbmap.add_callback('changed', self.rgbmap_cb)
self.redraw()
# TODO: deprecate these two?
def set_cmap(self, cm):
self.rgbmap.set_cmap(cm)
self.redraw()
def set_imap(self, im, reset=False):
self.rgbmap.set_imap(im)
self.redraw()
def set_range(self, loval, hival, redraw=True):
self.loval = loval
self.hival = hival
# Calculate reasonable spacing for range numbers
text = "%d" % (int(hival))
try:
win = self.get_window()
            if win is not None:
cr = win.cairo_create()
a, b, _wd, _ht, _i, _j = cr.text_extents(text)
self._avg_pixels_per_range_num = self.t_spacing + _wd
except Exception as e:
self.logger.error("Error getting text extents: %s" % (
str(e)))
if self.t_showrange and redraw:
self.redraw()
def configure_event(self, widget, event):
self.surface = None
rect = widget.get_allocation()
x, y, width, height = rect.x, rect.y, rect.width, rect.height
arr8 = numpy.zeros(height*width*4).astype(numpy.uint8)
stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_RGB24,
width)
surface = cairo.ImageSurface.create_for_data(arr8,
cairo.FORMAT_RGB24,
width, height, stride)
self.surface = surface
self.width = width
self.height = height
# calculate intervals for range numbers
nums = max(int(width // self._avg_pixels_per_range_num), 1)
spacing = 256 // nums
self._interval = {}
for i in range(nums):
self._interval[i*spacing] = True
self.logger.debug("nums=%d spacing=%d intervals=%s" % (
nums, spacing, self._interval))
self.redraw()
def size_request(self, widget, requisition):
"""Callback function to request our desired size.
"""
requisition.width = -1
requisition.height = 15
return True
# For Gtk3
def draw_event(self, widget, cr):
        if self.surface is not None:
self.logger.debug("surface is %s" % self.surface)
cr.set_source_surface(self.surface, 0, 0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
return False
def expose_event(self, widget, event):
# When an area of the window is exposed, we just copy out of the
# server-side, off-screen pixmap to that area.
x , y, width, height = event.area
self.logger.debug("surface is %s" % self.surface)
        if self.surface is not None:
win = widget.get_window()
cr = win.cairo_create()
# set clip area for exposed region
cr.rectangle(x, y, width, height)
cr.clip()
# repaint from off-screen surface
cr.set_source_surface(self.surface, 0, 0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
return False
# Handle the expose-event by drawing
def do_expose_event(self, event):
# Create the cairo context
win = self.get_window()
cr = win.cairo_create()
# TODO:
## # Restrict Cairo to the exposed area; avoid extra work
## cr.rectangle(event.area.x, event.area.y,
## event.area.width, event.area.height)
## cr.clip()
## win = self.get_window()
## self.draw(cr, *win.get_size())
self.draw(cr)
def _draw(self, cr):
rect = self.get_allocation()
# TODO: fill with white?
x1 = 0; x2 = rect.width
clr_wd = rect.width // 256
rem_px = x2 - (clr_wd * 256)
if rem_px > 0:
ival = 256 // rem_px
else:
ival = 0
clr_ht = rect.height
#print "clr is %dx%d width=%d rem=%d ival=%d" % (
# rect.width, rect.height, clr_wd, rem_px, ival)
dist = self.rgbmap.get_dist()
j = ival; off = 0
range_pts = []
for i in range(256):
wd = clr_wd
if rem_px > 0:
j -= 1
if j == 0:
rem_px -= 1
j = ival
wd += 1
x = off
(r, g, b) = self.rgbmap.get_rgbval(i)
r = float(r) / 255.0
g = float(g) / 255.0
b = float(b) / 255.0
cr.set_source_rgb(r, g, b)
cr.rectangle(x, 0, wd, clr_ht)
cr.fill()
# Draw range scale if we are supposed to
if self.t_showrange and i in self._interval:
cb_pct = float(i) / 256.0
# get inverse of distribution function and calculate value
# at this position
rng_pct = dist.get_dist_pct(cb_pct)
val = int(self.loval + (rng_pct * (self.hival - self.loval)))
text = "%d" % (val)
a, b, _wd, _ht, _i, _j = cr.text_extents(text)
rx = x
ry = 4 + _ht
range_pts.append((rx, ry, text))
off += wd
# draw range
cr.select_font_face(self.t_font)
cr.set_font_size(self.t_fontsize)
cr.set_source_rgb(0.0, 0.0, 0.0)
cr.set_line_width(1)
for (x, y, text) in range_pts:
# tick
cr.move_to (x, 0)
cr.line_to (x, 2)
cr.close_path()
cr.stroke()
#cr.fill()
# number
cr.move_to(x, y)
cr.show_text(text)
# Draw moving value wedge
        if self.mark_pos is not None:
cr.set_source_rgb(1.0, 0.0, 0.0)
cr.set_line_width(3)
cr.move_to (self.mark_pos-4, self.height)
cr.line_to (self.mark_pos, self.height//2)
cr.line_to (self.mark_pos+4, self.height)
cr.line_to (self.mark_pos-4, self.height)
cr.fill()
def draw(self, cr):
return self._draw(cr)
def redraw(self):
win = self.get_window()
## if not win:
## return
## cr = win.cairo_create()
if not self.surface:
return
cr = cairo.Context(self.surface)
self.draw(cr)
win.invalidate_rect(None, True)
# Process expose events right away so window is responsive
# to scrolling
win.process_updates(True)
def set_current_value(self, value):
        rng = self.hival - self.loval
if value < self.loval:
value = self.loval
elif value > self.hival:
value = self.hival
        pct = float(value - self.loval) / float(rng)
self.mark_pos = int(pct * self.width)
#print "mark position is %d (%.2f)" % (self.mark_pos, pct)
self.redraw()
def shift_colormap(self, pct):
self.rgbmap.set_sarr(self._sarr, callback=False)
self.rgbmap.shift(pct)
self.redraw()
def rgbmap_cb(self, rgbmap):
self.redraw()
def button_press_event(self, widget, event):
# event.button, event.x, event.y
x = event.x; y = event.y
button = event.button
## print "button event at %dx%d, button=%d" % (x, y, button)
if button == 1:
self._start_x = x
sarr = self.rgbmap.get_sarr()
self._sarr = sarr.copy()
return True
## return self.make_callback('button-press', event)
def button_release_event(self, widget, event):
# event.button, event.x, event.y
x = event.x; y = event.y
button = event.button
win = self.get_window()
#print "button release at %dx%d button=%d" % (x, y, button)
if button == 1:
dx = x - self._start_x
#wd, ht = win.get_size()
geom = win.get_geometry()
wd, ht = geom[2], geom[3]
pct = float(dx) / float(wd)
#print "dx=%f wd=%d pct=%f" % (dx, wd, pct)
self.shift_colormap(pct)
return True
elif button == 3:
self.rgbmap.reset_cmap()
return True
## return self.make_callback('button-release', event)
def motion_notify_event(self, widget, event):
button = 0
if event.is_hint:
tup = event.window.get_pointer()
if gtksel.have_gtk3:
xx, x, y, state = tup
else:
x, y, state = tup
else:
x, y, state = event.x, event.y, event.state
btn1_down = state & gtk.gdk.BUTTON1_MASK
if btn1_down:
win = self.get_window()
dx = x - self._start_x
#wd, ht = win.get_size()
geom = win.get_geometry()
wd, ht = geom[2], geom[3]
pct = float(dx) / float(wd)
#print "dx=%f wd=%d pct=%f" % (dx, wd, pct)
self.shift_colormap(pct)
return True
pct = float(x) / float(self.width)
value = int(self.loval + pct * (self.hival - self.loval))
return self.make_callback('motion', value, event)
def scroll_event(self, widget, event):
# event.button, event.x, event.y
x = event.x; y = event.y
#print "scroll at %dx%d event=%s" % (x, y, str(event))
return self.make_callback('scroll', event)
#END
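# A minimal usage sketch, assuming a working PyGTK (GTK2) environment in which
# the imports at the top of this module succeed; the window title and logger
# name below are illustrative only.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('colorbar-demo')
    cbar = ColorBar(logger)
    cbar.set_range(0, 255)
    # report the data value under the cursor as the mouse moves over the bar
    cbar.add_callback('motion', lambda cb, value, event: logger.info("value=%d", value))
    win = gtk.Window(gtk.WINDOW_TOPLEVEL)
    win.set_title("ColorBar demo")
    win.set_default_size(400, 20)
    win.connect("destroy", lambda w: gtk.main_quit())
    win.add(cbar)
    win.show_all()
    gtk.main()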
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
This module of native functions is implemented for
compatability with 010 editor functions. Some of these functions
are nops, some are fully implemented.
"""
import six
import sys
from pfp.native import native
import pfp.errors as errors
import pfp.fields
# PYVAL/PYSTR unwrap pfp field objects into plain Python values/strings;
# they are used throughout the implemented functions below.
from pfp.fields import PYVAL, PYSTR
def _cmp(a, b):
if six.PY3:
return (a > b) - (a < b)
else:
return cmp(a, b)
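# For example: _cmp("a", "b") == -1, _cmp("b", "b") == 0, _cmp("c", "b") == 1,
# mirroring the Python 2 built-in cmp() on both Python 2 and Python 3.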
# http://www.sweetscape.com/010editor/manual/FuncString.htm
#double Atof( const char s[] )
@native(name="Atof", ret=pfp.fields.Double)
def Atof(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int Atoi( const char s[] )
@native(name="Atoi", ret=pfp.fields.Int)
def Atoi(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int64 BinaryStrToInt( const char s[] )
@native(name="BinaryStrToInt", ret=pfp.fields.Int64)
def BinaryStrToInt(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#char[] ConvertString( const char src[], int srcCharSet, int destCharSet )
@native(name="ConvertString", ret=pfp.fields.String)
def ConvertString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#string DosDateToString( DOSDATE d, char format[] = "MM/dd/yyyy" )
@native(name="DosDateToString", ret=pfp.fields.String)
def DosDateToString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#string DosTimeToString( DOSTIME t, char format[] = "hh:mm:ss" )
@native(name="DosTimeToString", ret=pfp.fields.String)
def DosTimeToString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#string EnumToString( enum e )
@native(name="EnumToString", ret=pfp.fields.String)
def EnumToString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#char[] FileNameGetBase( const char path[], int includeExtension=true )
@native(name="FileNameGetBase", ret=pfp.fields.String)
def FileNameGetBase(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#wchar_t[] FileNameGetBaseW( const wchar_t path[], int includeExtension=true )
@native(name="FileNameGetBaseW", ret=pfp.fields.WString)
def FileNameGetBaseW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#char[] FileNameGetExtension( const char path[] )
@native(name="FileNameGetExtension", ret=pfp.fields.String)
def FileNameGetExtension(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#wchar_t[] FileNameGetExtensionW( const wchar_t path[] )
@native(name="FileNameGetExtensionW", ret=pfp.fields.WString)
def FileNameGetExtensionW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#char[] FileNameGetPath( const char path[], int includeSlash=true )
@native(name="FileNameGetPath", ret=pfp.fields.String)
def FileNameGetPath(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#wchar_t[] FileNameGetPathW( const wchar_t path[], int includeSlash=true )
@native(name="FileNameGetPathW", ret=pfp.fields.WString)
def FileNameGetPathW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#char[] FileNameSetExtension( const char path[], const char extension[] )
@native(name="FileNameSetExtension", ret=pfp.fields.String)
def FileNameSetExtension(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#wchar_t[] FileNameSetExtensionW( const wchar_t path[], const wchar_t extension[] )
@native(name="FileNameSetExtensionW", ret=pfp.fields.WString)
def FileNameSetExtensionW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#string FileTimeToString( FILETIME ft, char format[] = "MM/dd/yyyy hh:mm:ss" )
@native(name="FileTimeToString", ret=pfp.fields.String)
def FileTimeToString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#char[] IntToBinaryStr( int64 num, int numGroups=0, int includeSpaces=true )
@native(name="IntToBinaryStr", ret=pfp.fields.String)
def IntToBinaryStr(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int Memcmp( const uchar s1[], const uchar s2[], int n )
@native(name="Memcmp", ret=pfp.fields.Int)
def Memcmp(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#void Memcpy( uchar dest[], const uchar src[], int n, int destOffset=0, int srcOffset=0 )
@native(name="Memcpy", ret=pfp.fields.Void)
def Memcpy(params, ctxt, scope, stream, coord):
if len(params) < 3:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least 3 args")
if len(params) > 5:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at most 5 args")
dest = params[0]
src = params[1]
n = PYVAL(params[2])
if len(params) > 3:
dest_offset = PYVAL(params[3])
else:
dest_offset = 0
if len(params) > 4:
src_offset = PYVAL(params[4])
else:
src_offset = 0
if not isinstance(dest, pfp.fields.Array):
raise errors.InvalidArguments(coord,
dest.__class__.__name__,
"an array"
)
if not isinstance(src, pfp.fields.Array):
raise errors.InvalidArguments(coord,
src.__class__.__name__,
"an array"
)
count = 0
while n > 0:
val = dest.field_cls()
val._pfp__set_value(src[src_offset + count]._pfp__value)
# TODO clone it
dest[dest_offset + count] = val
count += 1
n -= 1
#void Memset( uchar s[], int c, int n )
@native(name="Memset", ret=pfp.fields.Void)
def Memset(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#string OleTimeToString( OLETIME ot, char format[] = "MM/dd/yyyy hh:mm:ss" )
@native(name="OleTimeToString", ret=pfp.fields.String)
def OleTimeToString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int RegExMatch( string str, string regex );
@native(name="RegExMatch", ret=pfp.fields.Int)
def RegExMatch(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int RegExMatchW( wstring str, wstring regex );
@native(name="RegExMatchW", ret=pfp.fields.Int)
def RegExMatchW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int RegExSearch( string str, string regex, int &matchSize, int startPos=0 );
@native(name="RegExSearch", ret=pfp.fields.Int)
def RegExSearch(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int RegExSearchW( wstring str, wstring regex, int &matchSize, int startPos=0 );
@native(name="RegExSearchW", ret=pfp.fields.Int)
def RegExSearchW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int SPrintf( char buffer[], const char format[] [, argument, ... ] )
@native(name="SPrintf", ret=pfp.fields.Int)
def SPrintf(params, ctxt, scope, stream, coord):
if len(params) < 2:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least 2 args")
if len(params) == 2:
params[0]._pfp__set_value(PYSTR(params[1]))
return len(PYSTR(params[1]))
parts = []
for part in params[2:]:
if isinstance(part, pfp.fields.Array) or isinstance(part, pfp.fields.String):
parts.append(PYSTR(part))
else:
parts.append(PYVAL(part))
new_value = PYSTR(params[1]) % tuple(parts)
params[0]._pfp__set_value(new_value)
return len(new_value)
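# Example of the behavior implemented above (010 template syntax):
#     SPrintf(buf, "%d-%s", 10, "abc");  // buf becomes "10-abc", returns 6
# With exactly two arguments the format string is copied into buf verbatim.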
#int SScanf( char str[], char format[], ... )
@native(name="SScanf", ret=pfp.fields.Int)
def SScanf(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#void Strcat( char dest[], const char src[] )
@native(name="Strcat", ret=pfp.fields.Void)
def Strcat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int Strchr( const char s[], char c )
@native(name="Strchr", ret=pfp.fields.Int)
def Strchr(params, ctxt, scope, stream, coord):
if len(params) != 2:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "2 arguments")
haystack = PYSTR(params[0])
needle = chr(PYVAL(params[1]))
try:
return haystack.index(needle)
    # expected condition when the character doesn't exist
except ValueError as e:
return -1
#int Strcmp( const char s1[], const char s2[] )
@native(name="Strcmp", ret=pfp.fields.Int)
def Strcmp(params, ctxt, scope, stream, coord):
if len(params) != 2:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "2 arguments")
str1 = PYSTR(params[0])
str2 = PYSTR(params[1])
return _cmp(str1, str2)
#void Strcpy( char dest[], const char src[] )
@native(name="Strcpy", ret=pfp.fields.Void)
def Strcpy(params, ctxt, scope, stream, coord):
if len(params) != 2:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "2 arguments")
params[0]._pfp__set_value(PYSTR(params[1]))
#char[] StrDel( const char str[], int start, int count )
@native(name="StrDel", ret=pfp.fields.String)
def StrDel(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int Stricmp( const char s1[], const char s2[] )
@native(name="Stricmp", ret=pfp.fields.Int)
def Stricmp(params, ctxt, scope, stream, coord):
if len(params) != 2:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "2 arguments")
str1 = PYSTR(params[0]).lower()
str2 = PYSTR(params[1]).lower()
return _cmp(str1, str2)
#int StringToDosDate( string s, DOSDATE &d, char format[] = "MM/dd/yyyy" )
@native(name="StringToDosDate", ret=pfp.fields.Int)
def StringToDosDate(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int StringToDosTime( string s, DOSTIME &t, char format[] = "hh:mm:ss" )
@native(name="StringToDosTime", ret=pfp.fields.Int)
def StringToDosTime(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int StringToFileTime( string s, FILETIME &ft, char format[] = "MM/dd/yyyy hh:mm:ss" )
@native(name="StringToFileTime", ret=pfp.fields.Int)
def StringToFileTime(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int StringToOleTime( string s, OLETIME &ot, char format[] = "MM/dd/yyyy hh:mm:ss" )
@native(name="StringToOleTime", ret=pfp.fields.Int)
def StringToOleTime(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int StringToTimeT( string s, time_t &t, char format[] = "MM/dd/yyyy hh:mm:ss" )
@native(name="StringToTimeT", ret=pfp.fields.Int)
def StringToTimeT(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#char[] StringToUTF8( const char src[], int srcCharSet=CHARSET_ANSI )
@native(name="StringToUTF8", ret=pfp.fields.String)
def StringToUTF8(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#wstring StringToWString( const char str[], int srcCharSet=CHARSET_ANSI )
@native(name="StringToWString", ret=pfp.fields.WString)
def StringToWString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int Strlen( const char s[] )
@native(name="Strlen", ret=pfp.fields.Int)
def Strlen(params, ctxt, scope, stream, coord):
if len(params) != 1:
        raise errors.InvalidArguments(coord, "{} args".format(len(params)), "1 argument")
val = params[0]
if isinstance(val, pfp.fields.Array):
val = val._array_to_str()
else:
val = PYVAL(val)
return len(val)
#int Strncmp( const char s1[], const char s2[], int n )
@native(name="Strncmp", ret=pfp.fields.Int)
def Strncmp(params, ctxt, scope, stream, coord):
if len(params) != 3:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "3 arguments")
max_chars = PYVAL(params[2])
str1 = PYSTR(params[0])[:max_chars]
str2 = PYSTR(params[1])[:max_chars]
return _cmp(str1, str2)
#void Strncpy( char dest[], const char src[], int n )
@native(name="Strncpy", ret=pfp.fields.Void)
def Strncpy(params, ctxt, scope, stream, coord):
if len(params) != 3:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "3 arguments")
max_len = PYVAL(params[2])
params[0]._pfp__set_value(PYSTR(params[1])[:max_len])
#int Strnicmp( const char s1[], const char s2[], int n )
@native(name="Strnicmp", ret=pfp.fields.Int)
def Strnicmp(params, ctxt, scope, stream, coord):
if len(params) != 3:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "3 arguments")
max_chars = PYVAL(params[2])
str1 = PYSTR(params[0])[:max_chars].lower()
str2 = PYSTR(params[1])[:max_chars].lower()
return _cmp(str1, str2)
#int Strstr( const char s1[], const char s2[] )
@native(name="Strstr", ret=pfp.fields.Int)
def Strstr(params, ctxt, scope, stream, coord):
if len(params) != 2:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "2 arguments")
haystack = PYSTR(params[0])
needle = PYSTR(params[1])
try:
return haystack.index(needle)
# expected condition when the substring doesn't exist
except ValueError as e:
return -1
#char[] SubStr( const char str[], int start, int count=-1 )
@native(name="SubStr", ret=pfp.fields.String)
def SubStr(params, ctxt, scope, stream, coord):
if len(params) < 2:
        raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least 2 arguments")
string = PYSTR(params[0])
start = PYVAL(params[1])
count = -1
if len(params) > 2:
count = PYVAL(params[2])
if count < 0:
count = -1
if count == -1:
return string[start:]
else:
return string[start:start+count]
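# Example of the behavior implemented above: SubStr("hello", 1, 3) yields "ell";
# omitting count, or passing a negative count, returns the rest of the string.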
#string TimeTToString( time_t t, char format[] = "MM/dd/yyyy hh:mm:ss" )
@native(name="TimeTToString", ret=pfp.fields.String)
def TimeTToString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#char ToLower( char c )
@native(name="ToLower", ret=pfp.fields.Char)
def ToLower(params, ctxt, scope, stream, coord):
if len(params) != 1:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "1 argument")
return ord(chr(PYVAL(params[0])).lower())
#wchar_t ToLowerW( wchar_t c )
@native(name="ToLowerW", ret=pfp.fields.WChar)
def ToLowerW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#char ToUpper( char c )
@native(name="ToUpper", ret=pfp.fields.Char)
def ToUpper(params, ctxt, scope, stream, coord):
if len(params) != 1:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "1 argument")
return ord(chr(PYVAL(params[0])).upper())
#void WMemcmp( const wchar_t s1[], const wchar_t s2[], int n )
@native(name="WMemcmp", ret=pfp.fields.Void)
def WMemcmp(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#void WMemcpy( wchar_t dest[], const wchar_t src[], int n, int destOffset=0, int srcOffset=0 )
@native(name="WMemcpy", ret=pfp.fields.Void)
def WMemcpy(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#void WMemset( wchar_t s[], int c, int n )
@native(name="WMemset", ret=pfp.fields.Void)
def WMemset(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#void WStrcat( wchar_t dest[], const wchar_t src[] )
@native(name="WStrcat", ret=pfp.fields.Void)
def WStrcat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int WStrchr( const wchar_t s[], wchar_t c )
@native(name="WStrchr", ret=pfp.fields.Int)
def WStrchr(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int WStrcmp( const wchar_t s1[], const wchar_t s2[] )
@native(name="WStrcmp", ret=pfp.fields.Int)
def WStrcmp(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#void WStrcpy( wchar_t dest[], const wchar_t src[] )
@native(name="WStrcpy", ret=pfp.fields.Void)
def WStrcpy(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#wchar_t[] WStrDel( const wchar_t str[], int start, int count )
@native(name="WStrDel", ret=pfp.fields.WString)
def WStrDel(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int WStricmp( const wchar_t s1[], const wchar_t s2[] )
@native(name="WStricmp", ret=pfp.fields.Int)
def WStricmp(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#char[] WStringToString( const wchar_t str[], int destCharSet=CHARSET_ANSI )
@native(name="WStringToString", ret=pfp.fields.String)
def WStringToString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#char[] WStringToUTF8( const wchar_t str[] )
@native(name="WStringToUTF8", ret=pfp.fields.String)
def WStringToUTF8(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int WStrlen( const wchar_t s[] )
@native(name="WStrlen", ret=pfp.fields.Int)
def WStrlen(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int WStrncmp( const wchar_t s1[], const wchar_t s2[], int n )
@native(name="WStrncmp", ret=pfp.fields.Int)
def WStrncmp(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#void WStrncpy( wchar_t dest[], const wchar_t src[], int n )
@native(name="WStrncpy", ret=pfp.fields.Void)
def WStrncpy(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int WStrnicmp( const wchar_t s1[], const wchar_t s2[], int n )
@native(name="WStrnicmp", ret=pfp.fields.Int)
def WStrnicmp(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#int WStrstr( const wchar_t s1[], const wchar_t s2[] )
@native(name="WStrstr", ret=pfp.fields.Int)
def WStrstr(params, ctxt, scope, stream, coord):
raise NotImplementedError()
#wchar_t[] WSubStr( const wchar_t str[], int start, int count=-1 )
@native(name="WSubStr", ret=pfp.fields.WString)
def WSubStr(params, ctxt, scope, stream, coord):
raise NotImplementedError()
|
|
"""
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import _preprocess_data
from ..base import BaseEstimator
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..feature_selection.base import SelectorMixin
from ..utils import (as_float_array, check_random_state, check_X_y, safe_mask)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
from ..exceptions import ConvergenceWarning
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.randint(
0, 2, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
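# The returned scores_ give, for each feature, the fraction of the n_resampling
# randomized fits in which that feature ended up selected (non-zero coefficient).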
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
SelectorMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_preprocess_data = staticmethod(_preprocess_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2, estimator=self)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = \
self._preprocess_data(X, y, self.fit_intercept, self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if memory is None:
memory = Memory(cachedir=None, verbose=0)
elif isinstance(memory, six.string_types):
memory = Memory(cachedir=memory, verbose=0)
elif not isinstance(memory, Memory):
raise ValueError("'memory' should either be a string or"
" a sklearn.externals.joblib.Memory"
" instance, got 'memory={!r}' instead.".format(
type(memory)))
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def _get_support_mask(self):
"""Get the boolean mask indicating which features are selected.
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected
for retention.
"""
check_is_fitted(self, 'scores_')
return self.scores_ > self.selection_threshold
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
    # Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by subsampling the training data and
computing a Lasso estimate where the penalty of a random subset of
coefficients has been scaled. By performing this double
randomization several times, the method assigns high scores to
features that are repeatedly selected across randomizations. This
is known as stability selection. In short, features selected more
often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article, which corresponds to the ``scaling`` parameter here.
scaling : float, optional
The s parameter used to randomly scale the penalty of different
features (See :ref:`User Guide <randomized_l1>` for details ).
Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learned more robust and almost independent of
the number of samples. The same property is not valid for
standardized data. However, if you wish to standardize, please
use `preprocessing.StandardScaler` before calling `fit` on an
estimator with `normalize=False`.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up calculations.
If set to 'auto' let us decide.
The Gram matrix can also be passed as argument, but it will be used
only for the selection of parameter alpha, if alpha is 'aic' or 'bic'.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of sklearn.externals.joblib.Memory or string, optional \
(default=None)
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
For an example, see :ref:`examples/linear_model/plot_sparse_recovery.py
<sphx_glr_auto_examples_linear_model_plot_sparse_recovery.py>`.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, Lasso, ElasticNet
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=None):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
alpha = self.alpha
if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
precompute = self.precompute
# A precomputed Gram array is useless, since _randomized_lasso
        # changes X at each iteration
if hasattr(precompute, '__array__'):
precompute = 'auto'
assert precompute in (True, False, None, 'auto')
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float64))
if C.ndim > 1:
raise ValueError("C should be 1-dimensional array-like, "
"but got a {}-dimensional array-like instead: {}."
.format(C.ndim, C))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by subsampling the training
data and fitting a L1-penalized LogisticRegression model where the
penalty of a random subset of coefficients has been scaled. By
performing this double randomization several times, the method
assigns high scores to features that are repeatedly selected across
randomizations. This is known as stability selection. In short,
features selected more often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float or array-like of shape [n_reg_parameter], optional, default=1
The regularization parameter C in the LogisticRegression.
When C is an array, fit will take each regularization parameter in C
one by one for LogisticRegression and store results for each one
in ``all_scores_``, where columns and rows represent corresponding
reg_parameters and features.
scaling : float, optional, default=0.5
The s parameter used to randomly scale the penalty of different
features (See :ref:`User Guide <randomized_l1>` for details ).
Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of sklearn.externals.joblib.Memory or string, optional \
(default=None)
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
For an example, see :ref:`examples/linear_model/plot_sparse_recovery.py
<sphx_glr_auto_examples_linear_model_plot_sparse_recovery.py>`.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, LogisticRegression
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=None):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _preprocess_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize=normalize)
return X, y, X_offset, y, X_scale
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stability path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : int, RandomState instance or None, optional, default=None
The generator used to randomize the design. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
For an example, see :ref:`examples/linear_model/plot_sparse_recovery.py
<sphx_glr_auto_examples_linear_model_plot_sparse_recovery.py>`.
"""
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'])
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.randint(0, 2, size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
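# A minimal usage sketch, assuming a scikit-learn release that still ships this
# module (i.e. where ``sklearn.linear_model.RandomizedLasso`` is importable);
# the synthetic data and threshold below are illustrative only:
#
#     import numpy as np
#     from sklearn.linear_model import RandomizedLasso
#     rng = np.random.RandomState(0)
#     X = rng.randn(100, 10)
#     # only the first three features actually drive the target
#     y = X[:, 0] + 2 * X[:, 1] - X[:, 2] + 0.1 * rng.randn(100)
#     rlasso = RandomizedLasso(alpha='aic', selection_threshold=0.25,
#                              random_state=0)
#     rlasso.fit(X, y)
#     print(rlasso.scores_)        # selection frequency per feature
#     print(rlasso.get_support())  # boolean mask of selected features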
|
|
from devito import Eq, Operator, Function, TimeFunction
def iso_stencil(field, model, **kwargs):
"""
    Stencil for the scalar isotropic visco-acoustic variable density
    self-adjoint wave equation:
b/v^2 [ P.dt2 + w/Q P.dt ] = (b P.dx).dx + (b P.dy).dy + (b P.dz).dz + s
Note derivative shifts are omitted for simplicity above.
See implementation notebook sa_01_iso_implementation1.ipynb for more details.
Parameters
----------
field : TimeFunction, required
The pressure wavefield computed solution.
    model : Model object, with attributes:
        'b': Buoyancy = reciprocal density (units: m^3/kg)
        'vp': Velocity (units: m/msec or km/sec)
        'damp': The w/Q field for dissipation-only attenuation.
forward : bool, optional
The propagation direction. Defaults to True.
q : TimeFunction, Function or float, optional
Full-space/time source of the wave-equation.
Returns
----------
The time update stencil.
"""
# Get the Functions for buoyancy, velocity, and wOverQ
vp, b, wOverQ = model.vp, model.b, model.damp
# Define time step of pressure wavefield to be updated
forward = kwargs.get('forward', True)
if forward:
field_next = field.forward
field_prev = field.backward
else:
field_next = field.backward
field_prev = field.forward
# Get the source
q = kwargs.get('q', 0)
# Define the time update equation for 2d/3d
if len(field.data.shape) == 3:
t, x, y = field.dimensions
eq_time_update = (t.spacing**2 * vp**2 / b) * \
((b * field.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +
(b * field.dy(x0=y+y.spacing/2)).dy(x0=y-y.spacing/2) + q) + \
(2 - t.spacing * wOverQ) * field + \
(t.spacing * wOverQ - 1) * field_prev
else:
t, x, y, z = field.dimensions
eq_time_update = (t.spacing**2 * vp**2 / b) * \
((b * field.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +
(b * field.dy(x0=y+y.spacing/2)).dy(x0=y-y.spacing/2) +
(b * field.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2) + q) + \
(2 - t.spacing * wOverQ) * field + \
(t.spacing * wOverQ - 1) * field_prev
return [Eq(field_next, eq_time_update)]
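# For reference, the stencil above is the explicit update obtained by solving
# the discretized wave equation for the next time step (with dt = t.spacing):
#     P(t+dt) = (dt**2 * v**2 / b) * [ (b P.dx).dx + (b P.dy).dy (+ (b P.dz).dz) + q ]
#               + (2 - dt * w/Q) * P(t) + (dt * w/Q - 1) * P(t-dt)
# with each first derivative evaluated on a half-shifted grid (x0 = x +/- dx/2),
# exactly as written in eq_time_update.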
def IsoFwdOperator(model, geometry, space_order=8, save=False, **kwargs):
"""
    Construct a forward modeling Operator in a variable density visco-acoustic medium.
See implementation notebook sa_01_iso_implementation1.ipynb for more details.
Parameters
----------
model : Model
Object containing the physical parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Space discretization order.
save : int or Buffer, optional
Saving flag, True saves all time steps. False saves three timesteps.
Defaults to False.
Returns
----------
The Operator implementing forward modeling.
"""
src = geometry.src
rec = geometry.rec
vp, b = model.vp, model.b
# Create symbols for wavefield, source and receivers
u = TimeFunction(name='u', grid=model.grid,
save=geometry.nt if save else None,
time_order=2, space_order=space_order)
# Time update equation
eqn = iso_stencil(u, model, forward=True)
# Construct expression to inject source values, injecting at p(t+dt)
t = u.grid.time_dim
src_term = src.inject(field=u.forward, expr=src * t.spacing**2 * vp**2 / b)
# Create interpolation expression for receivers, extracting at p(t)
rec_term = rec.interpolate(expr=u)
# Substitute spacing terms to reduce flops
spacing_map = model.spacing_map
return Operator(eqn + src_term + rec_term, subs=spacing_map,
name='IsoFwdOperator', **kwargs)
def IsoAdjOperator(model, geometry, space_order=8, save=False, **kwargs):
"""
    Construct an adjoint modeling Operator in a variable density visco-acoustic medium.
Note the FD evolution will be time reversed.
See implementation notebook sa_01_iso_implementation1.ipynb for more details.
Parameters
----------
model : Model
Object containing the physical parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Space discretization order.
save : int or Buffer, optional
Saving flag, True saves all time steps. False saves three timesteps.
Defaults to False.
Returns
----------
The Operator implementing adjoint modeling.
"""
rec = geometry.rec
src = geometry.src
vp, b = model.vp, model.b
# Create symbols for wavefield, source and receivers
v = TimeFunction(name='v', grid=model.grid,
save=geometry.nt if save else None,
time_order=2, space_order=space_order)
# Time update equation
eqn = iso_stencil(v, model, forward=False)
# Construct expression to inject receiver values, injecting at p(t-dt)
t = model.grid.time_dim
rec_term = rec.inject(field=v.backward, expr=rec * t.spacing**2 * vp**2 / b)
# Create interpolation expression for the adjoint-source, extracting at p(t)
src_term = src.interpolate(expr=v)
# Substitute spacing terms to reduce flops
spacing_map = model.spacing_map
return Operator(eqn + rec_term + src_term, subs=spacing_map,
name='IsoAdjOperator', **kwargs)
def IsoJacobianFwdOperator(model, geometry, space_order=8,
save=False, **kwargs):
"""
Construct a linearized JacobianForward modeling Operator in a variable density
    visco-acoustic medium.
Parameters
----------
model : Model
Object containing the physical parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Space discretization order.
save : int or Buffer, optional
Saving flag, True saves all time steps. False saves three timesteps.
Defaults to False.
Returns
----------
The Operator implementing Jacobian forward modeling.
"""
src = geometry.src
rec = geometry.rec
vp, b, wOverQ = model.vp, model.b, model.damp
# Create p0, dp wavefields and dm velocity perturbation field
u0 = TimeFunction(name="u0", grid=model.grid,
save=geometry.nt if save else None,
time_order=2, space_order=space_order)
du = TimeFunction(name="du", grid=model.grid,
time_order=2, space_order=space_order)
dm = Function(name="dm", grid=model.grid, space_order=space_order)
# Time update equations
# JKW: this is pretty cool, simultaneously solving for p0 and dp!
# The 1st equation is derived in sa_01_iso_implementation1.ipynb
# The 2nd equation is derived in sa_02_iso_implementation2.ipynb
t = u0.time_dim
eqn1 = iso_stencil(u0, model, forward=True)
# Linearized source and stencil
lin_src = 2 * b * dm * vp**-3 * (wOverQ * u0.dt(x0=t-t.spacing/2) + u0.dt2)
eqn2 = iso_stencil(du, model, forward=True, q=lin_src)
# Construct expression to inject source values, injecting at p0(t+dt)
src_term = src.inject(field=u0.forward, expr=src * t.spacing**2 * vp**2 / b)
# Create interpolation expression for receivers, extracting at dp(t)
rec_term = rec.interpolate(expr=du)
# Substitute spacing terms to reduce flops
spacing_map = model.spacing_map
return Operator(eqn1 + src_term + eqn2 + rec_term, subs=spacing_map,
name='IsoJacobianFwdOperator', **kwargs)
def IsoJacobianAdjOperator(model, geometry, space_order=8,
save=True, **kwargs):
"""
Construct a linearized JacobianAdjoint modeling Operator in a variable density
    visco-acoustic medium.
Parameters
----------
model : Model
Object containing the physical parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Space discretization order.
save : int or Buffer, optional
Saving flag, True saves all time steps. False saves three timesteps.
        Defaults to True.
Returns
----------
The Operator implementing Jacobian adjoint modeling.
"""
rec = geometry.rec
vp, b, wOverQ = model.vp, model.b, model.damp
# Create p0, dp wavefields and dm velocity perturbation field
u0 = TimeFunction(name="u0", grid=model.grid,
save=geometry.nt if save else None,
time_order=2, space_order=space_order)
du = TimeFunction(name="du", grid=model.grid,
time_order=2, space_order=space_order)
dm = Function(name="dm", grid=model.grid, space_order=space_order)
# Time update equation
t = u0.time_dim
eqn = iso_stencil(du, model, forward=False)
dm_update = Eq(dm, dm +
du * (2 * b * vp**-3 * (wOverQ * u0.dt(x0=t-t.spacing/2) + u0.dt2)))
# Construct expression to inject receiver values, injecting at p(t-dt)
rec_term = rec.inject(field=du.backward, expr=rec * t.spacing**2 * vp**2 / b)
# Substitute spacing terms to reduce flops
spacing_map = model.spacing_map
return Operator([dm_update] + eqn + rec_term, subs=spacing_map,
name='IsoJacobianAdjOperator', **kwargs)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import re
import shutil
import sys
import threading
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
    method_counts: dict. Contains the counts of times each callback method was
run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
adam.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
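  # The expected counts above follow from 10 training samples (or a Sequence of
  # length 5) with batch_size=2 and epochs=5 -> 5 train batches per epoch
  # (25 total), plus 4 validation samples -> 2 test batches per epoch (10 total).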
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
model = self._get_model()
counter = Counter()
model.evaluate(x, y, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
model = self._get_model()
counter = Counter()
model.predict(x, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
def _run_load_weights_on_restart_test_common_iterations(self):
def get_input_datasets():
# Simple training input.
train_input = [[1]] * 16
train_label = [[0]] * 16
ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
class Bias(base_layer.Layer):
def build(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros')
def call(self, inputs):
return inputs + self.bias
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
filepath = os.path.join(self.get_temp_dir(), 'checkpoint.h5')
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
model.fit(
train_ds,
epochs=3,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
])
# The filepath should exist after fitting with callback.
self.assertTrue(os.path.exists(filepath))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The filepath should continue to exist after fitting without callback.
self.assertTrue(os.path.exists(filepath))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
      # Asserting that the weights one epoch after initial fitting and another
      # epoch after that are close, if a ModelCheckpoint with
      # load_weights_on_restart=True is given (so the model is restored at the
      # beginning of training).
self.assertAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are different, if a ModelCheckpoint with
# load_weights_on_restart=False is given (so the model is not restored at
# the beginning of training).
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
model.load_weights(filepath)
weights_before_additional_fit = model.get_weights()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
])
model.load_weights(filepath)
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
    # This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
      # This should reduce the LR after the first epoch (due to the high
      # min_delta).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
          # On Windows, due to \r\n line endings we may end up reading empty
          # lines after each line. Skip empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
      # Start an arbitrary thread that should run during model
      # training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
    ValueError: If an event file contains a summary of an unexpected kind.
"""
result = _SummaryFile()
for (dirpath, dirnames, filenames) in os.walk(logdir):
del dirnames # unused
for filename in filenames:
if not filename.startswith('events.out.'):
continue
path = os.path.join(dirpath, filename)
for event in summary_iterator.summary_iterator(path):
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata because
# the Keras callback uses `summary_ops_v2` to emit old-style
# summaries. See b/124535134.
kind = value.WhichOneof('value')
container = {
'simple_value': result.scalars,
'image': result.images,
'histo': result.histograms,
'tensor': result.tensors,
}.get(kind)
if container is None:
raise ValueError(
'Unexpected summary kind %r in event file %s:\n%r'
% (kind, path, event))
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
return model
def test_TensorBoard_default_logdir(self):
"""Regression test for cross-platform pathsep in default logdir."""
os.chdir(self.get_temp_dir())
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard() # no logdir specified
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(logdir='.')
train_dir = os.path.join('.', 'logs', 'train')
validation_dir = os.path.join('.', 'logs', 'validation')
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, dirnames, filenames) in os.walk(self.logdir):
del dirnames # unused
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.images, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
},
)
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2NonParameterizedTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_seq_model(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
return model
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir,
write_graph=True,
profile_batch=0)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
  def test_TensorBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_autoTrace(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
    # Tracing was enabled only on the 10000th batch, which is never reached,
    # so no trace should have been written.
self.assertEmpty(summary_file.tensors)
if __name__ == '__main__':
test.main()
|
|
# coding: utf-8
#
# Copyright 2010-2014 Ning, Inc.
# Copyright 2014-2020 Groupon, Inc
# Copyright 2020-2021 Equinix, Inc
# Copyright 2014-2021 The Billing Project, LLC
#
# The Billing Project, LLC licenses this file to you under the Apache License, version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Kill Bill
Kill Bill is an open-source billing and payments platform # noqa: E501
OpenAPI spec version: 0.22.22-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OverdueStateConfig(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'Str',
'is_clear_state': 'Bool',
'condition': 'OverdueCondition',
'external_message': 'Str',
'is_block_changes': 'Bool',
'is_disable_entitlement': 'Bool',
'subscription_cancellation_policy': 'Str',
'auto_reevaluation_interval_days': 'Int'
}
attribute_map = {
'name': 'name',
'is_clear_state': 'isClearState',
'condition': 'condition',
'external_message': 'externalMessage',
'is_block_changes': 'isBlockChanges',
'is_disable_entitlement': 'isDisableEntitlement',
'subscription_cancellation_policy': 'subscriptionCancellationPolicy',
'auto_reevaluation_interval_days': 'autoReevaluationIntervalDays'
}
def __init__(self, name=None, is_clear_state=None, condition=None, external_message=None, is_block_changes=None, is_disable_entitlement=None, subscription_cancellation_policy=None, auto_reevaluation_interval_days=None): # noqa: E501
"""OverdueStateConfig - a model defined in Swagger""" # noqa: E501
self._name = None
self._is_clear_state = None
self._condition = None
self._external_message = None
self._is_block_changes = None
self._is_disable_entitlement = None
self._subscription_cancellation_policy = None
self._auto_reevaluation_interval_days = None
self.discriminator = None
if name is not None:
self.name = name
if is_clear_state is not None:
self.is_clear_state = is_clear_state
if condition is not None:
self.condition = condition
if external_message is not None:
self.external_message = external_message
if is_block_changes is not None:
self.is_block_changes = is_block_changes
if is_disable_entitlement is not None:
self.is_disable_entitlement = is_disable_entitlement
if subscription_cancellation_policy is not None:
self.subscription_cancellation_policy = subscription_cancellation_policy
if auto_reevaluation_interval_days is not None:
self.auto_reevaluation_interval_days = auto_reevaluation_interval_days
@property
def name(self):
"""Gets the name of this OverdueStateConfig. # noqa: E501
:return: The name of this OverdueStateConfig. # noqa: E501
:rtype: Str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this OverdueStateConfig.
:param name: The name of this OverdueStateConfig. # noqa: E501
:type: Str
"""
self._name = name
@property
def is_clear_state(self):
"""Gets the is_clear_state of this OverdueStateConfig. # noqa: E501
:return: The is_clear_state of this OverdueStateConfig. # noqa: E501
:rtype: Bool
"""
return self._is_clear_state
@is_clear_state.setter
def is_clear_state(self, is_clear_state):
"""Sets the is_clear_state of this OverdueStateConfig.
:param is_clear_state: The is_clear_state of this OverdueStateConfig. # noqa: E501
:type: Bool
"""
self._is_clear_state = is_clear_state
@property
def condition(self):
"""Gets the condition of this OverdueStateConfig. # noqa: E501
:return: The condition of this OverdueStateConfig. # noqa: E501
:rtype: OverdueCondition
"""
return self._condition
@condition.setter
def condition(self, condition):
"""Sets the condition of this OverdueStateConfig.
:param condition: The condition of this OverdueStateConfig. # noqa: E501
:type: OverdueCondition
"""
self._condition = condition
@property
def external_message(self):
"""Gets the external_message of this OverdueStateConfig. # noqa: E501
:return: The external_message of this OverdueStateConfig. # noqa: E501
:rtype: Str
"""
return self._external_message
@external_message.setter
def external_message(self, external_message):
"""Sets the external_message of this OverdueStateConfig.
:param external_message: The external_message of this OverdueStateConfig. # noqa: E501
:type: Str
"""
self._external_message = external_message
@property
def is_block_changes(self):
"""Gets the is_block_changes of this OverdueStateConfig. # noqa: E501
:return: The is_block_changes of this OverdueStateConfig. # noqa: E501
:rtype: Bool
"""
return self._is_block_changes
@is_block_changes.setter
def is_block_changes(self, is_block_changes):
"""Sets the is_block_changes of this OverdueStateConfig.
:param is_block_changes: The is_block_changes of this OverdueStateConfig. # noqa: E501
:type: Bool
"""
self._is_block_changes = is_block_changes
@property
def is_disable_entitlement(self):
"""Gets the is_disable_entitlement of this OverdueStateConfig. # noqa: E501
:return: The is_disable_entitlement of this OverdueStateConfig. # noqa: E501
:rtype: Bool
"""
return self._is_disable_entitlement
@is_disable_entitlement.setter
def is_disable_entitlement(self, is_disable_entitlement):
"""Sets the is_disable_entitlement of this OverdueStateConfig.
:param is_disable_entitlement: The is_disable_entitlement of this OverdueStateConfig. # noqa: E501
:type: Bool
"""
self._is_disable_entitlement = is_disable_entitlement
@property
def subscription_cancellation_policy(self):
"""Gets the subscription_cancellation_policy of this OverdueStateConfig. # noqa: E501
:return: The subscription_cancellation_policy of this OverdueStateConfig. # noqa: E501
:rtype: Str
"""
return self._subscription_cancellation_policy
@subscription_cancellation_policy.setter
def subscription_cancellation_policy(self, subscription_cancellation_policy):
"""Sets the subscription_cancellation_policy of this OverdueStateConfig.
:param subscription_cancellation_policy: The subscription_cancellation_policy of this OverdueStateConfig. # noqa: E501
:type: Str
"""
self._subscription_cancellation_policy = subscription_cancellation_policy
@property
def auto_reevaluation_interval_days(self):
"""Gets the auto_reevaluation_interval_days of this OverdueStateConfig. # noqa: E501
:return: The auto_reevaluation_interval_days of this OverdueStateConfig. # noqa: E501
:rtype: Int
"""
return self._auto_reevaluation_interval_days
@auto_reevaluation_interval_days.setter
def auto_reevaluation_interval_days(self, auto_reevaluation_interval_days):
"""Sets the auto_reevaluation_interval_days of this OverdueStateConfig.
:param auto_reevaluation_interval_days: The auto_reevaluation_interval_days of this OverdueStateConfig. # noqa: E501
:type: Int
"""
self._auto_reevaluation_interval_days = auto_reevaluation_interval_days
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OverdueStateConfig):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
|
'''add_master_protein - assign a unique master to each peptide across
all samples using a maximum parsimony approach
======================================================================
:Author: Tom Smith, Manasa Ramakrishna
:Release: $Id$
:Date: |today|
:Tags: Python RNP Proteomics
Purpose
-------
This script takes a set of input files (*.txt/*.xlsx) containing
peptide-level output and annotates the table with unique master protein
information for downstream analyses.
The following columns are added:
- master_protein: The master protein(s) for the peptide. See below
for how this is derived
- master_uniprot_id: The uniprot id(s) for the master protein(s)
- protein_description: Description(s) for the master protein(s)
- protein_length: The length(s) of the master protein(s)
- crap_protein: Is the protein in the cRAP database of common
  proteomics proteins, e.g. keratin
- crap_associated_protein: does the protein share peptides with a
protein in the cRAP database of common proteomics proteins
If a log file is requested (--logfile), basic statistics are collected and
written to the log file.
Fasta description format
------------------------
The source of the protein (SwissProt or TrEMBL) is derived from the
protein fasta description, with SwissProt proteins starting 'sp' and
TrEMBL 'tr'. Furthermore, the description column is derived from the
fasta description too. For this reason the fasta databases must be
correctly formatted as in the examples below. This is the standard
format for fasta files from uniprot.
format:
Three-level identifier followed by protein description:
>[sp|tr]|[Uniprot id]|[Protein name] [Description]
examples:
>sp|P27361|MK03_HUMAN Mitogen-activated protein kinase 3 OS=Homo sapiens GN=MAPK3 PE=1 SV=4
>tr|F8W1T5|F8W1T5_HUMAN GTPase RhebL1 (Fragment) OS=Homo sapiens GN=RHEBL1 PE=4 SV=1
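As an illustration only (the helper name `_split_fasta_title` is
hypothetical and not part of this script), a header in this format can be
decomposed roughly as follows:

    def _split_fasta_title(title):
        # e.g. "sp|P27361|MK03_HUMAN Mitogen-activated protein kinase 3 ..."
        first_word, _, description = title.lstrip(">").partition(" ")
        source, uniprot_id, protein_name = first_word.split("|")
        return source, uniprot_id, protein_name, description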
Deriving master proteins
----------------------------
Matching peptides to their source proteins (protein inference) is a
common task in proteomics and there are many possible
approaches. Ultimately, the aim is usually to identify the most likely
source protein since taking all possible sources makes downstream
analyses very complex. Here we use the parsimonious approach to
identify a minimal set of proteins which explains all peptides
observed. In essence, the approach is as follows (a toy sketch of the
greedy selection is given at the end of this section):
- start with list of all peptides
- sort proteins by the number of peptides observed
- take the protein(s) with the most peptides and remove these from the peptides list
- continue through the sorted proteins, removing peptides, until the
peptides list is empty
Additionally, we prioritise matches to SwissProt proteins over TrEMBL
proteins. SwissProt proteins have been manually curated and should not
contain any redundant proteins, truncated sequences, mis-annotations
etc. On the other hand, the set of TrEMBL proteins will certainly
contain proteins which are redundant with respect to the SwissProt
proteins as well as truncated and just plain wrong(!) proteins. It is
useful to include the TrEMBL proteins to catch peptides which are from
a protein or isoform which has not been curated into SwissProt
yet. However, where a SwissProt match is found, we believe it is
preferable to ignore any TrEMBL match. Here, for all peptides matched
to both SwissProt and TrEMBL proteins, we remove all the TrEMBL
matches.
In some instances, it is not possible to assign a single protein to a
peptide. In these cases, the proteins names, uniprot ids, descriptions
and lengths are ';' separated in the outfile.
In addition to the conditions above, in some cases we are looking for
master proteins that are consistent across a set of samples. This is
to ensure that for a given set of peptides, the same master protein is
assigned to all samples.
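As an illustration only, the core greedy selection can be sketched as
below. The function name `greedy_master_proteins` is hypothetical and not
part of this script; the real implementation (see main() below) additionally
handles the SwissProt/TrEMBL priority and the cross-sample consistency
described above.

    def greedy_master_proteins(pro2pep):
        # pro2pep: dict mapping protein accession -> set of matched peptides
        remaining = set().union(*pro2pep.values())  # peptides still unexplained
        retained = []
        while remaining:
            # take the protein covering the most still-unexplained peptides
            best = max(pro2pep, key=lambda p: len(pro2pep[p] & remaining))
            retained.append(best)
            remaining -= pro2pep[best]
        return retained

    # e.g. greedy_master_proteins(
    #     {"A": {"p1", "p2", "p3"}, "B": {"p2"}, "C": {"p4"}})
    # returns ["A", "C"]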
Usage
-----
By default, the outfile will be created in the same directory with the
suffix annotated.xlsx. You can change the outfile name by specifying
the option --outfile
python add_master_protein.py --infile=RNP.xlsx --fasta-db=Human_201701.fasta
--fasta-crap-db=cRAP_FullIdentifiers.fasta --outfile=master_prot_annotated.txt
--logfile=master_prot_annot.log
Command line options
--------------------
'''
import argparse
import collections
import copy
import os
import re
import sys
import io
import gzip
import math
import pandas as pd
import numpy as np
import proteomics.fasta as fasta
from time import gmtime, strftime
def writeSectionHeader(logfile, section_header):
#underliner = "".join(("-",)*len(section_header))
section_blocker = ("======================================="
"=======================================")
underliner1 = ("----------------------------------------"
"----------------------------------------")
logfile.write("\n%s\n%s\n" % (section_blocker, section_header))
logfile.write("%s\n" % underliner1)
return section_blocker
def main(argv=sys.argv):
parser = argparse.ArgumentParser(
argv, usage=__doc__)
optional = parser.add_argument_group('optional arguments')
required = parser.add_argument_group('required arguments')
required.add_argument('-i', '--infile', dest="infile",
required=True, nargs='+',
help=("Provide a single file or folder with "
"multiple files for processing"))
required.add_argument('-f', '--fasta-db', dest="fasta_db",
required=True,
help=("Input a fasta file for all proteins in "
"the species of interest"))
required.add_argument('-fc', '--fasta-crap-db', dest="fasta_crap_db",
required=True,
help=("Input a fasta file for all proteins that "
"are common contaminants in a mass-spec "
"experiment"))
required.add_argument('--peptide-column', dest="pep_column",
required=True,
help=("What's the name of the column with the "
"peptide sequence?"))
optional.add_argument('--matches-column', dest="matches_column",
default=None,
help=("Column with the matches already identified "
"for the peptide"))
optional.add_argument('--check-crap', dest="check_crap",
default=False, action='store_true',
help=("Check each peptide against the cRAP fasta"))
optional.add_argument('--only-swissprot', dest="strict_sw",
default=False, action='store_true',
help=("Ignore matches to non-swissprot proteins"))
optional.add_argument('--matches-separator', dest="matches_sep",
default=",",
help=("Separator for the matches column"))
optional.add_argument('-o', '--outfile', dest="outfile",
default=None,
help=("Enter a file name for your output"))
optional.add_argument('-os', '--outfile-suffix', dest="outfile_suffix",
default=None,
help=("Enter a suffix to add to the output files"))
optional.add_argument('-l', '--logfile', dest="logfile",
default=os.devnull,
help=("Enter a file name for logging program "
"output. Else, nothing will be printed"))
args = vars(parser.parse_args())
if not args['outfile'] and not args['outfile_suffix']:
raise ValueError("must supply either --outfile or "
"--outfile-suffix option")
logfile = open(args['logfile'], 'w')
logfile.write("Logfile for annotate_rnp.py %s\n\n" % (
strftime("%Y-%m-%d %H:%M:%S", gmtime())))
section_blocker = writeSectionHeader(logfile, "Script arguments:")
for key, value in args.items():
logfile.write("%s: %s\n" % (key, value))
logfile.write("%s\n\n" % section_blocker)
#(1) Get the mappings between peptide and proteins
# (1.1) Build dictionaries using the fasta database to map from:
# 1. protein accession: protein
# 2. protein accession: sequence
    # 3. protein accession: description e.g >sp|O43707|ACTN4_HUMAN
    # 4. protein accession: long description e.g >sp|O43707|ACTN4_HUMAN|Alpha-actinin-4
protein2description = {}
protein2longdescription = {}
protein2seq = {}
tr_proteins = set()
sp_proteins = set()
for fa_infile in (args['fasta_db'], args['fasta_crap_db']):
if fa_infile.endswith(".gz"):
fa_iterator = fasta.FastaIterator(
io.TextIOWrapper(gzip.open(fa_infile)))
else:
fa_iterator = fasta.FastaIterator(open(fa_infile))
for entry in fa_iterator:
accession = entry.title.split(" ")[0].split("|")[1]
protein2seq[accession] = entry.sequence.upper().replace("I", "L")
protein2description[accession] = entry.title.split(" ")[0]
protein2longdescription[accession] = "|".join(entry.title.split(" ")[0:2])
if entry.title.split(" ")[0].split("|")[0] == "sp":
sp_proteins.add(accession)
elif entry.title.split(" ")[0].split("|")[0] == "tr":
tr_proteins.add(accession)
else:
raise ValueError("Protein does not appear to be either"
"SwissProt(sp) or TrEMBL(tr)")
crap_proteins = set()
associated_crap_proteins = set()
if args['fasta_crap_db'].endswith(".gz"):
fa_iterator = fasta.FastaIterator(
io.TextIOWrapper(gzip.open(args['fasta_crap_db'])))
else:
fa_iterator = fasta.FastaIterator(open(args['fasta_crap_db']))
for entry in fa_iterator:
accession = entry.title.split(" ")[0].split("|")[1]
crap_proteins.add(accession)
    # (1.2) Parse the infiles to obtain maps of peptides to proteins and vice versa
pep2pro = collections.defaultdict(lambda: collections.defaultdict(set))
pep2allpro = collections.defaultdict(set)
pro2pep = collections.defaultdict(set)
top_level_proteins = set()
initial_proteins = set()
if not args['matches_column']:
peptides = set()
for infile in args['infile']:
# read the data into a dataframe
infile_split = infile.split(".")
if infile_split[-1] == "xlsx":
peptide_df = pd.read_excel(infile)
elif infile_split[-1] in ["text", "txt", "tsv"]:
peptide_df = pd.read_table(infile, sep='\t', comment=None)
elif infile_split[-1] == "csv":
peptide_df = pd.read_table(infile, sep=',', comment=None)
else:
raise ValueError("File type must one of .xlsx, "
".txt, .text, .tsv, .csv")
# add some basic annotations
#rnp_df['tr_only'] = [x.count("sp|") == 0 for x in rnp_df['Proteins']]
#rnp_df['matches'] = [len(x.split(",")) for x in rnp_df['Proteins']]
# if matches have already been made, use these
# (1.1) extract the initial mappings between proteins and peptides
if args['matches_column']:
for row_ix, row_values in peptide_df[
[args['matches_column'], args['pep_column']]].iterrows():
# if empty match, will be converted to NaN (type=float)
if type(row_values[args['matches_column']]) is float:
# could manually search against all proteins in database?
continue
proteins = row_values[args['matches_column']].split(
args['matches_sep'])
# rather annoyingly, PD adds "#CONTAM#" to the crap protein ids
proteins = [x.replace("#CONTAM#", "") for x in proteins]
peptide = row_values[args['pep_column']]
if args['check_crap']:
add_crap_proteins = []
for prot in crap_proteins:
if peptide.replace("I", "L") in protein2seq[prot]:
add_crap_proteins.append(prot)
proteins.extend(add_crap_proteins)
'''if peptide == "RTPPAGVFYQGWSATPIANGSLGHDIHHPR":
add_all_proteins = []
print(proteins)
for prot in protein2seq:
if peptide.replace("I", "L") in protein2seq[prot]:
add_all_proteins.append(prot)
proteins.extend(add_all_proteins)
print(proteins)
raise ValueError()'''
for protein in proteins:
if protein in crap_proteins:
associated_crap_proteins.update(proteins)
if protein not in protein2seq:
logfile.write(
"protein %s matches peptide %s but is not found "
"in fasta database\n" % (protein, peptide))
# remove proteins not in fasta database
proteins = set([prot for prot in proteins if prot in protein2seq])
if peptide in pep2pro:
if not pep2allpro[peptide] == proteins:
current_protein_matches = ", ".join(pep2allpro[peptide])
new_protein_matches = ", ".join(proteins)
logfile.write(
("The same peptide is observed more than once with "
"different proteins! : peptide = %(peptide)s, "
"matching proteins = %(current_protein_matches)s "
"or %(new_protein_matches)s\n" % locals()))
pep2allpro[peptide].update(proteins)
else:
pep2allpro[peptide] = proteins
for protein in proteins:
initial_proteins.add(protein)
pro2pep[protein].add(peptide)
protein_description = protein2description[protein]
if protein in sp_proteins:
protein_level = 1
top_level_proteins.add(protein)
elif protein in tr_proteins:
protein_level = 2
else:
raise ValueError("Protein does not appear to be either"
"SwissProt(sp) or TrEMBL(tr)")
pep2pro[peptide][protein_level].add(protein)
else: # if we don't have a column of matches, get the set of all peptides
peptides.update(peptide_df[args['pep_column']])
if not args['matches_column']:
# search against all proteins in the provided databases
n = 0
for peptide in peptides:
n += 1
if n % 1000 == 0:
logfile.write("searched %i peptides against database %s\n" % (
n, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
proteins = [prot for prot in protein2seq if
peptide in protein2seq[prot]]
for protein in proteins:
initial_proteins.add(protein)
pro2pep[protein].add(peptide)
protein_description = protein2description[protein]
if protein in sp_proteins:
protein_level = 1
top_level_proteins.add(protein)
elif protein in tr_proteins:
protein_level = 2
else:
raise ValueError("Protein does not appear to be either"
"SwissProt(sp) or TrEMBL(tr)")
pep2pro[peptide][protein_level].add(protein)
section_blocker = writeSectionHeader(logfile, "Initial file(s) stats")
logfile.write("# initial peptides: %i\n" % len(pep2pro))
logfile.write("# initial proteins: %i\n" % len(pro2pep))
logfile.write("# initial SwissProt proteins: %i\n" % len(top_level_proteins))
logfile.write("# initial TrEMBL proteins: %i\n" % (
len(pro2pep)-len(top_level_proteins)))
logfile.write("%s\n\n" % section_blocker)
if not args['strict_sw']:
section_blocker = writeSectionHeader(
logfile, "Deciding which TrEMBL proteins to retain:")
# (1.2) find the peptides with only TrEMBL protein matches and
# 'upgrade' these TrEMBL proteins to being equivalent to SwissProt
# across all peptides where these TrEMBL proteins match
tr_only_peptides = set([x for x in pep2pro.keys() if len(pep2pro[x][1]) == 0])
logfile.write("# peptides with only TrEMBL matches: %i\n" % (
len(tr_only_peptides)))
set_upgraded = set()
for peptide in tr_only_peptides:
upgraded = pep2pro[peptide][2]
set_upgraded.update(upgraded)
top_level_proteins.update(upgraded)
logfile.write("# TrEMBL proteins retained as no SwissProt matches for "
"peptide: %i\n" % (len(set_upgraded)))
# 'upgrade' the selected TrEMBL proteins
for peptide in pep2pro:
pep2pro[peptide][2] = pep2pro[peptide][2].difference(set_upgraded)
pep2pro[peptide][1] = pep2pro[peptide][1].union(set_upgraded)
logfile.write("%s\n\n" % section_blocker)
# (1.3) Use a parsimonious approach to identify the minimum number
# of proteins required to cover all the peptides:
# Start from the protein(s) with the most peptides and mark these as covered.
# Continue with remaining proteins in order of peptides per protein
# until all peptides are covered
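# Worked example: if protein A matches peptides {p1, p2, p3} and protein B matches
# {p2, p3}, A is retained first; once p1-p3 are covered, B contributes no new
# peptides and is never retained.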
section_blocker = writeSectionHeader(
logfile, ("Parsimonious method to identify minimal set of proteins"
" to account for all peptides %s" % (
strftime("%Y-%m-%d %H:%M:%S", gmtime()))))
retained_proteins = []
peptides = copy.deepcopy(set(pep2pro.keys()))
peptide_counts = {}
tmppro2pep = copy.deepcopy(pro2pep)
new_top_level_proteins = copy.deepcopy(top_level_proteins)
new_pep2pro = collections.defaultdict(set)
peptide_count = max(map(len, tmppro2pep.values()))
while True:
# (1.3.1) If all peptides covered or the maximum peptides per
# protein = 0, break.
if len(peptides) == 0 or peptide_count == 0:
logfile.write("All peptides are now accounted for %s\n" % (
strftime("%Y-%m-%d %H:%M:%S", gmtime())))
break
peptide_count -= 1
top_proteins = set()
top_score = 0
# (1.3.2) Find the proteins with the highest number of peptide matches
for protein in new_top_level_proteins:
if len(tmppro2pep[protein]) == top_score:
top_proteins.add(protein)
elif len(tmppro2pep[protein]) > top_score:
top_score = len(tmppro2pep[protein])
top_proteins = set((protein,))
logfile.write("%i protein(s) with %i peptides\n" % (
len(top_proteins), top_score))
# (1.3.3) Remove the top proteins and the associated peptides
for top_protein in top_proteins:
new_top_level_proteins.remove(top_protein)
retained_proteins.append(top_protein)
for peptide in pro2pep[top_protein]:
new_pep2pro[peptide].add(top_protein)
if peptide in peptides:
peptides.remove(peptide)
for protein in pep2pro[peptide][1]:
if protein == top_protein:
continue
if peptide in tmppro2pep[protein]:
tmppro2pep[protein].remove(peptide)
logfile.write("\n%i proteins retained\n" % len(retained_proteins))
logfile.write("%i SwissProt proteins retained\n" % len(
set(retained_proteins).intersection(sp_proteins)))
logfile.write("%i TrEMBL proteins retained\n" % len(
set(retained_proteins).intersection(tr_proteins)))
logfile.write("\nNote: If not all SwissProt proteins were retained, this means\n"
"these proteins only included peptides which were observed\n"
"in other proteins which had a greater number of peptides\n")
logfile.write("%s\n\n" % section_blocker)
section_blocker = writeSectionHeader(logfile, "proteins per peptide:")
counts = collections.Counter([len(x) for x in new_pep2pro.values()])
sum_counts = sum(counts.values())
for k, v in counts.items():
logfile.write("%i peptide(s) (%.2f %%) have %i master protein(s)\n" % (
v, (100.0 * v) / sum_counts, k))
logfile.write("%s\n\n" % section_blocker)
# Check all the peptides are covered
if not args['strict_sw']:
assert set(pep2pro.keys()).difference(set(new_pep2pro.keys())) == set()
else:
missing_peptides = set(pep2pro.keys()).difference(set(new_pep2pro.keys()))
logfile.write("%i peptide(s) (%.2f %%) have no master protein(s)\n" % (
len(missing_peptides), (100.0 * len(missing_peptides)) / sum_counts))
if args['outfile']:
outfile = open(args['outfile'], "w")
for infile in args['infile']:
# read the data into a dataframe
infile_split = infile.split(".")
if infile_split[-1] == "xlsx":
peptide_df = pd.read_excel(infile)
elif infile_split[-1] in ["text", "txt", "tsv"]:
peptide_df = pd.read_table(infile, sep='\t', comment=None)
elif infile_split[-1] == "csv":
peptide_df = pd.read_table(infile, sep=',', comment=None)
else:
raise ValueError("File type must one of .xlsx, "
".txt, .text, .tsv, .csv")
# add the top protein and uniprot id annotations
peptide_df['master_protein'] = [
";".join(new_pep2pro[protein]) for protein in peptide_df[args['pep_column']]]
# (1.5) derive further annotations
protein_lengths = []
protein_descriptions = []
crap_protein = []
associated_crap_protein = []
peptide_start = []
peptide_end = []
for ix, row in peptide_df.iterrows():
proteins = row['master_protein'].split(";")
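# I and L are isobaric and indistinguishable by MS, so peptides are matched with I -> L;
# unknown residues (X) are also treated as L here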
pep_sequence = row['Sequence'].upper().replace("I", "L").replace("X", "L")
if proteins == [""]:
protein_lengths.append("")
protein_descriptions.append("")
crap_protein.append("")
associated_crap_protein.append("")
peptide_start.append("")
peptide_end.append("")
else:
protein_lengths.append(
";".join(map(str, [len(protein2seq[x]) for x in proteins])))
protein_descriptions.append(
";".join([protein2description[x] for x in proteins]))
# (1.5.1) does peptide match a cRAP protein?
crap = 0
for protein in proteins:
if protein in crap_proteins:
crap = 1
break
crap_protein.append(crap)
# (1.5.2) does peptide match a protein associated with a cRAP protein?
associated_crap = 0
for protein in proteins:
if protein in associated_crap_proteins:
associated_crap = 1
break
associated_crap_protein.append(associated_crap)
starts = []
ends = []
for protein in proteins:
protein_sequence = protein2seq[protein]
all_matches = re.findall(pep_sequence, protein_sequence)
if len(all_matches) > 1:
logfile.write(
"peptide: %s is observed more than once in protein: %s\n" % (
pep_sequence, protein))
starts.append("NA")
ends.append("NA")
elif len(all_matches) == 0:
logfile.write(
"peptide: %s is not observed in protein: %s\n" % (
pep_sequence, protein))
starts.append("NA")
ends.append("NA")
else:
peptide_match = re.search(pep_sequence, protein_sequence)
starts.append(peptide_match.start())
ends.append(peptide_match.end())
try:
peptide_start.append(";".join(map(str, starts)))
peptide_end.append(";".join(map(str, ends)))
except Exception:
print(starts)
print(ends)
raise ValueError()
peptide_df['protein_length'] = protein_lengths
peptide_df['protein_description'] = protein_descriptions
peptide_df['peptide_start'] = peptide_start
peptide_df['peptide_end'] = peptide_end
peptide_df['crap_protein'] = crap_protein
peptide_df['associated_crap_protein'] = associated_crap_protein
peptide_df['unique'] = [1 if len(x.split(";"))==1 else 0
for x in peptide_df['master_protein']]
if args['outfile']:
peptide_df['filename'] = infile
peptide_df.to_csv(outfile, index=False, sep="\t", mode="a")
os.chmod(args['outfile'], 0o666)
else:
outfile = ".".join(infile.split(".")[:-1]) + args['outfile_suffix']
peptide_df.to_csv(outfile, index=False, sep="\t")
os.chmod(outfile, 0o666)
if args['outfile']:
outfile.close()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
from django.contrib import admin
from django.contrib import databrowse
from django.contrib import messages
from django.conf import settings
from django.contrib.admin.util import get_deleted_objects
from django.shortcuts import render_to_response
from django.utils.encoding import force_unicode
from django.contrib.admin import helpers
from django import template
import fullhistory
from fullhistory.admin import FullHistoryAdmin
from django.contrib.auth.models import User
from accounts.models import Course, Batch, Profile
from accounts.google_apps_manager import GoogleAppsManager
from accounts import accounts_manager
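# Helper to register a model with the admin site, databrowse and fullhistory in one call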
def register(model, modelAdmin):
admin.site.register(model, modelAdmin)
databrowse.site.register(model)
fullhistory.register_model(model)
class DefaultAdmin(FullHistoryAdmin):
pass
class ProfileAdmin(FullHistoryAdmin):
# Options for admin
list_display = ('full_name',
'user',
'college_email_id',
'personal_email_id',
'personal_contact_number',
'actual_date_of_birth',
'google_account_created',
'active',
'last_modified_on',)
#list_editable = ('college_email_id',)
list_filter = ('google_account_created',
'active',
'year_of_joining',
'course',
'blood_group',
'reservation_category',
)
list_per_page = 20
search_fields = ('user__first_name',
'user__last_name',
'user__username',
'personal_email_id',
'college_email_id')
actions = ['create_accounts_in_google',
'delete_accounts_from_google',
'populate_college_email_id',
'reset_password',
'mark_as_processed',
'mark_as_not_processed',
'deactivate_account',
'reactivate_account',
]
fieldsets = ((None,
{'fields': ('user', 'course', 'year_of_joining', 'college_email_id',)
}
),
('Personal Details',
{'classes': ('collapse','closed',),
'fields': settings.PERSONAL_DETAILS_FIELD_LIST
}
),
('Family Details',
{'classes': ('collapse','closed',),
'fields': settings.FAMILY_DETAILS_FIELD_LIST
}
),
('Contact Details',
{'classes': ('collapse','closed',),
'fields': settings.CONTACT_DETAILS_FIELD_LIST
}
),
('Education Details',
{'classes': ('collapse','closed',),
'fields': settings.EDUCATION_DETAILS_FIELD_LIST
}
),
('Misc Details',
{'classes': ('collapse','closed',),
'fields': settings.MISC_DETAILS_FIELD_LIST
}
),
)
def create_accounts_in_google(self, request, queryset):
"""Creates the Google Apps account for the user
"""
gam = GoogleAppsManager()
password = User.objects.make_random_password(length=8, allowed_chars='0123456789')
for profile in queryset:
accounts_manager.create_account_in_google_apps(request, profile, password)
def delete_accounts_from_google(self, request, queryset):
"""Deletes the user from Google Apps database
"""
gam = GoogleAppsManager()
opts = self.model._meta
app_label = opts.app_label
deletable_objects = [profile for profile in queryset]
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post'):
for profile in queryset:
try:
gam.delete_account(profile.user.username)
profile.google_account_created = False
profile.save()
except Exception as e:
messages.error(request,
'Error while deleting %s. Error : %s' %
(profile.register_number, e))
else:
messages.success(request,
'Successfully deleted %s' % profile.register_number)
return None
context = {
"title": "Are you sure?",
"object_name": force_unicode(opts.verbose_name),
"deletable_objects": [deletable_objects],
'queryset': queryset,
"perms_lacking": False,
"opts": opts,
"root_path": self.admin_site.root_path,
"app_label": app_label,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
}
# Display the confirmation page
return render_to_response('accounts/delete_from_google_apps_confirmation.html',
context,
context_instance=template.RequestContext(request))
def populate_college_email_id(self, request, queryset):
"""Computes unique email id and populates
"""
for profile in queryset:
# Populate only if it is empty.
if profile.college_email_id:
messages.error(request,
'College email id is already populated for %s. Not modifying.' % profile.register_number)
else:
username = accounts_manager.get_new_username(profile.user.first_name,
profile.user.last_name)
if username:
profile.college_email_id = username + '@' + settings.GOOGLE_APPS_DOMAIN
profile.save()
else:
messages.error(request,
'Could not generate a unique username for %s' % profile.register_number)
def reset_password(self, request, queryset):
gam = GoogleAppsManager()
passwd = User.objects.make_random_password(length=8, allowed_chars='0123456789')
for profile in queryset:
if not profile.google_account_created:
messages.error(request,
'No Google Apps account for %s' % profile.register_number)
continue
try:
username = profile.register_number
result = gam.change_password(username,
passwd)
except Exception as e:
messages.error(request,
'Failed to update password for %s. Reason : %s' %
(username, e))
else:
messages.success(request,
'Successfully updated password for %s. New Password is %s' %
(username, passwd))
def mark_as_processed(self, request, queryset):
queryset.update(google_account_created=True)
def mark_as_not_processed(self, request, queryset):
queryset.update(google_account_created=False)
def deactivate_account(self, request, queryset):
gam = GoogleAppsManager()
for profile in queryset:
try:
gam.suspend_user(profile.user.username)
profile.active = False
profile.save()
except Exception as e:
messages.error(request,
'Error while deactivating %s. Reason : %s' %
(profile.user.username, e))
else:
messages.success(request,
'Deactivated %s' % profile.user.username)
def reactivate_account(self, request, queryset):
gam = GoogleAppsManager()
for profile in queryset:
try:
gam.unsuspend_user(profile.user.username)
profile.active = True
profile.save()
except Exception as e:
messages.error(request,
'Error while reactivating %s. Reason : %s' %
(profile.user.username, e))
else:
messages.success(request,
'Reactivated %s' % profile.user.username)
register(Course, DefaultAdmin)
register(Batch, DefaultAdmin)
register(Profile, ProfileAdmin)
|
|
"""
@package mi.instrument.noaa.ooicore.driver
@file marine-integrations/mi/instrument/noaa/ooicore/driver.py
@author Pete Cable
@brief BOTPT
Release notes:
"""
import re
import time
import datetime
import ntplib
from mi.core.driver_scheduler import DriverSchedulerConfigKey, TriggerType
from mi.core.instrument.data_particle import DataParticleKey, DataParticleValue
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility, ParameterDictType
from mi.core.common import BaseEnum, Units, Prefixes
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol, InitializationType
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentDataException
from mi.core.instrument.driver_dict import DriverDictKey
import mi.instrument.noaa.botpt.ooicore.particles as particles
import mi.core.log
__author__ = 'Pete Cable'
__license__ = 'Apache 2.0'
log = mi.core.log.get_logger()
META_LOGGER = mi.core.log.get_logging_metaclass('trace')
###
# Driver Constant Definitions
###
NEWLINE = '\n'
LILY_STRING = 'LILY,'
NANO_STRING = 'NANO,'
IRIS_STRING = 'IRIS,'
HEAT_STRING = 'HEAT,'
SYST_STRING = 'SYST,'
LILY_COMMAND = '*9900XY'
IRIS_COMMAND = LILY_COMMAND
NANO_COMMAND = '*0100'
NANO_RATE_RESPONSE = '*0001TH'
MAX_BUFFER_SIZE = 2 ** 16
class ScheduledJob(BaseEnum):
"""
Instrument scheduled jobs
"""
LEVELING_TIMEOUT = 'botpt_leveling_timeout'
HEATER_TIMEOUT = 'botpt_heater_timeout'
NANO_TIME_SYNC = 'botpt_nano_time_sync'
ACQUIRE_STATUS = 'botpt_acquire_status'
class ProtocolState(BaseEnum):
"""
Instrument protocol states
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
class ProtocolEvent(BaseEnum):
"""
Protocol events
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
GET = DriverEvent.GET
SET = DriverEvent.SET
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
DISCOVER = DriverEvent.DISCOVER
ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
START_DIRECT = DriverEvent.START_DIRECT
EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
START_LEVELING = 'PROTOCOL_EVENT_START_LEVELING'
STOP_LEVELING = 'PROTOCOL_EVENT_STOP_LEVELING'
NANO_TIME_SYNC = 'PROTOCOL_EVENT_NANO_TIME_SYNC'
START_HEATER = 'PROTOCOL_EVENT_START_HEATER'
STOP_HEATER = 'PROTOCOL_EVENT_STOP_HEATER'
LEVELING_TIMEOUT = 'PROTOCOL_EVENT_LEVELING_TIMEOUT'
HEATER_TIMEOUT = 'PROTOCOL_EVENT_HEATER_TIMEOUT'
class Capability(BaseEnum):
"""
Protocol events that should be exposed to users (subset of above).
"""
START_DIRECT = ProtocolEvent.START_DIRECT
STOP_DIRECT = ProtocolEvent.STOP_DIRECT
GET = ProtocolEvent.GET
SET = ProtocolEvent.SET
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
START_LEVELING = ProtocolEvent.START_LEVELING
STOP_LEVELING = ProtocolEvent.STOP_LEVELING
START_HEATER = ProtocolEvent.START_HEATER
STOP_HEATER = ProtocolEvent.STOP_HEATER
class Parameter(DriverParameter):
"""
Device specific parameters.
"""
AUTO_RELEVEL = 'auto_relevel' # Auto-relevel mode
XTILT_TRIGGER = 'xtilt_relevel_trigger'
YTILT_TRIGGER = 'ytilt_relevel_trigger'
LEVELING_TIMEOUT = 'relevel_timeout'
LEVELING_FAILED = 'leveling_failed'
OUTPUT_RATE = 'output_rate_hz'
HEAT_DURATION = 'heat_duration'
HEATER_ON = 'heater_on'
LILY_LEVELING = 'lily_leveling'
@classmethod
def reverse_dict(cls):
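"""Map parameter values back to attribute names (e.g. 'auto_relevel' -> 'AUTO_RELEVEL') for constraint lookups."""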
return dict((v, k) for k, v in cls.dict().iteritems())
class ParameterConstraint(BaseEnum):
"""
Constraints for parameters
(type, min, max)
"""
XTILT_TRIGGER = (float, 0, 330)
YTILT_TRIGGER = (float, 0, 330)
LEVELING_TIMEOUT = (int, 60, 6000)
OUTPUT_RATE = (int, 1, 40)
HEAT_DURATION = (int, 1, 8)
AUTO_RELEVEL = (bool, None, None)
class InstrumentCommand(BaseEnum):
"""
Instrument Commands
"""
LILY_ON = LILY_STRING + LILY_COMMAND + 'C2' # turns on continuous data
LILY_OFF = LILY_STRING + LILY_COMMAND + 'C-OFF' # turns off continuous data
LILY_DUMP1 = LILY_STRING + LILY_COMMAND + '-DUMP-SETTINGS' # outputs current settings
LILY_DUMP2 = LILY_STRING + LILY_COMMAND + '-DUMP2' # outputs current extended settings
LILY_START_LEVELING = LILY_STRING + LILY_COMMAND + '-LEVEL,1' # starts leveling
LILY_STOP_LEVELING = LILY_STRING + LILY_COMMAND + '-LEVEL,0' # stops leveling
NANO_ON = NANO_STRING + NANO_COMMAND + 'E4' # turns on continuous data
NANO_OFF = NANO_STRING + NANO_COMMAND + 'E3' # turns off continuous data
NANO_DUMP1 = NANO_STRING + NANO_COMMAND + 'IF' # outputs current settings
NANO_SET_TIME = NANO_STRING + 'TS' # requests the SBC to update the NANO time
NANO_SET_RATE = NANO_STRING + '*0100EW*0100TH=' # sets the sample rate in Hz
IRIS_ON = IRIS_STRING + IRIS_COMMAND + 'C2' # turns on continuous data
IRIS_OFF = IRIS_STRING + IRIS_COMMAND + 'C-OFF' # turns off continuous data
IRIS_DUMP1 = IRIS_STRING + IRIS_COMMAND + '-DUMP-SETTINGS' # outputs current settings
IRIS_DUMP2 = IRIS_STRING + IRIS_COMMAND + '-DUMP2' # outputs current extended settings
HEAT = HEAT_STRING # turns the heater on; HEAT,<number of hours>
SYST_DUMP1 = SYST_STRING + '1'
class Prompt(BaseEnum):
"""
Instrument responses (basic)
"""
LILY_ON = LILY_COMMAND + 'C2'
LILY_OFF = LILY_COMMAND + 'C-OFF'
IRIS_ON = IRIS_COMMAND + 'C2'
IRIS_OFF = IRIS_COMMAND + 'C-OFF'
LILY_START_LEVELING = LILY_COMMAND + '-LEVEL,1'
LILY_STOP_LEVELING = LILY_COMMAND + '-LEVEL,0'
class RegexResponse(BaseEnum):
"""
Instrument responses (regex)
"""
HEAT = re.compile(r'(HEAT,.{19},\*\d)\n')
###############################################################################
# Driver
###############################################################################
# noinspection PyMethodMayBeStatic
class InstrumentDriver(SingleConnectionInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state machine.
"""
def __init__(self, evt_callback):
"""
Driver constructor.
@param evt_callback Driver process event callback.
"""
#Construct superclass.
SingleConnectionInstrumentDriver.__init__(self, evt_callback)
########################################################################
# Superclass overrides for resource query.
########################################################################
def get_resource_params(self):
"""
Return list of device parameters available.
@return List of parameters
"""
return Parameter.list()
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(BaseEnum, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
# noinspection PyUnusedLocal,PyMethodMayBeStatic
class Protocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
__metaclass__ = META_LOGGER
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The newline.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
# Build protocol state machine.
self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent, ProtocolEvent.ENTER, ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
handlers = {
ProtocolState.UNKNOWN: [
(ProtocolEvent.ENTER, self._handler_generic_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.DISCOVER, self._handler_unknown_discover),
],
ProtocolState.AUTOSAMPLE: [
(ProtocolEvent.ENTER, self._handler_autosample_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.GET, self._handler_command_get),
(ProtocolEvent.ACQUIRE_STATUS, self._handler_acquire_status),
(ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample),
(ProtocolEvent.START_LEVELING, self._handler_start_leveling),
(ProtocolEvent.STOP_LEVELING, self._handler_stop_leveling),
(ProtocolEvent.NANO_TIME_SYNC, self._handler_time_sync),
(ProtocolEvent.START_HEATER, self._handler_start_heater),
(ProtocolEvent.STOP_HEATER, self._handler_stop_heater),
(ProtocolEvent.LEVELING_TIMEOUT, self._handler_leveling_timeout),
(ProtocolEvent.HEATER_TIMEOUT, self._handler_heater_timeout),
],
ProtocolState.COMMAND: [
(ProtocolEvent.ENTER, self._handler_command_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.GET, self._handler_command_get),
(ProtocolEvent.SET, self._handler_command_set),
(ProtocolEvent.ACQUIRE_STATUS, self._handler_acquire_status),
(ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample),
(ProtocolEvent.START_LEVELING, self._handler_start_leveling),
(ProtocolEvent.STOP_LEVELING, self._handler_stop_leveling),
(ProtocolEvent.START_DIRECT, self._handler_command_start_direct),
(ProtocolEvent.NANO_TIME_SYNC, self._handler_time_sync),
(ProtocolEvent.START_HEATER, self._handler_start_heater),
(ProtocolEvent.STOP_HEATER, self._handler_stop_heater),
(ProtocolEvent.LEVELING_TIMEOUT, self._handler_leveling_timeout),
(ProtocolEvent.HEATER_TIMEOUT, self._handler_heater_timeout),
],
ProtocolState.DIRECT_ACCESS: [
(ProtocolEvent.ENTER, self._handler_direct_access_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct),
(ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct),
],
}
for state in handlers:
for event, handler in handlers[state]:
self._protocol_fsm.add_handler(state, event, handler)
# Construct the metadata dictionaries
self._build_param_dict()
self._build_command_dict()
self._build_driver_dict()
# Add build handlers for device commands.
for command in InstrumentCommand.list():
if command in [InstrumentCommand.NANO_SET_RATE, InstrumentCommand.HEAT]:
self._add_build_handler(command, self._build_command_with_value)
else:
self._add_build_handler(command, self._build_simple_command)
# Add response handlers for device commands.
for command in InstrumentCommand.list():
self._add_response_handler(command, self._generic_response_handler)
# Start state machine in UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
# commands sent to device to be filtered in responses for telnet DA
self._sent_cmds = []
# create chunker
self._chunker = StringChunker(Protocol.sieve_function)
self._last_data_timestamp = 0
self.has_pps = True
# set up scheduled event handling
self.initialize_scheduler()
self._add_scheduler_event(ScheduledJob.ACQUIRE_STATUS, ProtocolEvent.ACQUIRE_STATUS)
self._add_scheduler_event(ScheduledJob.NANO_TIME_SYNC, ProtocolEvent.NANO_TIME_SYNC)
@staticmethod
def sieve_function(raw_data):
"""
Sort data in the chunker...
@param raw_data: Data to be searched for samples
@return: list of (start,end) tuples
"""
matchers = []
return_list = []
matchers.append(particles.HeatSampleParticle.regex_compiled())
matchers.append(particles.IrisSampleParticle.regex_compiled())
matchers.append(particles.NanoSampleParticle.regex_compiled())
matchers.append(particles.LilySampleParticle.regex_compiled())
matchers.append(particles.LilyLevelingParticle.regex_compiled())
for matcher in matchers:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
def _got_chunk(self, chunk, ts):
"""
Process chunk output by the chunker. Generate samples and (possibly) react
@param chunk: data
@param ts: ntp timestamp
@return sample
@throws InstrumentProtocolException
"""
possible_particles = [
(particles.LilySampleParticle, self._check_for_autolevel),
(particles.LilyLevelingParticle, self._check_completed_leveling),
(particles.HeatSampleParticle, None),
(particles.IrisSampleParticle, None),
(particles.NanoSampleParticle, self._check_pps_sync),
]
for particle_type, func in possible_particles:
sample = self._extract_sample(particle_type, particle_type.regex_compiled(), chunk, ts)
if sample:
if func:
func(sample)
return sample
raise InstrumentProtocolException(u'unhandled chunk received by _got_chunk: [{0!r:s}]'.format(chunk))
def _extract_sample(self, particle_class, regex, line, timestamp, publish=True):
"""
Overridden to set the quality flag for LILY particles that are out of range.
@param particle_class: Class type for particle
@param regex: regular expression to verify data
@param line: data
@param timestamp: ntp timestamp
@param publish: boolean to indicate if sample should be published
@return: extracted sample
"""
if regex.match(line):
if particle_class == particles.LilySampleParticle and self._param_dict.get(Parameter.LEVELING_FAILED):
particle = particle_class(line, port_timestamp=timestamp, quality_flag=DataParticleValue.OUT_OF_RANGE)
else:
particle = particle_class(line, port_timestamp=timestamp)
parsed_sample = particle.generate()
if publish and self._driver_event:
self._driver_event(DriverAsyncEvent.SAMPLE, parsed_sample)
return parsed_sample
def _filter_capabilities(self, events):
"""
Filter a list of events to only include valid capabilities
@param events: list of events to be filtered
@return: list of filtered events
"""
return [x for x in events if Capability.has(x)]
def _build_command_dict(self):
"""
Populate the command dictionary with commands.
"""
self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
self._cmd_dict.add(Capability.ACQUIRE_STATUS, display_name="Acquire Status")
self._cmd_dict.add(Capability.START_LEVELING, display_name="Start LILY Leveling")
self._cmd_dict.add(Capability.STOP_LEVELING, display_name="Stop LILY Leveling")
self._cmd_dict.add(Capability.START_HEATER, display_name="Start Heater")
self._cmd_dict.add(Capability.STOP_HEATER, display_name="Stop Heater")
def _build_param_dict(self):
"""
Populate the parameter dictionary with parameters.
For each parameter key, add match string, match lambda function,
and value formatting function for set commands.
"""
my_regex = 'Not used'
ro, rw = ParameterDictVisibility.READ_ONLY, ParameterDictVisibility.READ_WRITE
_bool, _float, _int = ParameterDictType.BOOL, ParameterDictType.FLOAT, ParameterDictType.INT
parameters = {
Parameter.AUTO_RELEVEL: {
'type': _bool,
'display_name': 'Automatic Releveling Enabled',
'description': 'Enable LILY re-leveling automatically: (true | false)',
'visibility': rw,
'startup_param': True,
},
Parameter.XTILT_TRIGGER: {
'type': _float,
'display_name': 'X-tilt Releveling Trigger',
'description': 'The X-tilt value that must be exceeded before LILY auto releveling occurs.',
'units': Prefixes.MICRO + Units.RADIAN,
'visibility': rw,
'startup_param': True,
},
Parameter.YTILT_TRIGGER: {
'type': _float,
'display_name': 'Y-tilt Releveling Trigger',
'description': 'The Y-tilt value that must be exceeded before LILY auto releveling occurs.',
'units': Prefixes.MICRO + Units.RADIAN,
'visibility': rw,
'startup_param': True,
},
Parameter.LEVELING_TIMEOUT: {
'type': _int,
'display_name': 'LILY Leveling Timeout',
'description': 'Leveling timeout',
'units': Units.SECOND,
'visibility': rw,
'startup_param': True,
},
Parameter.HEAT_DURATION: {
'type': _int,
'display_name': 'Heater Run Time Duration',
'description': 'The number of hours the heater will run when it is given the command to turn on.',
'units': Units.HOUR,
'visibility': rw,
'startup_param': True,
},
Parameter.OUTPUT_RATE: {
'type': _int,
'display_name': 'NANO Output Rate',
'description': 'Sample rate',
'units': Units.HERTZ,
'visibility': rw,
'startup_param': True,
},
Parameter.HEATER_ON: {
'type': _bool,
'display_name': 'Heater Running',
'description': 'Indicates if the heater is running: (true | false)',
'value': False,
'visibility': ro,
},
Parameter.LILY_LEVELING: {
'type': _bool,
'display_name': 'Lily Leveling',
'description': 'Indicates if LILY leveling is occurring: (true | false)',
'value': False,
'visibility': ro,
},
Parameter.LEVELING_FAILED: {
'type': _bool,
'display_name': 'LILY Leveling Failed',
'description': 'Indicates if LILY leveling failed: (true | false)',
'value': False,
'visibility': ro,
},
}
for param in parameters:
self._param_dict.add(param, my_regex, None, None, **parameters[param])
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _build_command_with_value(self, cmd, value):
"""
Build a simple command with one value specified
@param cmd: instrument command
@param value: value to be sent
@return: command string
"""
return '%s%d%s' % (cmd, value, NEWLINE)
def _verify_set_values(self, params):
"""
Verify supplied values are in range, if applicable
@param params: Dictionary of Parameter:value pairs to be verified
@throws InstrumentParameterException
"""
constraints = ParameterConstraint.dict()
parameters = Parameter.reverse_dict()
# step through the list of parameters
for key, val in params.iteritems():
# verify this parameter exists
if not Parameter.has(key):
raise InstrumentParameterException('Received invalid parameter in SET: %s' % key)
# if constraint exists, verify we have not violated it
constraint_key = parameters.get(key)
if constraint_key in constraints:
var_type, minimum, maximum = constraints[constraint_key]
constraint_string = 'Parameter: %s Value: %s Type: %s Minimum: %s Maximum: %s' % \
(key, val, var_type, minimum, maximum)
log.debug('SET CONSTRAINT: %s', constraint_string)
# check bool values are actual booleans
if var_type == bool:
if val not in [True, False]:
raise InstrumentParameterException('Non-boolean value!: %s' % constraint_string)
# else, check if we can cast to the correct type
else:
try:
var_type(val)
except ValueError:
raise InstrumentParameterException('Type mismatch: %s' % constraint_string)
# now, verify we are within min/max
if val < minimum or val > maximum:
raise InstrumentParameterException('Out of range: %s' % constraint_string)
def _set_params(self, *args, **kwargs):
"""
Issue commands to the instrument to set various parameters
@param args: arglist, should contain a dictionary of parameters/values to be set
"""
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('Set command requires a parameter dict.')
self._verify_set_values(params)
self._verify_not_readonly(*args, **kwargs)
# if setting the output rate, get the current rate from the instrument first...
if Parameter.OUTPUT_RATE in params:
self._update_params()
old_config = self._param_dict.get_config()
# all constraints met or no constraints exist, set the values
for key, value in params.iteritems():
self._param_dict.set_value(key, value)
new_config = self._param_dict.get_config()
if not old_config == new_config:
log.debug('Config change: %r %r', old_config, new_config)
if old_config[Parameter.OUTPUT_RATE] is not None:
if int(old_config[Parameter.OUTPUT_RATE]) != int(new_config[Parameter.OUTPUT_RATE]):
self._do_cmd_no_resp(InstrumentCommand.NANO_SET_RATE, int(new_config[Parameter.OUTPUT_RATE]))
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def _update_params(self, *args, **kwargs):
"""
Update the param dictionary based on instrument response
"""
result, _ = self._do_cmd_resp(InstrumentCommand.NANO_DUMP1,
response_regex=particles.NanoStatusParticle.regex_compiled())
rate = int(re.search(r'NANO,\*TH:(\d+)', result).group(1))
self._param_dict.set_value(Parameter.OUTPUT_RATE, rate)
def _wakeup(self, timeout, delay=1):
"""
Overriding _wakeup; does not apply to this instrument
"""
def add_to_buffer(self, data):
"""
Overriding base class to reduce logging due to NANO high data rate
@param data: data to be added to buffers
"""
# Update the line and prompt buffers.
self._linebuf += data
self._promptbuf += data
self._last_data_timestamp = time.time()
# If our buffer exceeds the max allowable size then drop the leading
# characters on the floor.
max_size = self._max_buffer_size()
if len(self._linebuf) > max_size:
self._linebuf = self._linebuf[max_size * -1:]
# If our buffer exceeds the max allowable size then drop the leading
# characters on the floor.
if len(self._promptbuf) > max_size:
self._promptbuf = self._promptbuf[max_size * -1:]
def _max_buffer_size(self):
"""
Overriding base class to increase max buffer size
@return int max_buffer_size
"""
return MAX_BUFFER_SIZE
def _remove_leveling_timeout(self):
"""
Clean up the leveling timer
"""
try:
self._remove_scheduler(ScheduledJob.LEVELING_TIMEOUT)
except KeyError:
log.debug('Unable to remove LEVELING_TIMEOUT scheduled job, job does not exist.')
def _schedule_leveling_timeout(self):
"""
Set up a leveling timer to make sure we don't stay in leveling state forever if something goes wrong
"""
self._remove_leveling_timeout()
dt = datetime.datetime.now() + datetime.timedelta(seconds=self._param_dict.get(Parameter.LEVELING_TIMEOUT))
job_name = ScheduledJob.LEVELING_TIMEOUT
config = {
DriverConfigKey.SCHEDULER: {
job_name: {
DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.ABSOLUTE,
DriverSchedulerConfigKey.DATE: dt
},
}
}
}
self.set_init_params(config)
self._add_scheduler_event(ScheduledJob.LEVELING_TIMEOUT, ProtocolEvent.LEVELING_TIMEOUT)
def _remove_heater_timeout(self):
"""
Clean up the heater timer
"""
try:
self._remove_scheduler(ScheduledJob.HEATER_TIMEOUT)
except KeyError:
log.debug('Unable to remove HEATER_TIMEOUT scheduled job, job does not exist.')
def _schedule_heater_timeout(self):
"""
Set up a timer to set HEATER_ON to false around the time the heater shuts off
"""
self._remove_heater_timeout()
dt = datetime.datetime.now() + datetime.timedelta(hours=self._param_dict.get(Parameter.HEAT_DURATION))
job_name = ScheduledJob.HEATER_TIMEOUT
config = {
DriverConfigKey.SCHEDULER: {
job_name: {
DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.ABSOLUTE,
DriverSchedulerConfigKey.DATE: dt
},
}
}
}
self.set_init_params(config)
self._add_scheduler_event(ScheduledJob.HEATER_TIMEOUT, ProtocolEvent.HEATER_TIMEOUT)
def _stop_autosample(self):
"""
Stop autosample, leveling if in progress.
"""
self.leveling = False
self._do_cmd_no_resp(InstrumentCommand.NANO_OFF)
self._do_cmd_resp(InstrumentCommand.LILY_STOP_LEVELING, expected_prompt=Prompt.LILY_STOP_LEVELING)
self._do_cmd_resp(InstrumentCommand.LILY_OFF, expected_prompt=Prompt.LILY_OFF)
self._do_cmd_resp(InstrumentCommand.IRIS_OFF, expected_prompt=Prompt.IRIS_OFF)
def _generic_response_handler(self, resp, prompt):
"""
Pass through response handler
@param resp: response
@param prompt: prompt
@return: (response, prompt)
"""
return resp, prompt
def _particle_to_dict(self, sample):
"""
Convert a particle to a dictionary of value_id:value
@param sample: particle to be parsed
@return: dictionary representing the particle
"""
sample_dict = {}
values = sample.get(DataParticleKey.VALUES, [])
for each in values:
sample_dict[each[DataParticleKey.VALUE_ID]] = each[DataParticleKey.VALUE]
return sample_dict
def _check_for_autolevel(self, sample):
"""
Check this sample, kick off a leveling event if out of range
@param sample: sample to be checked
"""
if self._param_dict.get(Parameter.AUTO_RELEVEL) and self.get_current_state() == ProtocolState.AUTOSAMPLE:
# Find the current X and Y tilt values
# If they exceed the trigger parameters, begin autolevel
relevel = False
sample = self._particle_to_dict(sample)
x_tilt = abs(sample[particles.LilySampleParticleKey.X_TILT])
y_tilt = abs(sample[particles.LilySampleParticleKey.Y_TILT])
x_trig = int(self._param_dict.get(Parameter.XTILT_TRIGGER))
y_trig = int(self._param_dict.get(Parameter.YTILT_TRIGGER))
if x_tilt > x_trig or y_tilt > y_trig:
self._async_raise_fsm_event(ProtocolEvent.START_LEVELING)
def _failed_leveling(self, axis):
"""
Handle a failed leveling event. Set the failed flag, disable auto relevel and notify the operator
@param axis: Axis which failed leveling
"""
log.error('Detected leveling error in %s axis!', axis)
# Read only parameter, must be set outside of handler
self._param_dict.set_value(Parameter.LEVELING_FAILED, True)
# Use the handler to disable auto relevel to raise a config change event if needed.
self._handler_command_set({Parameter.AUTO_RELEVEL: False})
raise InstrumentDataException('LILY Leveling (%s) Failed. Disabling auto relevel' % axis)
def _check_completed_leveling(self, sample):
"""
Check this sample if leveling is complete or failed
@param sample: Sample to be checked
"""
sample = self._particle_to_dict(sample)
status = sample[particles.LilyLevelingParticleKey.STATUS]
if status is not None:
# Leveling status update received
# If leveling complete, send STOP_LEVELING, set the _leveling_failed flag to False
if 'Leveled' in status:
if self._param_dict.get(Parameter.LEVELING_FAILED):
self._handler_command_set({Parameter.LEVELING_FAILED: False})
self._async_raise_fsm_event(ProtocolEvent.STOP_LEVELING)
# Leveling X failed! Set the flag and raise an exception to notify the operator
# and disable auto leveling. Let the instrument attempt to level
# in the Y axis.
elif 'X Axis out of range' in status:
self._failed_leveling('X')
# Leveling Y failed! Set the flag and raise an exception to notify the operator
# and disable auto leveling. Send STOP_LEVELING
elif 'Y Axis out of range' in status:
self._async_raise_fsm_event(ProtocolEvent.STOP_LEVELING)
self._failed_leveling('Y')
def _check_pps_sync(self, sample):
"""
Check if PPS sync status has changed. Update driver flag and, if appropriate, trigger a time sync
@param sample: sample to be checked
"""
sample = self._particle_to_dict(sample)
pps_sync = sample[particles.NanoSampleParticleKey.PPS_SYNC] == 'P'
if pps_sync:
if not self.has_pps:
# pps sync regained, sync the time
self.has_pps = True
if self.get_current_state() in [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]:
self._async_raise_fsm_event(ProtocolEvent.NANO_TIME_SYNC)
else:
self.has_pps = False
########################################################################
# Unknown handlers.
########################################################################
def _handler_unknown_discover(self, *args, **kwargs):
"""
Process discover event
@return next_state, next_agent_state
"""
return ProtocolState.COMMAND, ResourceAgentState.IDLE
########################################################################
# Autosample handlers.
########################################################################
def _handler_autosample_enter(self, *args, **kwargs):
"""
Enter autosample state.
"""
self._init_params()
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_autosample_stop_autosample(self, *args, **kwargs):
"""
Stop autosample
@return next_state, (next_agent_state, result)
"""
return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
########################################################################
# Command handlers.
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
"""
# key off the initialization flag to determine if we should sync the time
if self._init_type == InitializationType.STARTUP:
self._handler_time_sync()
self._init_params()
self._stop_autosample()
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_get(self, *args, **kwargs):
"""
Process GET event
@return next_state, result
"""
return self._handler_get(*args, **kwargs)
def _handler_command_set(self, *args, **kwargs):
"""
Perform a set command.
@param args[0] parameter : value dict.
@return (next_state, result)
@throws InstrumentParameterException
"""
next_state = None
result = None
startup = False
if len(args) < 1:
raise InstrumentParameterException('Set command requires a parameter dict.')
params = args[0]
if len(args) > 1:
startup = args[1]
if not isinstance(params, dict):
raise InstrumentParameterException('Set parameters not a dict.')
if not isinstance(startup, bool):
raise InstrumentParameterException('Startup not a bool.')
self._set_params(params, startup)
return next_state, result
def _handler_command_start_direct(self):
"""
Start direct access
@return next_state, (next_agent_state, result)
"""
return ProtocolState.DIRECT_ACCESS, (ResourceAgentState.DIRECT_ACCESS, None)
def _handler_command_start_autosample(self):
"""
Start autosample
@return next_state, (next_agent_state, result)
"""
self._do_cmd_resp(InstrumentCommand.LILY_ON, expected_prompt=Prompt.LILY_ON)
self._do_cmd_resp(InstrumentCommand.NANO_ON, expected_prompt=NANO_STRING)
self._do_cmd_resp(InstrumentCommand.IRIS_ON, expected_prompt=Prompt.IRIS_ON)
return ProtocolState.AUTOSAMPLE, (ResourceAgentState.STREAMING, None)
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
def _handler_direct_access_execute_direct(self, data):
"""
Execute direct access command
@return next_state, (next_agent_state, result)
"""
self._do_cmd_direct(data)
self._sent_cmds.append(data)
return None, (None, None)
def _handler_direct_access_stop_direct(self):
"""
Stop direct access
@return next_state, (next_agent_state, result)
"""
next_state, next_agent_state = self._handler_unknown_discover()
if next_state == DriverProtocolState.COMMAND:
next_agent_state = ResourceAgentState.COMMAND
return next_state, (next_agent_state, None)
########################################################################
# Generic handlers.
########################################################################
def _handler_generic_enter(self, *args, **kwargs):
"""
Generic enter state handler
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_generic_exit(self, *args, **kwargs):
"""
Generic exit state handler
"""
def _handler_acquire_status(self, *args, **kwargs):
"""
We generate these particles here to avoid the chunker. This allows us to process status
messages with embedded messages from the other parts of the instrument.
@return next_state, (next_agent_state, result)
"""
ts = ntplib.system_to_ntp_time(time.time())
parts = []
for command, particle_class in [
(InstrumentCommand.SYST_DUMP1, particles.SystStatusParticle),
(InstrumentCommand.LILY_DUMP1, particles.LilyStatusParticle1),
(InstrumentCommand.LILY_DUMP2, particles.LilyStatusParticle2),
(InstrumentCommand.IRIS_DUMP1, particles.IrisStatusParticle1),
(InstrumentCommand.IRIS_DUMP2, particles.IrisStatusParticle2),
(InstrumentCommand.NANO_DUMP1, particles.NanoStatusParticle),
]:
result, _ = self._do_cmd_resp(command, response_regex=particle_class.regex_compiled())
parts.append(result)
sample = self._extract_sample(particles.BotptStatusParticle,
particles.BotptStatusParticle.regex_compiled(),
NEWLINE.join(parts), ts)
if self.get_current_state() == ProtocolState.AUTOSAMPLE:
# acquiring status stops NANO output, restart it
self._do_cmd_resp(InstrumentCommand.NANO_ON, expected_prompt=NANO_STRING)
if not sample:
raise InstrumentProtocolException('Failed to generate status particle')
return None, (None, sample)
def _handler_time_sync(self, *args, **kwargs):
"""
Sync the NANO clock. Setting the time restarts NANO data output, so turn it back off when in command mode.
@return next_state, (next_agent_state, result)
"""
self._do_cmd_resp(InstrumentCommand.NANO_SET_TIME, expected_prompt=NANO_STRING)
if self.get_current_state() == ProtocolState.COMMAND:
self._do_cmd_no_resp(InstrumentCommand.NANO_OFF)
return None, (None, None)
def _handler_start_leveling(self):
"""
Send the start leveling command
@return next_state, (next_agent_state, result)
"""
if not self._param_dict.get(Parameter.LILY_LEVELING):
self._schedule_leveling_timeout()
self._do_cmd_resp(InstrumentCommand.LILY_START_LEVELING, expected_prompt=Prompt.LILY_START_LEVELING)
self._param_dict.set_value(Parameter.LILY_LEVELING, True)
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
return None, (None, None)
def _handler_stop_leveling(self):
"""
Send the stop leveling command
@return next_state, (next_agent_state, result)
"""
if self._param_dict.get(Parameter.LILY_LEVELING):
self._remove_leveling_timeout()
self._do_cmd_resp(InstrumentCommand.LILY_STOP_LEVELING, expected_prompt=Prompt.LILY_STOP_LEVELING)
self._param_dict.set_value(Parameter.LILY_LEVELING, False)
if self.get_current_state() == ProtocolState.AUTOSAMPLE:
self._do_cmd_resp(InstrumentCommand.LILY_ON, expected_prompt=Prompt.LILY_ON)
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
return None, (None, None)
def _handler_leveling_timeout(self):
"""
Leveling has timed out, disable auto-relevel and mark leveling as failed.
handler_stop_leveling will raise the config change event.
@throws InstrumentProtocolException
"""
self._param_dict.set_value(Parameter.AUTO_RELEVEL, False)
self._param_dict.set_value(Parameter.LEVELING_FAILED, True)
self._handler_stop_leveling()
raise InstrumentProtocolException('Leveling failed to complete within timeout, disabling auto-relevel')
def _handler_start_heater(self, *args, **kwargs):
"""
Turn the heater on for Parameter.HEAT_DURATION hours
@return next_state, (next_agent_state, result)
"""
if not self._param_dict.get(Parameter.HEATER_ON):
self._do_cmd_resp(InstrumentCommand.HEAT,
self._param_dict.get(Parameter.HEAT_DURATION),
response_regex=RegexResponse.HEAT)
self._param_dict.set_value(Parameter.HEATER_ON, True)
# Want to disable auto leveling when the heater is on
self._param_dict.set_value(Parameter.AUTO_RELEVEL, False)
self._schedule_heater_timeout()
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
return None, (None, None)
def _handler_stop_heater(self, *args, **kwargs):
"""
Turn the heater off.
@return next_state, (next_agent_state, result)
"""
if self._param_dict.get(Parameter.HEATER_ON):
self._do_cmd_resp(InstrumentCommand.HEAT,
0,
response_regex=RegexResponse.HEAT)
self._param_dict.set_value(Parameter.HEATER_ON, False)
self._remove_heater_timeout()
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
return None, (None, None)
def _handler_heater_timeout(self):
"""
Heater should be finished. Set HEATER_ON to false.
"""
self._param_dict.set_value(Parameter.HEATER_ON, False)
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
return None, None
|
|
"""
Module for managing the climate within a room.
* It reads/listens to a temperature address from KNX bus.
* Manages and sends the desired setpoint to KNX bus.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Iterator
from xknx.remote_value import (
GroupAddressesType,
RemoteValue,
RemoteValueScaling,
RemoteValueSetpointShift,
RemoteValueSwitch,
RemoteValueTemp,
)
from xknx.remote_value.remote_value_setpoint_shift import SetpointShiftMode
from .climate_mode import ClimateMode
from .device import Device, DeviceCallbackType
if TYPE_CHECKING:
from xknx.telegram import Telegram
from xknx.telegram.address import DeviceGroupAddress
from xknx.xknx import XKNX
logger = logging.getLogger("xknx.log")
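# Default setpoint shift limits (offsets from the base temperature) and step size, in Kelvin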
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEFAULT_TEMPERATURE_STEP = 0.1
class Climate(Device):
"""Class for managing the climate."""
def __init__(
self,
xknx: XKNX,
name: str,
group_address_temperature: GroupAddressesType | None = None,
group_address_target_temperature: GroupAddressesType | None = None,
group_address_target_temperature_state: GroupAddressesType | None = None,
group_address_setpoint_shift: GroupAddressesType | None = None,
group_address_setpoint_shift_state: GroupAddressesType | None = None,
setpoint_shift_mode: SetpointShiftMode | None = None,
setpoint_shift_max: float = DEFAULT_SETPOINT_SHIFT_MAX,
setpoint_shift_min: float = DEFAULT_SETPOINT_SHIFT_MIN,
temperature_step: float = DEFAULT_TEMPERATURE_STEP,
group_address_on_off: GroupAddressesType | None = None,
group_address_on_off_state: GroupAddressesType | None = None,
on_off_invert: bool = False,
group_address_active_state: GroupAddressesType | None = None,
group_address_command_value_state: GroupAddressesType | None = None,
sync_state: bool | int | float | str = True,
min_temp: float | None = None,
max_temp: float | None = None,
mode: ClimateMode | None = None,
device_updated_cb: DeviceCallbackType | None = None,
):
"""Initialize Climate class."""
super().__init__(xknx, name, device_updated_cb)
self.min_temp = min_temp
self.max_temp = max_temp
self.setpoint_shift_min = setpoint_shift_min
self.setpoint_shift_max = setpoint_shift_max
self.temperature_step = temperature_step
self.temperature = RemoteValueTemp(
xknx,
group_address_state=group_address_temperature,
sync_state=sync_state,
device_name=self.name,
feature_name="Current temperature",
after_update_cb=self.after_update,
)
self.target_temperature = RemoteValueTemp(
xknx,
group_address_target_temperature,
group_address_target_temperature_state,
sync_state=sync_state,
device_name=self.name,
feature_name="Target temperature",
after_update_cb=self.after_update,
)
self._setpoint_shift = RemoteValueSetpointShift(
xknx,
group_address_setpoint_shift,
group_address_setpoint_shift_state,
sync_state=sync_state,
device_name=self.name,
after_update_cb=self.after_update,
setpoint_shift_mode=setpoint_shift_mode,
setpoint_shift_step=self.temperature_step,
)
self.supports_on_off = (
group_address_on_off is not None or group_address_on_off_state is not None
)
self.on = RemoteValueSwitch( # pylint: disable=invalid-name
xknx,
group_address_on_off,
group_address_on_off_state,
sync_state=sync_state,
device_name=self.name,
after_update_cb=self.after_update,
invert=on_off_invert,
)
self.active = RemoteValueSwitch(
xknx,
group_address_state=group_address_active_state,
sync_state=sync_state,
device_name=self.name,
feature_name="Active",
after_update_cb=self.after_update,
)
self.command_value = RemoteValueScaling(
xknx,
group_address_state=group_address_command_value_state,
sync_state=sync_state,
device_name=self.name,
feature_name="Command value",
after_update_cb=self.after_update,
)
self.mode = mode
def _iter_remote_values(self) -> Iterator[RemoteValue[Any, Any]]:
"""Iterate the devices RemoteValue classes."""
yield self.temperature
yield self.target_temperature
yield self._setpoint_shift
yield self.on
yield self.active
yield self.command_value
def has_group_address(self, group_address: DeviceGroupAddress) -> bool:
"""Test if device has given group address."""
if self.mode is not None and self.mode.has_group_address(group_address):
return True
return super().has_group_address(group_address)
@property
def is_on(self) -> bool:
"""Return power status."""
# None will return False
return bool(self.on.value)
@property
def is_active(self) -> bool | None:
"""Return if currently active. None if unknown."""
if self.active.value is not None:
return self.active.value
if self.command_value.value is not None:
return bool(self.command_value.value)
return None
async def turn_on(self) -> None:
"""Set power status to on."""
await self.on.on()
async def turn_off(self) -> None:
"""Set power status to off."""
await self.on.off()
@property
def initialized_for_setpoint_shift_calculations(self) -> bool:
"""Test if object is initialized for setpoint shift calculations."""
if (
self._setpoint_shift.initialized
and self._setpoint_shift.value is not None
and self.target_temperature.initialized
and self.target_temperature.value is not None
):
return True
return False
async def set_target_temperature(self, target_temperature: float) -> None:
"""Send new target temperature or setpoint_shift to KNX bus."""
if self.base_temperature is not None:
# implies initialized_for_setpoint_shift_calculations
temperature_delta = target_temperature - self.base_temperature
await self.set_setpoint_shift(temperature_delta)
else:
validated_temp = self.validate_value(
target_temperature, self.min_temp, self.max_temp
)
await self.target_temperature.set(validated_temp)
@property
def base_temperature(self) -> float | None:
"""
Return the base temperature when setpoint_shift is initialized.
Base temperature is the default temperature (setpoint-shift=0) for the active climate mode.
As this value is usually not available via KNX, we have to derive this from the current
target temperature and the current set point shift.
"""
# implies self.initialized_for_setpoint_shift_calculations in a mypy compatible way:
if (
self.target_temperature.value is not None
and self._setpoint_shift.value is not None
):
return self.target_temperature.value - self._setpoint_shift.value
return None
@property
def setpoint_shift(self) -> float | None:
"""Return current offset from base temperature in Kelvin."""
return self._setpoint_shift.value
def validate_value(
self, value: float, min_value: float | None, max_value: float | None
) -> float:
"""Check boundaries of temperature and return valid temperature value."""
if (min_value is not None) and (value < min_value):
logger.warning("Min value exceeded at %s: %s", self.name, value)
return min_value
if (max_value is not None) and (value > max_value):
logger.warning("Max value exceeded at %s: %s", self.name, value)
return max_value
return value
async def set_setpoint_shift(self, offset: float) -> None:
"""Send new temperature offset to KNX bus."""
validated_offset = self.validate_value(
offset, self.setpoint_shift_min, self.setpoint_shift_max
)
base_temperature = self.base_temperature
await self._setpoint_shift.set(validated_offset)
# broadcast new target temperature and set internally
if self.target_temperature.writable and base_temperature is not None:
await self.target_temperature.set(base_temperature + validated_offset)
@property
def target_temperature_max(self) -> float | None:
"""Return the highest possible target temperature."""
if self.max_temp is not None:
return self.max_temp
if self.base_temperature is not None:
# implies initialized_for_setpoint_shift_calculations
return self.base_temperature + self.setpoint_shift_max
return None
@property
def target_temperature_min(self) -> float | None:
"""Return the lowest possible target temperature."""
if self.min_temp is not None:
return self.min_temp
if self.base_temperature is not None:
# implies initialized_for_setpoint_shift_calculations
return self.base_temperature + self.setpoint_shift_min
return None
async def process_group_write(self, telegram: Telegram) -> None:
"""Process incoming and outgoing GROUP WRITE telegram."""
for remote_value in self._iter_remote_values():
await remote_value.process(telegram)
if self.mode is not None:
await self.mode.process_group_write(telegram)
async def sync(self, wait_for_result: bool = False) -> None:
"""Read states of device from KNX bus."""
await super().sync(wait_for_result=wait_for_result)
if self.mode is not None:
await self.mode.sync(wait_for_result=wait_for_result)
def __str__(self) -> str:
"""Return object as readable string."""
return (
f'<Climate name="{self.name}" '
f"temperature={self.temperature.group_addr_str()} "
f"target_temperature={self.target_temperature.group_addr_str()} "
f'temperature_step="{self.temperature_step}" '
f"setpoint_shift={self._setpoint_shift.group_addr_str()} "
f'setpoint_shift_max="{self.setpoint_shift_max}" '
f'setpoint_shift_min="{self.setpoint_shift_min}" '
f"group_address_on_off={self.on.group_addr_str()} "
"/>"
)
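# Hedged illustration (not part of the original module): the setpoint-shift
# arithmetic used by set_setpoint_shift() / set_target_temperature() above,
# reduced to plain functions so that "target = base + shift" and the min/max
# clamping are easy to verify without a KNX bus.
def _clamp_example(value: float, min_value: float | None, max_value: float | None) -> float:
    """Mirror of Climate.validate_value() without the logging."""
    if min_value is not None and value < min_value:
        return min_value
    if max_value is not None and value > max_value:
        return max_value
    return value


def _derive_setpoint_shift_example(
    target: float, base: float, shift_min: float, shift_max: float
) -> float:
    """Offset that would be sent to the bus for a requested target temperature."""
    return _clamp_example(target - base, shift_min, shift_max)


if __name__ == "__main__":
    # Base 21.0 °C, allowed shift -2..+2 K, requested target 24.0 °C:
    # the shift is clamped to +2.0, so the effective target becomes 23.0 °C.
    assert _derive_setpoint_shift_example(24.0, 21.0, -2.0, 2.0) == 2.0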
|
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Test cmdline.py for coverage.py."""
import pprint
import re
import shlex
import sys
import textwrap
import mock
import coverage
import coverage.cmdline
from coverage.config import CoverageConfig
from coverage.data import CoverageData, CoverageDataFiles
from coverage.misc import ExceptionDuringRun
from tests.coveragetest import CoverageTest, OK, ERR
class BaseCmdLineTest(CoverageTest):
"""Tests of execution paths through the command line interpreter."""
run_in_temp_dir = False
# Make a dict mapping function names to the default values that cmdline.py
# uses when calling the function.
defaults = mock.Mock()
defaults.coverage(
cover_pylib=None, data_suffix=None, timid=None, branch=None,
config_file=True, source=None, include=None, omit=None, debug=None,
concurrency=None,
)
defaults.annotate(
directory=None, ignore_errors=None, include=None, omit=None, morfs=[],
)
defaults.html_report(
directory=None, ignore_errors=None, include=None, omit=None, morfs=[],
title=None,
)
defaults.report(
ignore_errors=None, include=None, omit=None, morfs=[],
show_missing=None, skip_covered=None
)
defaults.xml_report(
ignore_errors=None, include=None, omit=None, morfs=[], outfile=None,
)
DEFAULT_KWARGS = dict((name, kw) for name, _, kw in defaults.mock_calls)
def model_object(self):
"""Return a Mock suitable for use in CoverageScript."""
mk = mock.Mock()
# We'll invoke .coverage as the constructor, and then keep using the
# same object as the resulting coverage object.
mk.coverage.return_value = mk
# The mock needs to get options, but shouldn't need to set them.
config = CoverageConfig()
mk.get_option = config.get_option
return mk
def mock_command_line(self, args, path_exists=None):
"""Run `args` through the command line, with a Mock.
Returns the Mock it used and the status code returned.
"""
m = self.model_object()
m.path_exists.return_value = path_exists
ret = coverage.cmdline.CoverageScript(
_covpkg=m, _run_python_file=m.run_python_file,
_run_python_module=m.run_python_module, _help_fn=m.help_fn,
_path_exists=m.path_exists,
).command_line(shlex.split(args))
return m, ret
def cmd_executes(self, args, code, ret=OK, path_exists=None):
"""Assert that the `args` end up executing the sequence in `code`."""
m1, r1 = self.mock_command_line(args, path_exists=path_exists)
self.assertEqual(r1, ret, "Wrong status: got %r, wanted %r" % (r1, ret))
# Remove all indentation, and change ".foo()" to "m2.foo()".
code = re.sub(r"(?m)^\s+", "", code)
code = re.sub(r"(?m)^\.", "m2.", code)
m2 = self.model_object()
m2.path_exists.return_value = path_exists
code_obj = compile(code, "<code>", "exec")
eval(code_obj, globals(), { 'm2': m2 }) # pylint: disable=eval-used
# Many of our functions take a lot of arguments, and cmdline.py
# calls them with many. But most of them are just the defaults, which
# we don't want to have to repeat in all tests. For each call, apply
# the defaults. This lets the tests just mention the interesting ones.
for name, args, kwargs in m2.method_calls:
for k, v in self.DEFAULT_KWARGS.get(name, {}).items():
if k not in kwargs:
kwargs[k] = v
self.assert_same_method_calls(m1, m2)
def cmd_executes_same(self, args1, args2):
"""Assert that the `args1` executes the same as `args2`."""
m1, r1 = self.mock_command_line(args1)
m2, r2 = self.mock_command_line(args2)
self.assertEqual(r1, r2)
self.assert_same_method_calls(m1, m2)
def assert_same_method_calls(self, m1, m2):
"""Assert that `m1.method_calls` and `m2.method_calls` are the same."""
# Use a real equality comparison, but if it fails, use a nicer assert
# so we can tell what's going on. We have to use the real == first due
# to CmdOptionParser.__eq__
if m1.method_calls != m2.method_calls:
pp1 = pprint.pformat(m1.method_calls)
pp2 = pprint.pformat(m2.method_calls)
self.assertMultiLineEqual(pp1+'\n', pp2+'\n')
def cmd_help(self, args, help_msg=None, topic=None, ret=ERR):
"""Run a command line, and check that it prints the right help.
Only the last function call in the mock is checked, which should be the
help message that we want to see.
"""
m, r = self.mock_command_line(args)
self.assertEqual(r, ret,
"Wrong status: got %s, wanted %s" % (r, ret)
)
if help_msg:
self.assertEqual(m.method_calls[-1],
('help_fn', (help_msg,), {})
)
else:
self.assertEqual(m.method_calls[-1],
('help_fn', (), {'topic':topic})
)
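# Hedged sketch (illustration only, not part of the original test helpers):
# the DEFAULT_KWARGS merge performed in cmd_executes() above, shown with
# plain dicts instead of mock call records. Only keyword arguments a test
# leaves unspecified are filled in from the recorded defaults.
def _merge_defaults_example(defaults, call_kwargs):
    merged = dict(call_kwargs)
    for key, value in defaults.items():
        if key not in merged:
            merged[key] = value
    return merged

assert _merge_defaults_example(
    {"ignore_errors": None, "show_missing": None},
    {"show_missing": True},
) == {"show_missing": True, "ignore_errors": None}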
class BaseCmdLineTestTest(BaseCmdLineTest):
"""Tests that our BaseCmdLineTest helpers work."""
def test_assert_same_method_calls(self):
# All the other tests here use self.cmd_executes_same in successful
# ways, so here we just check that it fails.
with self.assertRaises(AssertionError):
self.cmd_executes_same("run", "debug")
class CmdLineTest(BaseCmdLineTest):
"""Tests of the coverage.py command line."""
def test_annotate(self):
# coverage annotate [-d DIR] [-i] [--omit DIR,...] [FILE1 FILE2 ...]
self.cmd_executes("annotate", """\
.coverage()
.load()
.annotate()
""")
self.cmd_executes("annotate -d dir1", """\
.coverage()
.load()
.annotate(directory="dir1")
""")
self.cmd_executes("annotate -i", """\
.coverage()
.load()
.annotate(ignore_errors=True)
""")
self.cmd_executes("annotate --omit fooey", """\
.coverage(omit=["fooey"])
.load()
.annotate(omit=["fooey"])
""")
self.cmd_executes("annotate --omit fooey,booey", """\
.coverage(omit=["fooey", "booey"])
.load()
.annotate(omit=["fooey", "booey"])
""")
self.cmd_executes("annotate mod1", """\
.coverage()
.load()
.annotate(morfs=["mod1"])
""")
self.cmd_executes("annotate mod1 mod2 mod3", """\
.coverage()
.load()
.annotate(morfs=["mod1", "mod2", "mod3"])
""")
def test_combine(self):
# coverage combine with args
self.cmd_executes("combine datadir1", """\
.coverage()
.load()
.combine(["datadir1"])
.save()
""")
# coverage combine without args
self.cmd_executes("combine", """\
.coverage()
.load()
.combine(None)
.save()
""")
def test_combine_doesnt_confuse_options_with_args(self):
# https://bitbucket.org/ned/coveragepy/issues/385/coverage-combine-doesnt-work-with-rcfile
self.cmd_executes("combine --rcfile cov.ini", """\
.coverage(config_file='cov.ini')
.load()
.combine(None)
.save()
""")
self.cmd_executes("combine --rcfile cov.ini data1 data2/more", """\
.coverage(config_file='cov.ini')
.load()
.combine(["data1", "data2/more"])
.save()
""")
def test_debug(self):
self.cmd_help("debug", "What information would you like: data, sys?")
self.cmd_help("debug foo", "Don't know what you mean by 'foo'")
def test_debug_sys(self):
self.command_line("debug sys")
out = self.stdout()
self.assertIn("version:", out)
self.assertIn("data_path:", out)
def test_erase(self):
# coverage erase
self.cmd_executes("erase", """\
.coverage()
.erase()
""")
def test_version(self):
# coverage --version
self.cmd_help("--version", topic="version", ret=OK)
def test_help_option(self):
# coverage -h
self.cmd_help("-h", topic="help", ret=OK)
self.cmd_help("--help", topic="help", ret=OK)
def test_help_command(self):
self.cmd_executes("help", ".help_fn(topic='help')")
def test_cmd_help(self):
self.cmd_executes("run --help",
".help_fn(parser='<CmdOptionParser:run>')")
self.cmd_executes_same("help run", "run --help")
def test_html(self):
# coverage html -d DIR [-i] [--omit DIR,...] [FILE1 FILE2 ...]
self.cmd_executes("html", """\
.coverage()
.load()
.html_report()
""")
self.cmd_executes("html -d dir1", """\
.coverage()
.load()
.html_report(directory="dir1")
""")
self.cmd_executes("html -i", """\
.coverage()
.load()
.html_report(ignore_errors=True)
""")
self.cmd_executes("html --omit fooey", """\
.coverage(omit=["fooey"])
.load()
.html_report(omit=["fooey"])
""")
self.cmd_executes("html --omit fooey,booey", """\
.coverage(omit=["fooey", "booey"])
.load()
.html_report(omit=["fooey", "booey"])
""")
self.cmd_executes("html mod1", """\
.coverage()
.load()
.html_report(morfs=["mod1"])
""")
self.cmd_executes("html mod1 mod2 mod3", """\
.coverage()
.load()
.html_report(morfs=["mod1", "mod2", "mod3"])
""")
self.cmd_executes("html --title=Hello_there", """\
.coverage()
.load()
.html_report(title='Hello_there')
""")
def test_report(self):
# coverage report [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...]
self.cmd_executes("report", """\
.coverage()
.load()
.report(show_missing=None)
""")
self.cmd_executes("report -i", """\
.coverage()
.load()
.report(ignore_errors=True)
""")
self.cmd_executes("report -m", """\
.coverage()
.load()
.report(show_missing=True)
""")
self.cmd_executes("report --omit fooey", """\
.coverage(omit=["fooey"])
.load()
.report(omit=["fooey"])
""")
self.cmd_executes("report --omit fooey,booey", """\
.coverage(omit=["fooey", "booey"])
.load()
.report(omit=["fooey", "booey"])
""")
self.cmd_executes("report mod1", """\
.coverage()
.load()
.report(morfs=["mod1"])
""")
self.cmd_executes("report mod1 mod2 mod3", """\
.coverage()
.load()
.report(morfs=["mod1", "mod2", "mod3"])
""")
self.cmd_executes("report --skip-covered", """\
.coverage()
.load()
.report(skip_covered=True)
""")
def test_run(self):
# coverage run [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...]
# run calls coverage.erase first.
self.cmd_executes("run foo.py", """\
.coverage()
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
# run -a combines with an existing data file before saving.
self.cmd_executes("run -a foo.py", """\
.coverage()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.path_exists('.coverage')
.combine(data_paths=['.coverage'])
.save()
""", path_exists=True)
# run -a doesn't combine anything if the data file doesn't exist.
self.cmd_executes("run -a foo.py", """\
.coverage()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.path_exists('.coverage')
.save()
""", path_exists=False)
# --timid sets a flag, and program arguments get passed through.
self.cmd_executes("run --timid foo.py abc 123", """\
.coverage(timid=True)
.erase()
.start()
.run_python_file('foo.py', ['foo.py', 'abc', '123'])
.stop()
.save()
""")
# -L sets a flag, and flags for the program don't confuse us.
self.cmd_executes("run -p -L foo.py -a -b", """\
.coverage(cover_pylib=True, data_suffix=True)
.erase()
.start()
.run_python_file('foo.py', ['foo.py', '-a', '-b'])
.stop()
.save()
""")
self.cmd_executes("run --branch foo.py", """\
.coverage(branch=True)
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --rcfile=myrc.rc foo.py", """\
.coverage(config_file="myrc.rc")
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --include=pre1,pre2 foo.py", """\
.coverage(include=["pre1", "pre2"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --omit=opre1,opre2 foo.py", """\
.coverage(omit=["opre1", "opre2"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --include=pre1,pre2 --omit=opre1,opre2 foo.py",
"""\
.coverage(include=["pre1", "pre2"], omit=["opre1", "opre2"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --source=quux,hi.there,/home/bar foo.py", """\
.coverage(source=["quux", "hi.there", "/home/bar"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --concurrency=gevent foo.py", """\
.coverage(concurrency='gevent')
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
def test_bad_concurrency(self):
self.command_line("run --concurrency=nothing", ret=ERR)
out = self.stdout()
self.assertIn("option --concurrency: invalid choice: 'nothing'", out)
def test_run_debug(self):
self.cmd_executes("run --debug=opt1 foo.py", """\
.coverage(debug=["opt1"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --debug=opt1,opt2 foo.py", """\
.coverage(debug=["opt1","opt2"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
def test_run_module(self):
self.cmd_executes("run -m mymodule", """\
.coverage()
.erase()
.start()
.run_python_module('mymodule', ['mymodule'])
.stop()
.save()
""")
self.cmd_executes("run -m mymodule -qq arg1 arg2", """\
.coverage()
.erase()
.start()
.run_python_module('mymodule', ['mymodule', '-qq', 'arg1', 'arg2'])
.stop()
.save()
""")
self.cmd_executes("run --branch -m mymodule", """\
.coverage(branch=True)
.erase()
.start()
.run_python_module('mymodule', ['mymodule'])
.stop()
.save()
""")
self.cmd_executes_same("run -m mymodule", "run --module mymodule")
def test_run_nothing(self):
self.command_line("run", ret=ERR)
self.assertIn("Nothing to do", self.stdout())
def test_cant_append_parallel(self):
self.command_line("run --append --parallel-mode foo.py", ret=ERR)
self.assertIn("Can't append to data files in parallel mode.", self.stdout())
def test_xml(self):
# coverage xml [-i] [--omit DIR,...] [FILE1 FILE2 ...]
self.cmd_executes("xml", """\
.coverage()
.load()
.xml_report()
""")
self.cmd_executes("xml -i", """\
.coverage()
.load()
.xml_report(ignore_errors=True)
""")
self.cmd_executes("xml -o myxml.foo", """\
.coverage()
.load()
.xml_report(outfile="myxml.foo")
""")
self.cmd_executes("xml -o -", """\
.coverage()
.load()
.xml_report(outfile="-")
""")
self.cmd_executes("xml --omit fooey", """\
.coverage(omit=["fooey"])
.load()
.xml_report(omit=["fooey"])
""")
self.cmd_executes("xml --omit fooey,booey", """\
.coverage(omit=["fooey", "booey"])
.load()
.xml_report(omit=["fooey", "booey"])
""")
self.cmd_executes("xml mod1", """\
.coverage()
.load()
.xml_report(morfs=["mod1"])
""")
self.cmd_executes("xml mod1 mod2 mod3", """\
.coverage()
.load()
.xml_report(morfs=["mod1", "mod2", "mod3"])
""")
def test_no_arguments_at_all(self):
self.cmd_help("", topic="minimum_help", ret=OK)
def test_bad_command(self):
self.cmd_help("xyzzy", "Unknown command: 'xyzzy'")
class CmdLineWithFilesTest(BaseCmdLineTest):
"""Test the command line in ways that need temp files."""
run_in_temp_dir = True
no_files_in_temp_dir = True
def test_debug_data(self):
data = CoverageData()
data.add_lines({
"file1.py": dict.fromkeys(range(1, 18)),
"file2.py": dict.fromkeys(range(1, 24)),
})
data.add_file_tracers({"file1.py": "a_plugin"})
data_files = CoverageDataFiles()
data_files.write(data)
self.command_line("debug data")
self.assertMultiLineEqual(self.stdout(), textwrap.dedent("""\
-- data ------------------------------------------------------
path: FILENAME
has_arcs: False
2 files:
file1.py: 17 lines [a_plugin]
file2.py: 23 lines
""").replace("FILENAME", data_files.filename))
def test_debug_data_with_no_data(self):
data_files = CoverageDataFiles()
self.command_line("debug data")
self.assertMultiLineEqual(self.stdout(), textwrap.dedent("""\
-- data ------------------------------------------------------
path: FILENAME
No data collected
""").replace("FILENAME", data_files.filename))
class CmdLineStdoutTest(BaseCmdLineTest):
"""Test the command line with real stdout output."""
def test_minimum_help(self):
self.command_line("")
out = self.stdout()
self.assertIn("Code coverage for Python.", out)
self.assertLess(out.count("\n"), 4)
def test_version(self):
self.command_line("--version")
out = self.stdout()
self.assertIn("ersion ", out)
self.assertLess(out.count("\n"), 4)
def test_help(self):
self.command_line("help")
out = self.stdout()
self.assertIn("readthedocs.org", out)
self.assertGreater(out.count("\n"), 10)
def test_cmd_help(self):
self.command_line("help run")
out = self.stdout()
self.assertIn("<pyfile>", out)
self.assertIn("--timid", out)
self.assertGreater(out.count("\n"), 10)
def test_error(self):
self.command_line("fooey kablooey", ret=ERR)
out = self.stdout()
self.assertIn("fooey", out)
self.assertIn("help", out)
class CmdMainTest(CoverageTest):
"""Tests of coverage.cmdline.main(), using mocking for isolation."""
run_in_temp_dir = False
class CoverageScriptStub(object):
"""A stub for coverage.cmdline.CoverageScript, used by CmdMainTest."""
def command_line(self, argv):
"""Stub for command_line, the arg determines what it will do."""
if argv[0] == 'hello':
print("Hello, world!")
elif argv[0] == 'raise':
try:
raise Exception("oh noes!")
except:
raise ExceptionDuringRun(*sys.exc_info())
elif argv[0] == 'internalraise':
raise ValueError("coverage is broken")
elif argv[0] == 'exit':
sys.exit(23)
else:
raise AssertionError("Bad CoverageScriptStub: %r"% (argv,))
return 0
def setUp(self):
super(CmdMainTest, self).setUp()
self.old_CoverageScript = coverage.cmdline.CoverageScript
coverage.cmdline.CoverageScript = self.CoverageScriptStub
self.addCleanup(self.cleanup_coverage_script)
def cleanup_coverage_script(self):
"""Restore CoverageScript when the test is done."""
coverage.cmdline.CoverageScript = self.old_CoverageScript
def test_normal(self):
ret = coverage.cmdline.main(['hello'])
self.assertEqual(ret, 0)
self.assertEqual(self.stdout(), "Hello, world!\n")
def test_raise(self):
ret = coverage.cmdline.main(['raise'])
self.assertEqual(ret, 1)
self.assertEqual(self.stdout(), "")
err = self.stderr().split('\n')
self.assertEqual(err[0], 'Traceback (most recent call last):')
self.assertEqual(err[-3], ' raise Exception("oh noes!")')
self.assertEqual(err[-2], 'Exception: oh noes!')
def test_internalraise(self):
with self.assertRaisesRegex(ValueError, "coverage is broken"):
coverage.cmdline.main(['internalraise'])
def test_exit(self):
ret = coverage.cmdline.main(['exit'])
self.assertEqual(ret, 23)
|
|
import requests
from datetime import datetime
import os
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36'
}
src_dir = os.getcwd()
csv_dir = os.path.join(src_dir, "CSV_price")
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
def RepresentsFloat(s):
try:
float(s)
return True
except ValueError:
return False
class Quote(object):
CLASS_AND_FUNC_FMT = '%Y%m%d'
CSV_INPUT_AND_OUTPUT_FMT = "%Y-%m-%d"
def __init__(self, symbol):
self.symbol = symbol
self.date, self.time, self.open_, self.high, self.low, self.close, self.volume = ([] for _ in range(7))
def append(self, dt, open_, high, low, close, volume):
if RepresentsFloat(open_) and RepresentsFloat(high) and RepresentsFloat(low) and RepresentsFloat(close) and \
RepresentsFloat(volume):
self.date.append(dt)
self.open_.append(float(open_))
self.high.append(float(high))
self.low.append(float(low))
self.close.append(float(close))
self.volume.append(float(volume))
def prepend(self, dt, open_, high, low, close, volume):
if RepresentsFloat(open_) and RepresentsFloat(high) and RepresentsFloat(low) and RepresentsFloat(close) and \
RepresentsFloat(volume):
self.date = [dt] + self.date
self.open_ = [float(open_)] + self.open_
self.high = [float(high)] + self.high
self.low = [float(low)] + self.low
self.close = [float(close)] + self.close
self.volume = [float(volume)] + self.volume
def return_list(self):
return [d.strftime(self.CLASS_AND_FUNC_FMT) for d in
self.date], self.open_, self.high, self.low, self.close, self.volume
def get_vol(self, date_b=None, date_e=None):
if date_e is None or date_b is None:
return [d.strftime(self.CLASS_AND_FUNC_FMT) for d in self.date], self.volume
else:
date_b = datetime.strptime(date_b, "%Y%m%d")
date_e = datetime.strptime(date_e, "%Y%m%d")
b, e = 0, len(self.date) - 1
while b < len(self.date) and date_b > self.date[b]:
b += 1
while e >= 0 and date_e < self.date[e]:
e -= 1
return [d.strftime(self.CLASS_AND_FUNC_FMT) for d in self.date[b:e+1]], self.volume[b:e+1]
def get_price(self, date_b=None, date_e=None):
if date_e is None or date_b is None:
return [d.strftime(self.CLASS_AND_FUNC_FMT) for d in self.date], self.close
else:
date_b = datetime.strptime(date_b, "%Y%m%d")
date_e = datetime.strptime(date_e, "%Y%m%d")
b, e = 0, len(self.date) - 1
while b < len(self.date) and date_b > self.date[b]:
b += 1
while e >= 0 and date_e < self.date[e]:
e -= 1
return [d.strftime(self.CLASS_AND_FUNC_FMT) for d in self.date[b:e+1]], self.close[b:e+1]
def get_high_price(self, date_b=None, date_e=None):
if date_e is None or date_b is None:
return [d.strftime(self.CLASS_AND_FUNC_FMT) for d in self.date], self.high
else:
date_b = datetime.strptime(date_b, "%Y%m%d")
date_e = datetime.strptime(date_e, "%Y%m%d")
b, e = 0, len(self.date) - 1
while b < len(self.date) and date_b > self.date[b]:
b += 1
while e >= 0 and date_e < self.date[e]:
e -= 1
return [d.strftime(self.CLASS_AND_FUNC_FMT) for d in self.date[b:e+1]], self.high[b:e+1]
def get_low_price(self, date_b=None, date_e=None):
if date_e is None or date_b is None:
return [d.strftime(self.CLASS_AND_FUNC_FMT) for d in self.date], self.low
else:
date_b = datetime.strptime(date_b, "%Y%m%d")
date_e = datetime.strptime(date_e, "%Y%m%d")
b, e = 0, len(self.date) - 1
while b < len(self.date) and date_b > self.date[b]:
b += 1
while e >= 0 and date_e < self.date[e]:
e -= 1
return [d.strftime(self.CLASS_AND_FUNC_FMT) for d in self.date[b:e+1]], self.low[b:e+1]
def get_date(self):
return [d.strftime(self.CLASS_AND_FUNC_FMT) for d in self.date]
def to_csv(self):
return ''.join(["{0},{1:.2f},{2:.2f},{3:.2f},{4:.2f},{5}\n".format(self.date[bar].strftime(
self.CSV_INPUT_AND_OUTPUT_FMT),
self.open_[bar], self.high[bar],
self.low[bar], self.close[bar],
self.volume[bar])
for bar in range(len(self.close))])
def write_csv(self, filename=None):
        # Write into the CSV_price directory next to this module (csv_dir defined above).
        with open(os.path.join(csv_dir, self.symbol + ".csv") if filename is None
                  else os.path.join(csv_dir, filename), 'w') as f:
f.write(self.to_csv())
def read_api_csv(self, filename):
"""
        Read a CSV whose rows have the format "yyyy-mm-dd, open, high, low, close, volume".
        This format is exactly the same as the output of write_csv().
        :param filename: name of the CSV file to read.
        :return: True on success.
"""
self.date, self.open_, self.high, self.low, self.close, self.volume = ([] for _ in range(6))
        for line in open(os.path.join(csv_dir, filename), 'r'):
ds, open_, high, low, close, volume = line.rstrip().split(',')
dt = datetime.strptime(ds, self.CSV_INPUT_AND_OUTPUT_FMT)
self.append(dt, open_, high, low, close, volume)
return True
def read_yahoo_csv(self, filename):
"""
        Read a CSV whose rows have the format "yyyy-mm-dd, open, high, low, close, adjclose, volume".
        The file also has one extra header row at the beginning specifying the name of each column.
        This is the format of data downloaded from Yahoo Finance.
        :param filename: name of the CSV file to read.
        :return: True on success.
"""
flag = True
self.date, self.open_, self.high, self.low, self.close, self.volume = ([] for _ in range(6))
        for line in open(os.path.join(csv_dir, filename), 'r'):
if flag:
flag = False
continue
ds, open_, high, low, close, adj_close, volume = line.rstrip().split(',')
dt = datetime.strptime(ds, self.CSV_INPUT_AND_OUTPUT_FMT)
            # Yahoo's adjusted close is used as the close price; the raw close column is ignored.
            self.append(dt, open_, high, low, adj_close, volume)
return True
def __repr__(self):
return self.to_csv()
class GoogleQuote(Quote):
"""
    Daily quotes from Google Finance. Input date format: 'yyyymmdd'.
"""
def __init__(self, symbol, start_date, end_date):
super(GoogleQuote, self).__init__(symbol)
self.symbol = symbol.upper()
start = datetime(int(start_date[0:4]), int(start_date[4:6]), int(start_date[6:8]))
end = datetime(int(end_date[0:4]), int(end_date[4:6]), int(end_date[6:8]))
url_string = "http://www.google.com/finance/historical?q={0}".format(self.symbol)
url_string += "&startdate={0}&enddate={1}&output=csv".format(
start.strftime('%b %d, %Y'), end.strftime('%b %d, %Y'))
csv = requests.get(url_string, headers=headers).text.split("\n")
csv.reverse()
for bar in range(1, len(csv) - 1):
ds, open_, high, low, close, volume = csv[bar].split(',')
            # The conditional below exists because a date may have only a closing price.
            # We simply assign a placeholder of 0.0 to the remaining fields because the closing price is the only one we need.
open_, high, low, close = [float(x) if x != "-" else 0.0 for x in [open_, high, low, close]]
dt = datetime.strptime(ds, '%d-%b-%y')
self.append(dt, open_, high, low, close, volume)
def read_api_csv(filename, symbol):
q = Quote(symbol)
q.read_api_csv(filename)
return q
def read_yahoo_csv(filename, symbol):
q = Quote(symbol)
q.read_yahoo_csv(filename)
return q
def write_csv(q, filename):
q.write_csv(filename)
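# Hedged usage sketch (not part of the original module): build a Quote in
# memory and render it as CSV. The symbol and prices below are made up for
# illustration only.
if __name__ == "__main__":
    q = Quote("DEMO")
    q.append(datetime(2016, 1, 4), 100.0, 101.5, 99.5, 101.0, 250000)
    q.append(datetime(2016, 1, 5), 101.0, 102.0, 100.0, 100.5, 180000)
    # to_csv() yields "yyyy-mm-dd,open,high,low,close,volume" rows, which is
    # exactly the format read_api_csv() expects back.
    print(q.to_csv())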
|
|
from importlib import import_module
from django.conf import settings
from django.contrib.admindocs.views import simplify_regex
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
from django.utils import six
from django.utils.encoding import force_text
from rest_framework import exceptions, serializers
from rest_framework.compat import coreapi, uritemplate, urlparse
from rest_framework.request import clone_request
from rest_framework.views import APIView
def as_query_fields(items):
"""
Take a list of Fields and plain strings.
    Convert any plain strings into `location='query'` Field instances.
"""
return [
item if isinstance(item, coreapi.Field) else coreapi.Field(name=item, required=False, location='query')
for item in items
]
def is_api_view(callback):
"""
Return `True` if the given view callback is a REST framework view/viewset.
"""
cls = getattr(callback, 'cls', None)
return (cls is not None) and issubclass(cls, APIView)
class SchemaGenerator(object):
default_mapping = {
'get': 'read',
'post': 'create',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy',
}
known_actions = (
'create', 'read', 'retrieve', 'list',
'update', 'partial_update', 'destroy'
)
def __init__(self, title=None, url=None, patterns=None, urlconf=None):
assert coreapi, '`coreapi` must be installed for schema support.'
if patterns is None and urlconf is not None:
if isinstance(urlconf, six.string_types):
urls = import_module(urlconf)
else:
urls = urlconf
self.patterns = urls.urlpatterns
elif patterns is None and urlconf is None:
urls = import_module(settings.ROOT_URLCONF)
self.patterns = urls.urlpatterns
else:
self.patterns = patterns
if url and not url.endswith('/'):
url += '/'
self.title = title
self.url = url
self.endpoints = None
def get_schema(self, request=None):
if self.endpoints is None:
self.endpoints = self.get_api_endpoints(self.patterns)
links = []
for path, method, category, action, callback in self.endpoints:
view = callback.cls()
for attr, val in getattr(callback, 'initkwargs', {}).items():
setattr(view, attr, val)
view.args = ()
view.kwargs = {}
view.format_kwarg = None
actions = getattr(callback, 'actions', None)
if actions is not None:
if method == 'OPTIONS':
view.action = 'metadata'
else:
view.action = actions.get(method.lower())
if request is not None:
view.request = clone_request(request, method)
try:
view.check_permissions(view.request)
except exceptions.APIException:
continue
else:
view.request = None
link = self.get_link(path, method, callback, view)
links.append((category, action, link))
if not links:
return None
# Generate the schema content structure, eg:
# {'users': {'list': Link()}}
content = {}
for category, action, link in links:
if category is None:
content[action] = link
elif category in content:
content[category][action] = link
else:
content[category] = {action: link}
# Return the schema document.
return coreapi.Document(title=self.title, content=content, url=self.url)
def get_api_endpoints(self, patterns, prefix=''):
"""
Return a list of all available API endpoints by inspecting the URL conf.
"""
api_endpoints = []
for pattern in patterns:
path_regex = prefix + pattern.regex.pattern
if isinstance(pattern, RegexURLPattern):
path = self.get_path(path_regex)
callback = pattern.callback
if self.should_include_endpoint(path, callback):
for method in self.get_allowed_methods(callback):
action = self.get_action(path, method, callback)
category = self.get_category(path, method, callback, action)
endpoint = (path, method, category, action, callback)
api_endpoints.append(endpoint)
elif isinstance(pattern, RegexURLResolver):
nested_endpoints = self.get_api_endpoints(
patterns=pattern.url_patterns,
prefix=path_regex
)
api_endpoints.extend(nested_endpoints)
return api_endpoints
def get_path(self, path_regex):
"""
Given a URL conf regex, return a URI template string.
"""
path = simplify_regex(path_regex)
path = path.replace('<', '{').replace('>', '}')
return path
def should_include_endpoint(self, path, callback):
"""
Return `True` if the given endpoint should be included.
"""
if not is_api_view(callback):
return False # Ignore anything except REST framework views.
if path.endswith('.{format}') or path.endswith('.{format}/'):
return False # Ignore .json style URLs.
if path == '/':
return False # Ignore the root endpoint.
return True
def get_allowed_methods(self, callback):
"""
Return a list of the valid HTTP methods for this endpoint.
"""
if hasattr(callback, 'actions'):
return [method.upper() for method in callback.actions.keys()]
return [
method for method in
callback.cls().allowed_methods if method not in ('OPTIONS', 'HEAD')
]
def get_action(self, path, method, callback):
"""
Return a descriptive action string for the endpoint, eg. 'list'.
"""
actions = getattr(callback, 'actions', self.default_mapping)
return actions[method.lower()]
def get_category(self, path, method, callback, action):
"""
Return a descriptive category string for the endpoint, eg. 'users'.
Examples of category/action pairs that should be generated for various
endpoints:
/users/ [users][list], [users][create]
/users/{pk}/ [users][read], [users][update], [users][destroy]
/users/enabled/ [users][enabled] (custom action)
/users/{pk}/star/ [users][star] (custom action)
/users/{pk}/groups/ [groups][list], [groups][create]
/users/{pk}/groups/{pk}/ [groups][read], [groups][update], [groups][destroy]
"""
path_components = path.strip('/').split('/')
path_components = [
component for component in path_components
if '{' not in component
]
if action in self.known_actions:
# Default action, eg "/users/", "/users/{pk}/"
idx = -1
else:
# Custom action, eg "/users/{pk}/activate/", "/users/active/"
idx = -2
try:
return path_components[idx]
except IndexError:
return None
# Methods for generating each individual `Link` instance...
def get_link(self, path, method, callback, view):
"""
Return a `coreapi.Link` instance for the given endpoint.
"""
fields = self.get_path_fields(path, method, callback, view)
fields += self.get_serializer_fields(path, method, callback, view)
fields += self.get_pagination_fields(path, method, callback, view)
fields += self.get_filter_fields(path, method, callback, view)
if fields and any([field.location in ('form', 'body') for field in fields]):
encoding = self.get_encoding(path, method, callback, view)
else:
encoding = None
if self.url and path.startswith('/'):
path = path[1:]
return coreapi.Link(
url=urlparse.urljoin(self.url, path),
action=method.lower(),
encoding=encoding,
fields=fields
)
def get_encoding(self, path, method, callback, view):
"""
Return the 'encoding' parameter to use for a given endpoint.
"""
# Core API supports the following request encodings over HTTP...
supported_media_types = set((
'application/json',
'application/x-www-form-urlencoded',
'multipart/form-data',
))
parser_classes = getattr(view, 'parser_classes', [])
for parser_class in parser_classes:
media_type = getattr(parser_class, 'media_type', None)
if media_type in supported_media_types:
return media_type
# Raw binary uploads are supported with "application/octet-stream"
if media_type == '*/*':
return 'application/octet-stream'
return None
def get_path_fields(self, path, method, callback, view):
"""
Return a list of `coreapi.Field` instances corresponding to any
templated path variables.
"""
fields = []
for variable in uritemplate.variables(path):
field = coreapi.Field(name=variable, location='path', required=True)
fields.append(field)
return fields
def get_serializer_fields(self, path, method, callback, view):
"""
Return a list of `coreapi.Field` instances corresponding to any
request body input, as determined by the serializer class.
"""
if method not in ('PUT', 'PATCH', 'POST'):
return []
if not hasattr(view, 'get_serializer'):
return []
serializer = view.get_serializer()
if isinstance(serializer, serializers.ListSerializer):
return [coreapi.Field(name='data', location='body', required=True)]
if not isinstance(serializer, serializers.Serializer):
return []
fields = []
for field in serializer.fields.values():
if field.read_only or isinstance(field, serializers.HiddenField):
continue
required = field.required and method != 'PATCH'
description = force_text(field.help_text) if field.help_text else ''
field = coreapi.Field(
name=field.source,
location='form',
required=required,
description=description
)
fields.append(field)
return fields
def get_pagination_fields(self, path, method, callback, view):
if method != 'GET':
return []
if hasattr(callback, 'actions') and ('list' not in callback.actions.values()):
return []
if not getattr(view, 'pagination_class', None):
return []
paginator = view.pagination_class()
return as_query_fields(paginator.get_fields(view))
def get_filter_fields(self, path, method, callback, view):
if method != 'GET':
return []
if hasattr(callback, 'actions') and ('list' not in callback.actions.values()):
return []
if not hasattr(view, 'filter_backends'):
return []
fields = []
for filter_backend in view.filter_backends:
fields += as_query_fields(filter_backend().get_fields(view))
return fields
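# Hedged sketch (illustration only, not part of the original module): the
# category derivation implemented in SchemaGenerator.get_category() above,
# reduced to plain string handling so the "last or second-to-last
# non-templated path component" rule can be checked without Django.
def _category_for_example(path, action, known_actions=SchemaGenerator.known_actions):
    components = [c for c in path.strip('/').split('/') if '{' not in c]
    idx = -1 if action in known_actions else -2
    try:
        return components[idx]
    except IndexError:
        return None

assert _category_for_example('/users/{pk}/', 'read') == 'users'
assert _category_for_example('/users/{pk}/star/', 'star') == 'users'
assert _category_for_example('/users/{pk}/groups/', 'list') == 'groups'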
|
|
import unittest, pprint
from simpleparse.parser import Parser
from simpleparse.stt.TextTools import TextTools
from genericvalues import NullResult, AnyInt
class ParserGenerationTests(unittest.TestCase):
def doBasicTest(self, definition, parserName, testValue, expected, ):
result = Parser( definition).parse( testValue, parserName )
assert result == expected, '''\nexpected:%s\n got:%s\n'''%( expected, result )
def testGenNegRange1( self ):
self.doBasicTest(
'''s := - something *
<something> := [ab]''',
's',
'mmmab',
(1,[],3)
)
def testGenNegRange2( self ):
self.doBasicTest(
'''s := - something
<something> := [ab]''',
's',
'mmmab',
(1,[],1)
)
def testGenNegLit1( self ):
self.doBasicTest(
'''s := - something *
<something> := "a"''',
's',
'mmmab',
(1,[],3)
)
def testGenPosReptOpt1( self ):
self.doBasicTest(
'''s := something *
something := "a" ''',
's',
'aammmab',
(1,[("something",0,1,NullResult),("something",1,2,NullResult)],2)
)
def testGenPosReptOpt2( self ):
self.doBasicTest(
'''s := something *
something := "a" ''',
's',
'mmmab',
(1,[],0)
)
def testGenPosRept1( self ):
self.doBasicTest(
'''s := something +
something := "a" ''',
's',
'mmmab',
(0,[],AnyInt)
)
def testLookaheadPositive( self ):
self.doBasicTest(
'''s := ?"b"
''',
's',
'bbbba',
(1,[
],0)
)
def testLookaheadNeg( self ):
self.doBasicTest(
'''s := ?-"b"
''',
's',
'bbbba',
(0,[
],AnyInt)
)
def testLookaheadNeg2( self ):
self.doBasicTest(
'''s := ?-"b"?
''',
's',
'bbbba',
(1,[
],0)
)
def testLookaheadNeg3( self ):
self.doBasicTest(
'''s := "b", ?-"a"
''',
's',
'bbbba',
(1,[
],1)
)
def testLookaheadNeg4( self ):
self.doBasicTest(
'''s := "b", ?-"a", "ba"
''',
's',
'bba',
(1,[
],3)
)
def testLookaheadNeg5( self ):
self.doBasicTest(
'''s := ?-t, "ba"
t := "bad"
''',
's',
'bac',
(1,[
],2)
)
def testLookaheadNeg6( self ):
self.doBasicTest(
'''s := ?-t, "ba"
t := "bad"
''',
's',
'bad',
(0,[
],AnyInt)
)
def testLookahead2( self ):
"""Test lookahead on literals (more complex)"""
self.doBasicTest(
'''s := something+, "ba"
something := "b",?-"a"
''',
's',
'bbbba',
(1,[
("something",0,1,NullResult),
("something",1,2,NullResult),
("something",2,3,NullResult),
],5)
)
def testLookahead3( self ):
"""Test lookahead on reported positive productions"""
self.doBasicTest(
'''s := ?trailer
trailer := "bad"
''',
's',
'badba',
(1,[
("trailer",0,3,NullResult),
],0)
)
def testLookahead4( self ):
self.doBasicTest(
'''s := ?-trailer?
trailer := "bad"
''',
's',
'badba',
(1,[
],0)
)
def testLookahead5( self ):
self.doBasicTest(
'''s := ?-trailer, 'ba'
trailer := "bad"
''',
's',
'babba',
(1,[
],2)
)
def testLookahead6( self ):
self.doBasicTest(
'''s := ?-trailer, 'ba'
trailer := "bad"
''',
's',
'badba',
(0,[
],AnyInt)
)
def testGenPos1( self ):
self.doBasicTest(
'''s := something
something := "a" ''',
's',
'mmmab',
(0,[],AnyInt)
)
def testGenPos2( self ):
self.doBasicTest(
'''s := something
something := "a" ''',
's',
'ammmab',
(1,[('something',0,1,NullResult),],1)
)
def testOptionalGroupHitEOF( self ):
"""Test optional group hitting an EOF during success run"""
self.doBasicTest(
'''s := something*
something := ("a"/"b") ''',
's',
'aa',
(1,[
('something',0,1,NullResult),
('something',1,2,NullResult),
],2)
)
def testMultiLineDef( self ):
"""Test multi-line definitions"""
self.doBasicTest(
'''s :=
something*
something := (
"a"/
"b"
) ''',
's',
'aa',
(1,[
('something',0,1,NullResult),
('something',1,2,NullResult),
],2)
)
## def testRepeatOptionalFail( self ):
## """Explicit test of the optional-repeating-child of repeating object
## """
## self.doBasicTest(
## r'''
## controlword := '\\',('*','\\')?,[-a-zA-Z0-9]+
## contents := -[\012}\\]*
## file := (controlword/contents)+
## ''',
## "file",
## "\\*\\test sdf ff f f sdfff\\",
## (1, [
## ("controlword", 0,7,[]),
## ("contents",7,24),
## ],24),
## )
def testGenCILiteral1( self ):
self.doBasicTest(
'''s := c"this"''',
's',
'this',
(1,[],4)
)
def testGenCILiteral2( self ):
self.doBasicTest(
'''s := c"this"''',
's',
'This',
(1,[],4)
)
def testGenCILiteral3( self ):
self.doBasicTest(
'''s := c"this"''',
's',
'THIS',
(1,[],4)
)
def testGenCILiteral4( self ):
self.doBasicTest(
'''s := -c"this"''',
's',
' THIS',
(1,[],1)
)
def testGenCILiteral5( self ):
self.doBasicTest(
'''s := -c"this"''',
's',
' thi',
(1,[],1)
)
def testGenCILiteral6( self ):
self.doBasicTest(
'''s := -c"this"*''',
's',
' thi',
(1,[],4)
)
class NameTests(unittest.TestCase):
def doBasicTest(self, definition, parserName, testValue, expected, ):
result = Parser( definition).parse( testValue, production=parserName )
assert result == expected, '''\nexpected:%s\n got:%s\n'''%( expected, result )
def test_p( self ):
self.doBasicTest(
'''s := something
something := "a" ''',
's',
'ammmab',
(1,[('something',0,1,NullResult),],1)
)
def test_po( self ):
self.doBasicTest(
'''s := something?
something := "a" ''',
's',
'ammmab',
(1,[('something',0,1,NullResult),],1)
)
def test_por( self ):
self.doBasicTest(
'''s := something*
something := "a" ''',
's',
'ammmab',
(1,[('something',0,1,NullResult),],1)
)
def test_pr( self ):
self.doBasicTest(
'''s := something+
something := "a" ''',
's',
'ammmab',
(1,[('something',0,1,NullResult),],1)
)
def test_n( self ):
self.doBasicTest(
'''s := - something
<something> := [ab]''',
's',
'mmmab',
(1,[],1)
)
def test_no( self ):
self.doBasicTest(
'''s := - something?
<something> := [ab]''',
's',
'mmmab',
(1,[],1)
)
def test_nor( self ):
self.doBasicTest(
'''s := - something*
<something> := [ab]''',
's',
'mmmab',
(1,[],3)
)
def test_nr( self ):
self.doBasicTest(
'''s := - something+
<something> := [ab]''',
's',
'mmmab',
(1,[],3)
)
def test_n_f( self ):
self.doBasicTest(
'''s := - something
<something> := [ab]''',
's',
'ammmab',
(0,[],AnyInt)
)
def test_no_f( self ):
self.doBasicTest(
'''s := - something?
<something> := [ab]''',
's',
'ammmab',
(1,[],0)
)
def test_nor_f( self ):
self.doBasicTest(
'''s := - something*
<something> := [ab]''',
's',
'ammmab',
(1,[],0)
)
def test_nr_f( self ):
self.doBasicTest(
'''s := - something +
<something> := [ab]''',
's',
'ammmab',
(0,[],AnyInt)
)
## def test_por_big( self ):
## """This test creates 1,000,000 result tuples (very inefficiently, I might add)...
## on my machine that takes a long time, so I do not bother with the test
## (note that with a recursive mx.TextTools, this should actually blow up
## long before you get into memory problems :) ).
## """
## self.doBasicTest(
## '''s := something*
## something := "a" ''',
## 's',
## 'a'*1000000,
## (1,[
## ],1000000)
## )
def test_expanded_name( self ):
"""Non-reporting (expanded) name test
        Tests a new feature: a name whose children
        are reported, but which is not itself reported.
        Basically this lets you create anonymous
        groups which can be referenced from other
        productions.
"""
self.doBasicTest(
'''s := something +
>something< := r
r := [ab]
v := [c]
''',
's',
'abammmab',
(1,[
('r',0,1, NullResult),
('r',1,2, NullResult),
('r',2,3, NullResult),
],3)
)
def test_expanded_SingleNameChild( self ):
"""Expanded group with single child which is a Name itself
This originally failed when the Name object's report value
was changed to 0 (redundant information for the "expanded" code),
resulting in the child production not getting reported.
"""
self.doBasicTest(
'''s := something +
something := r
r := [ab]''',
'something',
'abammmab',
(1,[
('r',0,1, NullResult),
],1)
)
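# Hedged sketch (illustration only, not part of the test suite): what the
# result tuples asserted above look like for a trivial grammar, mirroring the
# Parser(...).parse(...) call pattern used by doBasicTest().
def _example_parse_result():
    """Return (1, [], 3): success, no reported children, stopped at index 3."""
    return Parser('''s := "a"+''').parse('aaab', 's')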
class BasicMethodSource:
def __init__( self ):
self.results = []
def _m_a( self, taglist,text,l,r,subtags ):
self.results.append( ('a',text[l:r]))
def _m_b( self, taglist, text, l,r,subtags):
self.results.append( ('b',l,r) )
_m_c = TextTools.AppendMatch
_m_d = TextTools.AppendTagobj
_o_d = "hello world"
class AppendToTagobjMethodSource:
def __init__( self ):
self._o_d = []
_m_d = TextTools.AppendToTagobj
class CallTests(unittest.TestCase):
"""Tests semantics of calling objects from a method source during parsing"""
def parse( self, definition, parserName, testValue, source):
result = Parser(
definition,
).parse(testValue, production=parserName, processor = source)
return result
def test_basic_call( self ):
"""Test basic ability to call a method instead of regular functioning"""
source = BasicMethodSource()
self.parse( """
x := (a/b)*
a := "a"
b := "b"
""", 'x', 'abba', source)
assert source.results == [ ('a','a'),('b',1,2),('b',2,3),('a','a'),], """Method source methods were not called, or called improperly:\n%s"""%(source.results,)
def test_AppendMatch( self ):
"""Test ability to append the text-string match to the results list"""
source = BasicMethodSource()
result = self.parse( """
x := c*
c := 'c'
""", 'x', 'ccc', source)
assert result == (1,[
'c','c','c',
],3), """Result was %s"""%( result, )
def test_AppendTagObj( self ):
"""Test appending the tagobject to the results list"""
source = BasicMethodSource()
result = self.parse( """
x := d*
d := 'd'
""", 'x', 'ddd', source)
assert result == (1,[
"hello world","hello world","hello world",
],3)
    def test_AppendToTagObj( self ):
        """Test appending match records to the processor's tag object (AppendToTagobj)"""
source = AppendToTagobjMethodSource()
result = self.parse( """
x := d*
d := 'd'
""", 'x', 'ddd', source)
assert source._o_d == [ (None,0,1,NullResult),(None,1,2,NullResult),(None,2,3,NullResult)], """Method source methods were not called, or called improperly:\n%s"""%(source._o_d,)
import test_grammarparser
import test_erroronfail
def getSuite():
return unittest.TestSuite((
test_grammarparser.getSuite(),
test_erroronfail.getSuite(),
unittest.makeSuite(ParserGenerationTests, 'test'),
unittest.makeSuite(NameTests, 'test'),
unittest.makeSuite(CallTests, 'test'),
))
if __name__ == "__main__":
unittest.main(defaultTest="getSuite")
|
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Cathal McCabe"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "[email protected]"
import time
import struct
from pynq import MMIO
from pynq.iop import request_iop
from pynq.iop import iop_const
from pynq.iop import PMODA
from pynq.iop import PMODB
from pynq.iop import ARDUINO
from pynq.iop import PMOD_GROVE_G3
from pynq.iop import PMOD_GROVE_G4
from pynq.iop import ARDUINO_GROVE_I2C
PMOD_GROVE_ADC_PROGRAM = "pmod_grove_adc.bin"
ARDUINO_GROVE_ADC_PROGRAM = "arduino_grove_adc.bin"
GROVE_ADC_LOG_START = iop_const.MAILBOX_OFFSET+16
GROVE_ADC_LOG_END = GROVE_ADC_LOG_START+(1000*4)
class Grove_ADC(object):
"""This class controls the Grove IIC ADC.
Grove ADC is a 12-bit precision ADC module based on ADC121C021. Hardware
version: v1.2.
Attributes
----------
iop : _IOP
I/O processor instance used by Grove_ADC.
mmio : MMIO
Memory-mapped I/O instance to read and write instructions and data.
log_running : int
The state of the log (0: stopped, 1: started).
log_interval_ms : int
Time in milliseconds between sampled reads of the Grove_ADC sensor.
"""
    def __init__(self, if_id, gr_pin):
        """Return a new instance of a Grove_ADC object.
Note
----
The parameter `gr_pin` is a list organized as [scl_pin, sda_pin].
Parameters
----------
if_id : int
The interface ID (1,2,3) corresponding to (PMODA,PMODB,ARDUINO).
gr_pin: list
            A group of pins on the StickIt connector or the Arduino shield.
"""
if if_id in [PMODA, PMODB]:
            if gr_pin not in [PMOD_GROVE_G3,
                              PMOD_GROVE_G4]:
raise ValueError("ADC group number can only be G3 - G4.")
GROVE_ADC_PROGRAM = PMOD_GROVE_ADC_PROGRAM
elif if_id in [ARDUINO]:
            if gr_pin not in [ARDUINO_GROVE_I2C]:
raise ValueError("ADC group number can only be I2C.")
GROVE_ADC_PROGRAM = ARDUINO_GROVE_ADC_PROGRAM
else:
raise ValueError("No such IOP for grove device.")
self.iop = request_iop(if_id, GROVE_ADC_PROGRAM)
self.mmio = self.iop.mmio
self.log_interval_ms = 1000
self.log_running = 0
self.iop.start()
if if_id in [PMODA, PMODB]:
# Write SCL and SDA pin config
self.mmio.write(iop_const.MAILBOX_OFFSET, gr_pin[0])
self.mmio.write(iop_const.MAILBOX_OFFSET+4, gr_pin[1])
# Write configuration and wait for ACK
self.mmio.write(iop_const.MAILBOX_OFFSET +
iop_const.MAILBOX_PY2IOP_CMD_OFFSET, 1)
while (self.mmio.read(iop_const.MAILBOX_OFFSET +
iop_const.MAILBOX_PY2IOP_CMD_OFFSET) == 1):
pass
def read_raw(self):
"""Read the ADC raw value from the Grove ADC peripheral.
Returns
-------
int
The raw value from the sensor.
"""
self.mmio.write(iop_const.MAILBOX_OFFSET+
iop_const.MAILBOX_PY2IOP_CMD_OFFSET, 2)
while (self.mmio.read(iop_const.MAILBOX_OFFSET +
iop_const.MAILBOX_PY2IOP_CMD_OFFSET) == 2):
pass
value = self.mmio.read(iop_const.MAILBOX_OFFSET)
return value
def read(self):
"""Read the ADC voltage from the Grove ADC peripheral.
Returns
-------
float
The float value after translation.
"""
self.mmio.write(iop_const.MAILBOX_OFFSET +
iop_const.MAILBOX_PY2IOP_CMD_OFFSET, 3)
while (self.mmio.read(iop_const.MAILBOX_OFFSET +
iop_const.MAILBOX_PY2IOP_CMD_OFFSET) == 3):
pass
value = self.mmio.read(iop_const.MAILBOX_OFFSET)
return self._reg2float(value)
    def set_log_interval_ms(self, log_interval_ms):
        """Set the sampling interval for the Grove_ADC peripheral.
This method can set the time interval between two samples, so that
users can read out multiple values in a single log.
Parameters
----------
log_interval_ms : int
The time between two samples in milliseconds, for logging only.
Returns
-------
None
"""
if log_interval_ms < 0:
raise ValueError("Time between samples should be no less than 0.")
self.log_interval_ms = log_interval_ms
self.mmio.write(iop_const.MAILBOX_OFFSET+4, self.log_interval_ms)
def start_log_raw(self):
"""Start recording raw data in a log.
        This method will first call set_log_interval_ms() before writing to
the MMIO.
Returns
-------
None
"""
self.log_running = 1
self.set_log_interval_ms(self.log_interval_ms)
self.mmio.write(iop_const.MAILBOX_OFFSET + \
iop_const.MAILBOX_PY2IOP_CMD_OFFSET, 4)
def start_log(self):
"""Start recording multiple voltage values (float) in a log.
        This method will first call set_log_interval_ms() before writing to
the MMIO.
Returns
-------
None
"""
self.log_running = 1
self.set_log_interval_ms(self.log_interval_ms)
self.mmio.write(iop_const.MAILBOX_OFFSET + \
iop_const.MAILBOX_PY2IOP_CMD_OFFSET, 5)
def stop_log_raw(self):
"""Stop recording the raw values in the log.
Simply write 0xC to the MMIO to stop the log.
Returns
-------
None
"""
if self.log_running == 1:
self.mmio.write(iop_const.MAILBOX_OFFSET+ \
iop_const.MAILBOX_PY2IOP_CMD_OFFSET, 12)
self.log_running = 0
else:
raise RuntimeError("No grove ADC log running.")
def stop_log(self):
"""Stop recording the voltage values in the log.
This can be done by calling the stop_log_raw() method.
Returns
-------
None
"""
if self.log_running == 1:
self.mmio.write(iop_const.MAILBOX_OFFSET +
iop_const.MAILBOX_PY2IOP_CMD_OFFSET, 12)
self.log_running = 0
else:
raise RuntimeError("No grove ADC log running.")
def get_log_raw(self):
"""Return list of logged raw samples.
Returns
-------
list
List of valid raw samples from the ADC sensor.
"""
# Stop logging
self.stop_log()
# Prep iterators and results list
head_ptr = self.mmio.read(iop_const.MAILBOX_OFFSET+0x8)
tail_ptr = self.mmio.read(iop_const.MAILBOX_OFFSET+0xC)
readings = list()
# Sweep circular buffer for samples
if head_ptr == tail_ptr:
return None
elif head_ptr < tail_ptr:
for i in range(head_ptr,tail_ptr,4):
readings.append(self.mmio.read(i))
else:
for i in range(head_ptr,GROVE_ADC_LOG_END,4):
readings.append(self.mmio.read(i))
for i in range(GROVE_ADC_LOG_START,tail_ptr,4):
readings.append(self.mmio.read(i))
return readings
def get_log(self):
"""Return list of logged samples.
Returns
-------
list
List of valid voltage samples (floats) from the ADC sensor.
"""
# Stop logging
self.stop_log()
# Prep iterators and results list
head_ptr = self.mmio.read(iop_const.MAILBOX_OFFSET+0x8)
tail_ptr = self.mmio.read(iop_const.MAILBOX_OFFSET+0xC)
readings = list()
# Sweep circular buffer for samples
if head_ptr == tail_ptr:
return None
elif head_ptr < tail_ptr:
for i in range(head_ptr,tail_ptr,4):
readings.append(float("{0:.4f}"\
.format(self._reg2float(self.mmio.read(i)))))
else:
for i in range(head_ptr,GROVE_ADC_LOG_END,4):
readings.append(float("{0:.4f}"\
.format(self._reg2float(self.mmio.read(i)))))
for i in range(GROVE_ADC_LOG_START,tail_ptr,4):
readings.append(float("{0:.4f}"\
.format(self._reg2float(self.mmio.read(i)))))
return readings
def reset(self):
"""Resets/initializes the ADC.
Returns
-------
None
"""
# Send command and wait for acknowledge
self.mmio.write(iop_const.MAILBOX_OFFSET +
iop_const.MAILBOX_PY2IOP_CMD_OFFSET, 12)
while (self.mmio.read(iop_const.MAILBOX_OFFSET +
iop_const.MAILBOX_PY2IOP_CMD_OFFSET) == 12):
pass
    def _reg2float(self, reg):
        """Convert a 32-bit register value to a Python float.
Parameters
----------
reg: int
A 32-bit register value read from the mailbox.
Returns
-------
float
A float number translated from the register value.
"""
s = struct.pack('>l', reg)
return struct.unpack('>f', s)[0]
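# Hedged usage sketch (not part of the original module). Talking to the ADC
# needs a PYNQ board with the Grove ADC attached, so the calls are shown as
# comments only; the import path below is an assumption.
#
#     from pynq.iop import Grove_ADC, PMODB, PMOD_GROVE_G4
#     adc = Grove_ADC(PMODB, PMOD_GROVE_G4)
#     print(adc.read())              # single voltage reading (float)
#     adc.set_log_interval_ms(100)
#     adc.start_log()
#     ...                            # let the IOP sample for a while
#     print(adc.get_log())           # list of floats; logging is stopped
#
# The _reg2float() repacking itself can be checked without hardware:
# 0x3F800000 is the IEEE-754 single-precision encoding of 1.0.
assert abs(struct.unpack('>f', struct.pack('>l', 0x3F800000))[0] - 1.0) < 1e-6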
|
|
# Authors: Robert Luke <[email protected]>
#
# License: BSD (3-clause)
import re
import numpy as np
import datetime
from ..base import BaseRaw
from ..meas_info import create_info
from ..utils import _mult_cal_one
from ...annotations import Annotations
from ...utils import logger, verbose, fill_doc, warn
from ...utils.check import _require_version
from ..constants import FIFF
from .._digitization import _make_dig_points
from ...transforms import _frame_to_str
@fill_doc
def read_raw_snirf(fname, preload=False, verbose=None):
    """Reader for continuous wave SNIRF data.
.. note:: This reader supports the .snirf file type only,
not the .jnirs version.
Parameters
----------
fname : str
Path to the SNIRF data file.
%(preload)s
%(verbose)s
Returns
-------
raw : instance of RawSNIRF
A Raw object containing SNIRF data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawSNIRF(fname, preload, verbose)
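# Hedged usage sketch (not part of the original module); the file name below
# is a placeholder.
#
#     raw = read_raw_snirf('recording.snirf', preload=True)
#     raw.plot()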
def _open(fname):
return open(fname, 'r', encoding='latin-1')
@fill_doc
class RawSNIRF(BaseRaw):
"""Raw object from a continuous wave SNIRF file.
Parameters
----------
fname : str
Path to the SNIRF data file.
%(preload)s
%(verbose)s
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, fname, preload=False, verbose=None):
_require_version('h5py', 'read raw SNIRF data')
from ...externals.pymatreader.utils import _import_h5py
h5py = _import_h5py()
logger.info('Loading %s' % fname)
with h5py.File(fname, 'r') as dat:
if 'data2' in dat['nirs']:
warn("File contains multiple recordings. "
"MNE does not support this feature. "
"Only the first dataset will be processed.")
if np.array(dat.get('nirs/data1/measurementList1/dataType')) != 1:
raise RuntimeError('File does not contain continuous wave '
'data. MNE only supports reading continuous'
' wave amplitude SNIRF files. Expected type'
' code 1 but received type code %d' %
(np.array(dat.get(
'nirs/data1/measurementList1/dataType'
))))
last_samps = dat.get('/nirs/data1/dataTimeSeries').shape[0] - 1
samplingrate_raw = np.array(dat.get('nirs/data1/time'))
sampling_rate = 0
if samplingrate_raw.shape == (2, 1):
# specified as onset/samplerate
warn("Onset/sample rate SNIRF not yet supported.")
else:
# specified as time points
fs_diff = np.around(np.diff(samplingrate_raw), decimals=4)
if len(np.unique(fs_diff)) == 1:
# Uniformly sampled data
sampling_rate = 1. / np.unique(fs_diff)
else:
# print(np.unique(fs_diff))
                    warn("Non-uniformly sampled data is not supported.")
if sampling_rate == 0:
warn("Unable to extract sample rate from SNIRF file.")
sources = np.array(dat.get('nirs/probe/sourceLabels'))
detectors = np.array(dat.get('nirs/probe/detectorLabels'))
sources = [s.decode('UTF-8') for s in sources]
detectors = [d.decode('UTF-8') for d in detectors]
# Extract source and detector locations
detPos3D = np.array(dat.get('nirs/probe/detectorPos3D'))
srcPos3D = np.array(dat.get('nirs/probe/sourcePos3D'))
assert len(sources) == srcPos3D.shape[0]
assert len(detectors) == detPos3D.shape[0]
# Extract wavelengths
fnirs_wavelengths = np.array(dat.get('nirs/probe/wavelengths'))
fnirs_wavelengths = [int(w) for w in fnirs_wavelengths]
# Extract channels
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
channels = np.array([name for name in dat['nirs']['data1'].keys()])
channels_idx = np.array(['measurementList' in n for n in channels])
channels = channels[channels_idx]
channels = sorted(channels, key=natural_keys)
chnames = []
for chan in channels:
src_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/sourceIndex'))[0])
det_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/detectorIndex'))[0])
wve_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/wavelengthIndex'))[0])
ch_name = sources[src_idx - 1] + '_' +\
detectors[det_idx - 1] + ' ' +\
str(fnirs_wavelengths[wve_idx - 1])
chnames.append(ch_name)
# Create mne structure
info = create_info(chnames,
sampling_rate,
ch_types='fnirs_cw_amplitude')
subject_info = {}
names = np.array(dat.get('nirs/metaDataTags/SubjectID'))
subject_info['first_name'] = names[0].decode('UTF-8')
# Read non standard (but allowed) custom metadata tags
if 'lastName' in dat.get('nirs/metaDataTags/'):
ln = dat.get('/nirs/metaDataTags/lastName')[0].decode('UTF-8')
subject_info['last_name'] = ln
if 'middleName' in dat.get('nirs/metaDataTags/'):
m = dat.get('/nirs/metaDataTags/middleName')[0].decode('UTF-8')
subject_info['middle_name'] = m
if 'sex' in dat.get('nirs/metaDataTags/'):
s = dat.get('/nirs/metaDataTags/sex')[0].decode('UTF-8')
if s in {'M', 'Male', '1', 'm'}:
subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE
elif s in {'F', 'Female', '2', 'f'}:
subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE
elif s in {'0', 'u'}:
subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN
# End non standard name reading
# Update info
info.update(subject_info=subject_info)
LengthUnit = np.array(dat.get('/nirs/metaDataTags/LengthUnit'))
LengthUnit = LengthUnit[0].decode('UTF-8')
scal = 1
if "cm" in LengthUnit:
scal = 100
elif "mm" in LengthUnit:
scal = 1000
for idx, chan in enumerate(channels):
src_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/sourceIndex'))[0])
det_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/detectorIndex'))[0])
wve_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/wavelengthIndex'))[0])
info['chs'][idx]['loc'][3:6] = srcPos3D[src_idx - 1, :] / scal
info['chs'][idx]['loc'][6:9] = detPos3D[det_idx - 1, :] / scal
# Store channel as mid point
midpoint = (info['chs'][idx]['loc'][3:6] +
info['chs'][idx]['loc'][6:9]) / 2
info['chs'][idx]['loc'][0:3] = midpoint
info['chs'][idx]['loc'][9] = fnirs_wavelengths[wve_idx - 1]
if 'MNE_coordFrame' in dat.get('nirs/metaDataTags/'):
coord_frame = int(dat.get('/nirs/metaDataTags/MNE_coordFrame')
[0])
else:
coord_frame = FIFF.FIFFV_COORD_UNKNOWN
if 'landmarkPos3D' in dat.get('nirs/probe/'):
diglocs = np.array(dat.get('/nirs/probe/landmarkPos3D'))
digname = np.array(dat.get('/nirs/probe/landmarkLabels'))
nasion, lpa, rpa, hpi = None, None, None, None
extra_ps = dict()
for idx, dign in enumerate(digname):
if dign == b'LPA':
lpa = diglocs[idx, :]
elif dign == b'NASION':
nasion = diglocs[idx, :]
elif dign == b'RPA':
rpa = diglocs[idx, :]
else:
extra_ps[f'EEG{len(extra_ps) + 1:03d}'] = diglocs[idx]
info['dig'] = _make_dig_points(nasion=nasion, lpa=lpa, rpa=rpa,
hpi=hpi, dig_ch_pos=extra_ps,
coord_frame=_frame_to_str[
coord_frame])
str_date = np.array((dat.get(
'/nirs/metaDataTags/MeasurementDate')))[0].decode('UTF-8')
str_time = np.array((dat.get(
'/nirs/metaDataTags/MeasurementTime')))[0].decode('UTF-8')
str_datetime = str_date + str_time
# Several formats have been observed so we try each in turn
for dt_code in ['%Y-%m-%d%H:%M:%SZ',
'%Y-%m-%d%H:%M:%S']:
try:
meas_date = datetime.datetime.strptime(
str_datetime, dt_code)
except ValueError:
pass
else:
break
else:
warn("Extraction of measurement date from SNIRF file failed. "
"The date is being set to January 1st, 2000, "
f"instead of {str_datetime}")
meas_date = datetime.datetime(2000, 1, 1, 0, 0, 0)
meas_date = meas_date.replace(tzinfo=datetime.timezone.utc)
info['meas_date'] = meas_date
if 'DateOfBirth' in dat.get('nirs/metaDataTags/'):
str_birth = np.array((dat.get('/nirs/metaDataTags/'
'DateOfBirth')))[0].decode()
birth_matched = re.fullmatch(r'(\d+)-(\d+)-(\d+)', str_birth)
if birth_matched is not None:
info["subject_info"]['birthday'] = (
int(birth_matched.groups()[0]),
int(birth_matched.groups()[1]),
int(birth_matched.groups()[2]))
super(RawSNIRF, self).__init__(info, preload, filenames=[fname],
last_samps=[last_samps],
verbose=verbose)
# Extract annotations
annot = Annotations([], [], [])
for key in dat['nirs']:
if 'stim' in key:
data = np.array(dat.get('/nirs/' + key + '/data'))
if data.size > 0:
annot.append(data[:, 0], 1.0, key[4:])
self.set_annotations(annot)
# Reorder channels to match expected ordering in MNE
num_chans = len(self.ch_names)
chans = []
for idx in range(num_chans // 2):
chans.append(idx)
chans.append(idx + num_chans // 2)
self.pick(picks=chans)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a segment of data from a file."""
import h5py
with h5py.File(self._filenames[0], 'r') as dat:
one = dat['/nirs/data1/dataTimeSeries'][start:stop].T
_mult_cal_one(data, one, idx, cals, mult)
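# A minimal usage sketch (not part of the reader above): 'sample.snirf' is a
# placeholder path for a local continuous-wave SNIRF recording, and the class
# is normally reached through the public mne.io.read_raw_snirf() entry point.
if __name__ == '__main__':
    import mne
    raw = mne.io.read_raw_snirf('sample.snirf', preload=True)
    print(raw.info['sfreq'])   # sampling rate recovered from /nirs/data1/time
    print(raw.ch_names[:4])    # 'Source_Detector wavelength' style names
    print(raw.annotations)     # one annotation entry per /nirs/stimX group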
|
|
#!/usr/bin/env python3
import json
import re
import sys
from datetime import datetime
import dateutil.parser
from dateutil.tz import tzutc
from mtools.util.pattern import json2pattern
class DateTimeEncoder(json.JSONEncoder):
"""Custom datetime encoder for json output."""
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
class LogEvent(object):
"""
Extract information from log line and store properties/variables.
line_str: the original line string
split_tokens: a list of string tokens after splitting line_str using
whitespace as split points
datetime: a datetime object for the logevent. For logfiles created with
version 2.4+, it also contains micro-seconds
duration: the duration of a timed operation in ms
thread: the thread name (e.g. "conn1234") as string
operation: insert, update, remove, query, command, getmore, None
namespace: the namespace of the operation, or None
command: the type of command, if the operation was a "command"
pattern: the query pattern for queries, updates, counts, etc
...
Certain operations also add the number of affected/scanned documents.
If applicable, the following variables are also set, otherwise the
default is None: nscanned, ntoreturn, nreturned, ninserted, nupdated
    For performance reasons, all fields are evaluated lazily upon first
request.
"""
# datetime handler for json encoding
dthandler = lambda obj: obj.isoformat() if isinstance(obj,
datetime) else None
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec']
log_operations = ['query', 'insert', 'update', 'remove', 'getmore',
'command', 'aggregate', 'transaction']
log_levels = ['D', 'F', 'E', 'W', 'I', 'U']
log_components = ['-', 'ACCESS', 'COMMAND', 'CONTROL', 'GEO', 'INDEX',
'NETWORK', 'QUERY', 'REPL', 'SHARDING', 'STORAGE',
'JOURNAL', 'WRITE', 'TOTAL']
def __init__(self, doc_or_str):
self._debug = False
self._year_rollover = False
if isinstance(doc_or_str, bytes):
doc_or_str = doc_or_str.decode("utf-8")
if isinstance(doc_or_str, str) or (sys.version_info.major == 2 and
isinstance(doc_or_str, unicode)):
# create from string, remove line breaks at end of _line_str
self.from_string = True
self._line_str = doc_or_str.rstrip()
self._profile_doc = None
self._reset()
else:
self.from_string = False
self._profile_doc = doc_or_str
# docs don't need to be parsed lazily, they are fast
self._parse_document()
def _reset(self):
self._debug = False
self._split_tokens_calculated = False
self._split_tokens = None
self._duration_calculated = False
self._duration = None
self._datetime_calculated = False
self._datetime = None
self._datetime_nextpos = None
self._datetime_format = None
self._datetime_str = ''
self._thread_calculated = False
self._thread = None
self._operation_calculated = False
self._operation = None
self._namespace = None
self._pattern = None
self._sort_pattern = None
self._actual_query = None
self._actual_sort = None
# SERVER-36414 - parameters for slow transactions
self._lsid = None
self._txnNumber = None
self._autocommit = None
self._readConcern = None
self._timeActiveMicros = None
self._timeInactiveMicros = None
self._readTimestamp = None
self._terminationCause = None
self._locks = None
self._command_calculated = False
self._command = None
self._counters_calculated = False
self._allowDiskUse = None
self._bytesRead = None
self._bytesWritten = None
self._timeReadingMicros = None
self._timeWritingMicros = None
# TODO: refactor from the legacy names to modern
# (eg: nscanned => keysExamined). Currently _extract_counters()
# maps newer property names into legacy equivalents for
# broader log file support.
self._nscanned = None # keysExamined
self._nscannedObjects = None # docsExamined
self._ntoreturn = None
self._nupdated = None # nModified
self._nreturned = None # nReturned or nMatched (updates)
self._ninserted = None # nInserted
self._ndeleted = None # nDeleted
self._numYields = None
self._planSummary = None
self._actualPlanSummary = None
self._writeConflicts = None
self._r = None
self._w = None
self._conn = None
self._hostname = None
self._level_calculated = False
self._level = None
self._component = None
self.merge_marker_str = ''
self._client_metadata_calculated = False
self._client_metadata = None
def set_line_str(self, line_str):
"""
Set line_str.
Line_str is only writeable if LogEvent was created from a string,
not from a system.profile documents.
"""
if not self.from_string:
raise ValueError("can't set line_str for LogEvent created from "
"system.profile documents.")
if line_str != self._line_str:
self._line_str = line_str.rstrip()
self._reset()
def get_line_str(self):
"""Return line_str depending on source, logfile or system.profile."""
if self.from_string:
return ' '.join([s for s in [self.merge_marker_str,
self._datetime_str,
self._line_str] if s])
else:
return ' '.join([s for s in [self._datetime_str,
self._line_str] if s])
line_str = property(get_line_str, set_line_str)
@property
def split_tokens(self):
"""Split string into tokens (lazy)."""
if not self._split_tokens_calculated:
# split into items (whitespace split)
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True
return self._split_tokens
@property
def duration(self):
"""Calculate duration if available (lazy)."""
if not self._duration_calculated:
self._duration_calculated = True
# split_tokens = self.split_tokens
line_str = self.line_str
if (line_str
and line_str.endswith('ms')
and 'Scheduled new oplog query' not in line_str):
try:
# find duration from end
space_pos = line_str.rfind(" ")
if space_pos == -1:
return
self._duration = int(line_str[line_str.rfind(" ") +
1:-2].replace(',', ''))
except ValueError:
self._duration = None
elif "flushing" in self.line_str:
matchobj = re.search(r'flushing mmaps took (\d+)ms',
self.line_str)
if matchobj:
self._duration = int(matchobj.group(1))
# SERVER-16176 - Logging of slow checkpoints
elif "Checkpoint took" in self.line_str:
            matchobj = re.search(r"Checkpoint took ([\d]+) seconds to complete",
                                 self.line_str)
if matchobj:
self._duration = int(matchobj.group(1)) * 1000
return self._duration
# SERVER-41349 - get hostname from the DNS log line
@property
def hostname(self):
        line_str = self.line_str
        groups = re.search(r"DNS resolution while connecting to ([\w.]+) "
                           r"took ([\d]+)ms", line_str)
        if groups:
            self._hostname = groups.group(1)
        return self._hostname
@property
def cursor(self):
"""Pull the cursor information if available (lazy)."""
        line_str = self.line_str
        self._cursorid = None
        self._reapedtime = None
        # SERVER-28604 Checking reaped cursor information
        groups = re.search(r"Cursor id ([\w.]+) timed out, idle since ([^\n]*)",
                           line_str)
        if groups:
            self._cursorid = groups.group(1)
            self._reapedtime = groups.group(2)
        return self._cursorid
@property
def datetime(self):
"""Extract datetime if available (lazy)."""
if not self._datetime_calculated:
self._datetime_calculated = True
# if no datetime after 10 tokens, break to avoid parsing
# very long lines
split_tokens = self.split_tokens[:10]
for offs in range(len(split_tokens)):
dt = self._match_datetime_pattern(split_tokens[offs:offs + 4])
if dt:
self._datetime = dt
self._datetime_nextpos = offs
if self._datetime_format.startswith("iso8601"):
self._datetime_nextpos += 1
else:
self._datetime_nextpos += 4
# separate datetime str and linestr
self._line_str = (' '.join(self.split_tokens
[self._datetime_nextpos:]))
if self.level:
self._datetime_nextpos += 2
self._reformat_timestamp(self._datetime_format)
break
return self._datetime
@property
def datetime_format(self):
if not self._datetime_calculated:
_ = self.datetime
return self._datetime_format
@property
def datetime_nextpos(self):
if self._datetime_nextpos is None and not self._datetime_calculated:
_ = self.datetime
return self._datetime_nextpos
def set_datetime_hint(self, format, nextpos, rollover):
self._datetime_format = format
self._datetime_nextpos = nextpos
self._year_rollover = rollover
# Fast check if timestamp format changed.
# If it has, trigger datetime evaluation.
if format.startswith('ctime'):
if (len(self.split_tokens) < 4 or
self.split_tokens[self._datetime_nextpos - 4] not in
self.weekdays):
_ = self.datetime
return False
return True
else:
if len(self.split_tokens) == 0:
# empty line, no need to parse datetime
self._datetime_calculated = True
return False
try:
if not (self.split_tokens[self._datetime_nextpos - 1][0]
.isdigit()):
# not the timestamp format that was hinted
_ = self.datetime
return False
except Exception:
pass
return True
def _match_datetime_pattern(self, tokens):
"""
Match the datetime pattern at the beginning of the token list.
There are several formats that this method needs to understand
and distinguish between (see MongoDB's SERVER-7965):
ctime-pre2.4 Wed Dec 31 19:00:00
ctime Wed Dec 31 19:00:00.000
iso8601-utc 1970-01-01T00:00:00.000Z
iso8601-local 1969-12-31T19:00:00.000+0500
"""
# first check: less than 4 tokens can't be ctime
assume_iso8601_format = len(tokens) < 4
# check for ctime-pre-2.4 or ctime format
if not assume_iso8601_format:
weekday, month, day, time = tokens[:4]
if (len(tokens) < 4 or (weekday not in self.weekdays) or
(month not in self.months) or not day.isdigit()):
assume_iso8601_format = True
if assume_iso8601_format:
# sanity check, because the dateutil parser could interpret
# any numbers as a valid date
if not re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}',
tokens[0]):
return None
            # convinced that this is an ISO-8601 format, the dateutil parser
# will do the rest
dt = dateutil.parser.parse(tokens[0])
self._datetime_format = "iso8601-utc" \
if tokens[0].endswith('Z') else "iso8601-local"
else:
# assume current year unless self.year_rollover
# is set (from LogFile)
year = datetime.now().year
dt = dateutil.parser.parse(' '.join(tokens[: 4]),
default=datetime(year, 1, 1))
if dt.tzinfo is None:
dt = dt.replace(tzinfo=tzutc())
if self._year_rollover and dt > self._year_rollover:
dt = dt.replace(year=year - 1)
self._datetime_format = "ctime" \
if '.' in tokens[3] else "ctime-pre2.4"
return dt
@property
def thread(self):
"""Extract thread name if available (lazy)."""
if not self._thread_calculated:
self._thread_calculated = True
split_tokens = self.split_tokens
if not self.datetime_nextpos:
return None
if len(split_tokens) <= self.datetime_nextpos:
return None
connection_token = split_tokens[self.datetime_nextpos]
match = re.match(r'^\[([^\]]*)\]$', connection_token)
if match:
self._thread = match.group(1)
if self._thread is not None:
if self._thread in ['initandlisten', 'mongosMain']:
if len(split_tokens) >= 5 and split_tokens[-5][0] == '#':
self._conn = 'conn' + split_tokens[-5][1:]
elif self._thread.startswith('conn'):
self._conn = self._thread
return self._thread
@property
def conn(self):
r"""
Extract conn name if available (lazy).
This value is None for all lines except the log lines related to
connections, that is lines matching '\[conn[0-9]+\]' or
'\[(initandlisten|mongosMain)\] .* connection accepted from'.
"""
self.thread
return self._conn
@property
def operation(self):
"""
Extract operation if available (lazy).
Operations: query, insert, update, remove, getmore, command
"""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._operation
@property
def namespace(self):
"""Extract namespace if available (lazy)."""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._namespace
def _extract_operation_and_namespace(self):
"""
Helper method to extract both operation and namespace from a logevent.
It doesn't make sense to only extract one as they appear back to back
in the token list.
"""
split_tokens = self.split_tokens
if not self._datetime_nextpos:
# force evaluation of thread to get access to datetime_offset and
# to protect from changes due to line truncation.
_ = self.thread
if not self._datetime_nextpos or (len(split_tokens) <=
self._datetime_nextpos + 2):
return
op = split_tokens[self._datetime_nextpos + 1].lower()
if op == 'warning:':
# check if this log line got truncated
if ("warning: log line attempted" in self._line_str and
"over max size" in self._line_str):
self._datetime_nextpos = split_tokens.index('...')
op = split_tokens[self._datetime_nextpos + 1]
else:
# unknown warning, bail out
return
if op in self.log_operations:
self._operation = op
self._namespace = split_tokens[self._datetime_nextpos + 2]
@property
def pattern(self):
"""Extract query pattern from operations."""
if not self._pattern:
# trigger evaluation of operation
if (self.operation in ['query', 'getmore', 'update', 'remove'] or
self.command in ['count', 'findandmodify']):
self._pattern = self._find_pattern('query: ')
# Fallback check for q: variation (eg "remove" command in 3.6+)
if self._pattern is None:
self._pattern = self._find_pattern('q: ')
elif self.command == 'find':
self._pattern = self._find_pattern('filter: ')
return self._pattern
@property
def sort_pattern(self):
"""Extract query pattern from operations."""
if not self._sort_pattern:
# trigger evaluation of operation
if self.operation in ['query', 'getmore']:
self._sort_pattern = self._find_pattern('orderby: ')
return self._sort_pattern
@property
def actual_query(self):
"""Extract the actual query (not pattern) from operations."""
if not self._actual_query:
# trigger evaluation of operation
if (self.operation in ['query', 'getmore', 'update', 'remove'] or
self.command in ['count', 'findandmodify']):
self._actual_query = self._find_pattern('query: ', actual=True)
elif self.command == 'find':
self._actual_query = self._find_pattern('filter: ',
actual=True)
return self._actual_query
@property
def actual_sort(self):
"""Extract the actual sort key (not pattern) from operations."""
if not self._actual_sort:
# trigger evaluation of operation
if self.operation in ['query', 'getmore']:
self._actual_sort = self._find_pattern('orderby: ',
actual=True)
return self._actual_sort
@property
def command(self):
"""Extract query pattern from operations."""
if not self._command_calculated:
self._command_calculated = True
if self.operation == 'command':
try:
command_idx = self.split_tokens.index('command:')
command = self.split_tokens[command_idx + 1]
if command == '{':
# workaround for <= 2.2 log files,
# where command was not listed separately
command = self.split_tokens[command_idx + 2][:-1]
self._command = command.lower()
except ValueError:
pass
return self._command
@property
def nscanned(self):
"""Extract nscanned or keysExamined counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscanned
@property
def timeActiveMicros(self):
"""Extract timeActiveMicros if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._timeActiveMicros
@property
def timeInactiveMicros(self):
"""Extract timeInactiveMicros if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._timeInactiveMicros
@property
def nscannedObjects(self):
"""
Extract counters if available (lazy).
Looks for nscannedObjects or docsExamined.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscannedObjects
@property
def ntoreturn(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ntoreturn
@property
def writeConflicts(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._writeConflicts
@property
def nreturned(self):
"""
Extract counters if available (lazy).
Looks for nreturned, nReturned, or nMatched counter.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nreturned
@property
def terminationCause(self):
# Looks for terminationCause counter in Transaction logs.
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._terminationCause
@property
def ninserted(self):
"""Extract ninserted or nInserted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ninserted
@property
def bytesRead(self):
"""Extract bytesRead counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._bytesRead
@property
def bytesWritten(self):
"""Extract bytesWritten counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._bytesWritten
@property
def timeReadingMicros(self):
"""Extract timeReadingMicros counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._timeReadingMicros
@property
def timeWritingMicros(self):
"""Extract timeWritingMicros counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._timeWritingMicros
@property
def ndeleted(self):
"""Extract ndeleted or nDeleted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ndeleted
@property
def allowDiskUse(self):
"""Extract allowDiskUse counter for aggregation if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._allowDiskUse
@property
def nupdated(self):
"""Extract nupdated or nModified counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nupdated
@property
def numYields(self):
"""Extract numYields counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._numYields
@property
def readTimestamp(self):
"""Extract readTimeStamp counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._readTimestamp
@property
def planSummary(self):
"""Extract planSummary if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._planSummary
@property
def actualPlanSummary(self):
"""Extract planSummary including JSON if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._actualPlanSummary
@property
def r(self):
"""Extract read lock (r) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._r
@property
def lsid(self):
"""Extract lsid counter if available (lazy)."""
self._lsid = self._find_pattern('lsid: ', actual=True)
return self._lsid
@property
def locks(self):
"""Extract locks counter for transactions if available (lazy)."""
self._locks = self._find_pattern('locks:', actual=True)
return self._locks
@property
def txnNumber(self):
"""Extract txnNumber counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._txnNumber
@property
def autocommit(self):
"""Extract autocommit counter for transactions if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._autocommit
@property
def readConcern(self):
"""Extract readConcern Level if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._readConcern
@property
def w(self):
"""Extract write lock (w) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._w
def _extract_counters(self):
"""Extract counters like nscanned and nreturned from the logevent."""
# extract counters (if present)
counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned',
'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields',
'planSummary', 'writeConflicts', 'keyUpdates', 'bytesRead', 'bytesWritten', 'timeReadingMicros',
'timeWritingMicros', 'lsid', 'txnNumber', 'autocommit', 'allowDiskUse', 'level',
'timeActiveMicros', 'timeInactiveMicros', 'duration', 'readTimestamp', 'terminationCause']
# TODO: refactor mtools to use current counter names throughout
# Transitionary hack: mapping of current names into prior equivalents
counter_equiv = {
'datetime': 'datetime',
'docsExamined': 'nscannedObjects',
'keysExamined': 'nscanned',
'nDeleted': 'ndeleted',
'nInserted': 'ninserted',
'nMatched': 'nreturned',
'nModified': 'nupdated',
            'cursorid': 'cursorid',
            'reapedtime': 'reapedtime'
}
counters.extend(counter_equiv.keys())
split_tokens = self.split_tokens
# trigger operation evaluation to get access to offset
if self.operation:
for t, token in enumerate(split_tokens[self.datetime_nextpos +
2:]):
for counter in counters:
if token.startswith('%s:' % counter):
try:
# Remap counter to standard name, if applicable
counter = counter_equiv.get(counter, counter)
                            if (counter == 'level' and token.startswith('level')):
                                self._readConcern = (
                                    split_tokens[t + 1 + self.datetime_nextpos + 2].replace(',', ''))
                            elif (counter == 'readTimestamp' and token.startswith('readTimestamp')):
                                vars(self)['_' + counter] = (token.split(':')
                                                             [-1]).replace(',', '')
                            elif (counter == 'terminationCause' and token.startswith('terminationCause')):
                                vars(self)['_' + counter] = (token.split(':')
                                                             [-1]).replace(',', '')
                            elif (counter == 'allowDiskUse' and token.startswith('allowDiskUse')):
                                # allowDiskUse value is space-separated from the token
                                self._allowDiskUse = split_tokens[t + 1 + self.datetime_nextpos + 2].replace(',', '')
                            else:
                                vars(self)['_' + counter] = int((token.split(':')
                                                                 [-1]).replace(',', ''))
except ValueError:
# see if this is a pre-2.5.2 numYields with space
# in between (e.g. "numYields: 2")
# https://jira.mongodb.org/browse/SERVER-10101
if (counter == 'numYields' and
token.startswith('numYields')):
try:
self._numYields = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
except ValueError:
pass
if (counter == 'bytesRead' and
token.startswith('bytesRead')):
try:
self._bytesRead = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
except ValueError:
pass
if (counter == 'bytesWritten' and
token.startswith('bytesWritten')):
try:
self._bytesWritten = int(
(split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
except ValueError:
pass
if (counter == 'timeReadingMicros' and
token.startswith('timeReadingMicros')):
try:
self._timeReadingMicros = int(
(split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
except ValueError:
pass
if (counter == 'timeWritingMicros' and
token.startswith('timeWritingMicros')):
try:
self._timeWritingMicros = int(
(split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
except ValueError:
pass
if (counter == 'txnNumber' and
token.startswith('txnNumber')):
self._txnNumber = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
if (counter == 'autocommit' and
token.startswith('autocommit')):
self._autocommit = (split_tokens[t + 1 + self.datetime_nextpos + 2].replace(',', ''))
if (counter == 'lsid' and
token.startswith('lsid')):
self._lsid = (split_tokens[t + 2 + self.datetime_nextpos + 2].replace(',', ''))
if (counter == 'planSummary' and
token.startswith('planSummary')):
try:
self._planSummary = split_tokens[t + 1 + self.datetime_nextpos + 2]
if self._planSummary:
if split_tokens[t + 1 + self.datetime_nextpos + 3] != '{':
self._actualPlanSummary = self._planSummary
else:
self._actualPlanSummary = '%s %s' % (
self._planSummary,
self._find_pattern('planSummary: %s' % self._planSummary, actual=True)
)
except ValueError:
pass
# token not parsable, skip
break
@property
def level(self):
"""Extract log level if available (lazy)."""
if not self._level_calculated:
self._level_calculated = True
self._extract_level()
return self._level
@property
def component(self):
"""Extract log component if available (lazy)."""
self.level
return self._component
def _extract_level(self):
"""Extract level and component if available (lazy)."""
if self._level is None:
split_tokens = self.split_tokens
if not split_tokens:
self._level = False
self._component = False
return
x = (self.log_levels.index(split_tokens[1])
if split_tokens[1] in self.log_levels else None)
if x is not None:
self._level = split_tokens[1]
self._component = split_tokens[2]
else:
self._level = False
self._component = False
@property
def client_metadata(self):
"""Return client metadata."""
if not self._client_metadata_calculated:
self._client_metadata_calculated = True
line_str = self.line_str
            if line_str and line_str.find('client metadata') != -1:
try:
metadata_pos = line_str.find("{")
if metadata_pos == -1:
return
else:
metadata = line_str[metadata_pos:]
# Make valid JSON by wrapping field names in quotes
metadata, _ = re.subn(r'([{,])\s*([^,{\s\'"]+)\s*:',
' \\1 "\\2" : ', metadata)
# Replace double-quoted platform values with single quote
platform = re.search(r'"platform"\s+:\s+"(.*)"', metadata)
if (platform):
platform = platform.group(1)
platform_esc, _ = re.subn(r'"', r"'", platform)
metadata, _ = re.subn(platform, platform_esc, metadata)
self._client_metadata = json.loads(metadata)
except ValueError:
self._client_metadata = None
return self._client_metadata
def parse_all(self):
"""
Trigger extraction of all information.
These values are usually evaluated lazily.
"""
tokens = self.split_tokens
duration = self.duration
datetime = self.datetime
thread = self.thread
operation = self.operation
namespace = self.namespace
pattern = self.pattern
nscanned = self.nscanned
nscannedObjects = self.nscannedObjects
ntoreturn = self.ntoreturn
nreturned = self.nreturned
ninserted = self.ninserted
ndeleted = self.ndeleted
nupdated = self.nupdated
numYields = self.numYields
txnNumber = self.txnNumber
w = self.w
r = self.r
def _find_pattern(self, trigger, actual=False):
# get start of json query pattern
start_idx = self.line_str.rfind(trigger)
if start_idx == -1:
# no query pattern found
return None
stop_idx = 0
brace_counter = 0
search_str = self.line_str[start_idx + len(trigger):]
for match in re.finditer(r'{|}', search_str):
stop_idx = match.start()
if search_str[stop_idx] == '{':
brace_counter += 1
else:
brace_counter -= 1
if brace_counter == 0:
break
search_str = search_str[:stop_idx + 1].strip()
if search_str:
if actual:
return search_str
else:
return json2pattern(search_str, debug=self._debug)
else:
return None
def _reformat_timestamp(self, format, force=False):
if format not in ['ctime', 'ctime-pre2.4', 'iso8601-utc',
'iso8601-local']:
            raise ValueError('invalid datetime format %s, choose from ctime, '
                             'ctime-pre2.4, iso8601-utc, iso8601-local.'
                             % format)
if ((self.datetime_format is None or
(self.datetime_format == format and
self._datetime_str != '')) and not force):
return
elif self.datetime is None:
return
elif format.startswith('ctime'):
dt_string = (self.weekdays[self.datetime.weekday()] + ' ' +
self.datetime.strftime("%b %d %H:%M:%S"))
# remove zero-padding from day number
tokens = dt_string.split(' ')
if tokens[2].startswith('0'):
tokens[2] = tokens[2].replace('0', ' ', 1)
dt_string = ' '.join(tokens)
if format == 'ctime':
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)
elif format == 'iso8601-local':
dt_string = self.datetime.isoformat()
if self.datetime.utcoffset() is None:
dt_string += '+00:00'
ms_str = str(int(self.datetime.microsecond / 1000)).zfill(3)[:3]
# change isoformat string to have 3 digit milliseconds and no :
# in offset
dt_string = re.sub(r'(\.\d+)?([+-])(\d\d):(\d\d)',
'.%s\\2\\3\\4' % ms_str, dt_string, count=1)
elif format == 'iso8601-utc':
if self.datetime.utcoffset():
dt_string = self.datetime.astimezone(tzutc()).strftime("%Y-%m-"
"%dT%H:"
"%M:%S")
else:
dt_string = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)[:3] + 'Z'
# set new string and format
self._datetime_str = dt_string
self._datetime_format = format
def __str__(self):
"""Default string conversion for LogEvent object is its line_str."""
return str(self.line_str)
def to_dict(self, labels=None):
"""Convert LogEvent object to a dictionary."""
output = {}
if labels is None:
labels = ['line_str', 'split_tokens', 'datetime', 'operation',
'thread', 'namespace', 'nscanned', 'ntoreturn',
'nreturned', 'ninserted', 'nupdated', 'ndeleted',
'duration', 'r', 'w', 'numYields', 'cursorid', 'reapedtime',
                      'txnNumber', 'lsid', 'autocommit', 'readConcern',
'timeActiveMicros', 'timeInactiveMicros']
for label in labels:
value = getattr(self, label, None)
if value is not None:
output[label] = value
return output
def to_json(self, labels=None):
"""Convert LogEvent object to valid JSON."""
output = self.to_dict(labels)
return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False)
def _parse_document(self):
"""Parse system.profile doc, copy all values to member variables."""
self._reset()
doc = self._profile_doc
self._split_tokens_calculated = True
self._split_tokens = None
self._duration_calculated = True
self._duration = doc[u'millis']
self._datetime_calculated = True
self._datetime = doc[u'ts']
if self._datetime.tzinfo is None:
self._datetime = self._datetime.replace(tzinfo=tzutc())
self._datetime_format = None
self._reformat_timestamp('ctime', force=True)
self._thread_calculated = True
self._thread = doc['thread']
self._operation_calculated = True
self._operation = doc[u'op']
self._namespace = doc[u'ns']
self._command_calculated = True
if self.operation == 'command':
            self._command = list(doc[u'command'].keys())[0]
# query pattern for system.profile events, all three cases.
# See SERVER-13245
if 'query' in doc:
if 'query' in doc['query'] and isinstance(doc['query']['query'],
dict):
self._pattern = str(doc['query']['query']).replace("'", '"')
elif '$query' in doc['query']:
self._pattern = str(doc['query']['$query']).replace("'", '"')
else:
self._pattern = str(doc['query']).replace("'", '"')
# sort pattern
if ('orderby' in doc['query'] and
isinstance(doc['query']['orderby'], dict)):
self._sort_pattern = str(doc['query']
['orderby']).replace("'", '"')
elif '$orderby' in doc['query']:
self._sort_pattern = str(doc['query']
['$orderby']).replace("'", '"')
else:
self._sort_pattern = None
self._counters_calculated = True
self._nscanned = doc[u'nscanned'] if 'nscanned' in doc else None
self._ntoreturn = doc[u'ntoreturn'] if 'ntoreturn' in doc else None
self._nupdated = doc[u'nupdated'] if 'nupdated' in doc else None
self._nreturned = doc[u'nreturned'] if 'nreturned' in doc else None
self._ninserted = doc[u'ninserted'] if 'ninserted' in doc else None
self._ndeleted = doc[u'ndeleted'] if 'ndeleted' in doc else None
self._numYields = doc[u'numYield'] if 'numYield' in doc else None
self._txnNumber = doc[u'txnNumber'] if 'txnNumber' in doc else None
self._lsid = doc[u'lsid'] if 'lsid' in doc else None
self._autocommit = doc[u'autocommit'] if 'autocommit' in doc else None
self._readConcern = doc[u'level'] if 'level' in doc else None
self._timeActiveMicros = doc[u'timeActiveMicros'] if 'timeActiveMicros' in doc else None
self._timeInactiveMicros = doc[u'timeInactiveMicros'] if 'timeInactiveMicros' in doc else None
self._duration = doc[u'duration'] if 'duration' in doc else None
self._datetime = doc[u'datetime'] if 'datetime' in doc else None
if u'lockStats' in doc:
self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']
self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']
locks = 'w:%i' % self.w if self.w is not None else 'r:%i' % self.r
elif u'locks' in doc:
locks = json.dumps(doc[u'locks'])
else:
locks = ''
# build a fake line_str
payload = ''
if 'query' in doc:
payload += ('query: %s' % str(doc[u'query'])
.replace("u'", "'").replace("'", '"'))
if 'command' in doc:
payload += ('command: %s' % str(doc[u'command'])
.replace("u'", "'").replace("'", '"'))
if 'updateobj' in doc:
payload += (' update: %s' % str(doc[u'updateobj'])
.replace("u'", "'").replace("'", '"'))
scanned = 'nscanned:%i' % self._nscanned if 'nscanned' in doc else ''
yields = 'numYields:%i' % self._numYields if 'numYield' in doc else ''
duration = '%ims' % self.duration if self.duration is not None else ''
self._line_str = (f'''[{self.thread}] {self.operation} {self.namespace} {payload} '''
f'''{scanned} {yields} locks(micros) {locks} '''
f'''{duration}''')
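# A small self-contained sketch of how LogEvent is typically consumed. The log
# line below is a made-up example in the plain-text 3.x format (real lines
# vary); all fields are computed lazily on first attribute access.
if __name__ == '__main__':
    sample = ('2019-06-14T10:02:31.290+0000 I COMMAND [conn1234] '
              'command test.coll command: find { find: "coll", filter: { a: 1 } } '
              'planSummary: COLLSCAN keysExamined:0 docsExamined:100 numYields:1 '
              'nreturned:1 reslen:229 locks:{} protocol:op_msg 105ms')
    le = LogEvent(sample)
    print(le.datetime, le.thread, le.operation, le.namespace)
    print(le.duration, le.nreturned, le.numYields, le.planSummary)
    print(le.to_json(['datetime', 'operation', 'namespace', 'duration']))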
|
|
"""
Chemical Properties Script
==========================
Create a dictionary of chemical properties
This script provides tools to create a dictionary of the properties of
several hydrocarbons and chemicals of environmental interest in a global
dictionary for use by other programs that need to know chemical properties.
Parameters
----------
The chemical data are stored in ``./data/ChemData.csv`` and the
biodegradation data are stored in ``./data/BioData.csv``. In these files,
header rows are denoted by ``%``, the last row of pure text is taken as the
variable names, and the last row with ``()`` is taken as the units. The
columns should include a key name (e.g., `methane`) followed by numerical
values for each parameter in the file.
For the data provided by the model, the data sources and more details are
documented in the documentation (see ./docs/index.html).
Notes
-----
To use the properties database distributed by ``TAMOC``, you must import this
module and then use the `tamoc_data()` method, which reads the databases
distributed with ``TAMOC``.
See also
--------
`dbm` : Uses these dictionaries to create chemical mixture objects.
Examples
--------
>>> from tamoc import chemical_properties
>>> chem_db, chem_units, bio_db, bio_units = chemical_properties.tamoc_data()
>>> chem_db['oxygen']['M']
0.031998800000000001
>>> chem_units['M']
'(kg/mol)'
"""
# S. Socolofsky, January 2012, Texas A&M University <[email protected]>.
from __future__ import (absolute_import, division, print_function)
import numpy as np
import os
def load_data(fname):
"""
Load a chemical properties file into memory
Reads in a chemical properties file, creates a dictionary of the columns
in the file, and performs some units conversions as necessary to have the
data in SI mks units.
Parameters
----------
fname : str
file name (with relative path as necessary) where the chemical
property data is stored
Returns
-------
data : dict
dictionary of the properties for each column in the data file
units : dict
corresponding dictionary of units for each property in data
Notes
-----
This function is used by the `dbm` module to load in the default chemical
data in ./tamoc/data/chemdata.csv. This function can also be called by
the user to read in a user-specified file of chemical data present in any
storage location.
"""
# Set up counters to keep track of what has been and has not been read
readnames = -1
data = {}
# Read in and parse the data from the chemistry data file.
with open(fname) as datfile:
for line in datfile:
entries = line.strip().split(',')
# Remove blank RHS column (Excel randomly includes extra columns)
if len(entries[len(entries)-1]) == 0:
entries = entries[0:len(entries)-1]
# Identify and store the data
if line.find('%') >= 0:
# This is a header line
if line.find('(') >= 0:
# This line contains the units
header_units = line.strip().split(',')
                elif (len(entries[1]) > 0) and (readnames < 0):
# This line contains the variable names
header_keys = line.strip().split(',')
readnames = 1
else:
# This is a data line
data[entries[0]] = {}
for i in range(1, len(entries)):
data[entries[0]][header_keys[i]] = np.float64(entries[i])
# Add the units to two different dictionaries
read_units = {}
for i in range(len(header_units) - 1):
read_units[header_keys[i]] = header_units[i]
units = {}
for i in range(len(header_units) - 1):
units[header_keys[i]] = header_units[i]
# Convert to SI units. If you add a new unit to the file ChemData.csv,
# then you should include a check for it here.
for chemical in data:
for variable in read_units:
if read_units[variable].find('g/mol') >= 0:
# Convert to kg/mol
data[chemical][variable] = data[chemical][variable] / 1000.
units[variable] = '(kg/mol)'
if read_units[variable].find('psia') >= 0:
# Convert to Pa
data[chemical][variable] = data[chemical][variable] * 6894.76
units[variable] = '(Pa)'
if read_units[variable].find('F') >= 0:
# Convert to K
data[chemical][variable] = (data[chemical][variable] - 32.) * \
5. / 9. + 273.15
units[variable] = '(K)'
if read_units[variable].find('mol/dm^3 atm') >= 0:
# Convert to kg/(m^3 Pa)
data[chemical][variable] = (data[chemical][variable] * \
1000. / 101325. * \
data[chemical]['M'])
units[variable] = '(kg/(m^3 Pa))'
if read_units[variable].find('mm^2/s') >= 0:
# Convert to m^2/s
data[chemical][variable] = data[chemical][variable] / 1000.**2
units[variable] = '(m^2/s)'
if read_units[variable].find('cal/mol') >= 0:
# Convert to J/mol
data[chemical][variable] = data[chemical][variable] / 0.238846
units[variable] = '(J/mol)'
if read_units[variable].find('L/mol') >= 0:
# Convert to m^3/mol
data[chemical][variable] = data[chemical][variable] / 1000.
units[variable] = '(m^3/mol)'
if read_units[variable].find('1/d') >= 0:
# Convert to 1/s
data[chemical][variable] = data[chemical][variable] / 86400.
units[variable] = '(1/s)'
            if read_units[variable].find('(d)') >= 0:
# Convert to s
data[chemical][variable] = data[chemical][variable] * 86400.
units[variable] = '(s)'
return (data, units)
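# Illustrative (hypothetical) layout of a properties CSV that load_data() can
# parse -- header rows start with '%', the plain-text header row holds the
# variable names, and the header row containing '()' holds the units:
#
#     %key,M,Pc,Tc
#     %(),(g/mol),(psia),(F)
#     methane,16.04,667.0,-116.6
#
# Values read from such a file are converted to SI by the loop above
# (e.g. g/mol -> kg/mol, psia -> Pa, F -> K).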
def tamoc_data():
"""
Load the supplied chemical properties file from the `TAMOC` distribution
Reads in the chemical properties file provided with `TAMOC`, creates a
dictionary of the columns in the file, and performs some units
conversions as necessary to have the data in SI mks units.
Returns
-------
data : dict
dictionary of the properties for each column in the data file
units : dict
corresponding dictionary of units for each property in data
Notes
-----
    This function reads in the default chemical data in
./tamoc/data/chemdata.csv.
"""
# Get the relative path to the ./tamoc/data directory
__location__ = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__), 'data'))
# Create the full relative path to the default data in ChemData.csv
chem_fname = os.path.join(__location__,'ChemData.csv')
bio_fname = os.path.join(__location__,'BioData.csv')
# Load in the default data and their units
chem_data, chem_units = load_data(chem_fname)
bio_data, bio_units = load_data(bio_fname)
# Return the results
return (chem_data, chem_units, bio_data, bio_units)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
# Used by SomeRandomBenchmark class below.
_ran_somebenchmark_1 = [False]
_ran_somebenchmark_2 = [False]
_ran_somebenchmark_but_shouldnt = [False]
class SomeRandomBenchmark(test.Benchmark):
"""This Benchmark should automatically be registered in the registry."""
def _dontRunThisBenchmark(self):
_ran_somebenchmark_but_shouldnt[0] = True
def notBenchmarkMethod(self):
_ran_somebenchmark_but_shouldnt[0] = True
def benchmark1(self):
_ran_somebenchmark_1[0] = True
def benchmark2(self):
_ran_somebenchmark_2[0] = True
class TestReportingBenchmark(test.Benchmark):
"""This benchmark (maybe) reports some stuff."""
def benchmarkReport1(self):
self.report_benchmark(iters=1)
def benchmarkReport2(self):
self.report_benchmark(
iters=2,
name="custom_benchmark_name",
extras={"number_key": 3,
"other_key": "string"})
def benchmark_times_an_op(self):
with session.Session(config=benchmark.benchmark_config()) as sess:
a = constant_op.constant(0.0)
a_plus_a = a + a
return self.run_op_benchmark(
sess, a_plus_a, min_iters=1000, store_trace=True, name="op_benchmark")
class BenchmarkTest(test.TestCase):
def testGlobalBenchmarkRegistry(self):
registry = list(benchmark.GLOBAL_BENCHMARK_REGISTRY)
self.assertEqual(len(registry), 2)
self.assertTrue(SomeRandomBenchmark in registry)
self.assertTrue(TestReportingBenchmark in registry)
def testRunSomeRandomBenchmark(self):
# Validate that SomeBenchmark has not run yet
self.assertFalse(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
    # Run other benchmarks, but this won't run the one we care about
benchmark._run_benchmarks("unrelated")
# Validate that SomeBenchmark has not run yet
self.assertFalse(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
# Run all the benchmarks, avoid generating any reports
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
benchmark._run_benchmarks("SomeRandom")
# Validate that SomeRandomBenchmark ran correctly
self.assertTrue(_ran_somebenchmark_1[0])
self.assertTrue(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
_ran_somebenchmark_1[0] = False
_ran_somebenchmark_2[0] = False
_ran_somebenchmark_but_shouldnt[0] = False
# Test running a specific method of SomeRandomBenchmark
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
benchmark._run_benchmarks("SomeRandom.*1$")
self.assertTrue(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
def testReportingBenchmark(self):
tempdir = test.get_temp_dir()
try:
gfile.MakeDirs(tempdir)
except OSError as e:
# It's OK if the directory already exists.
if " exists:" not in str(e):
raise e
prefix = os.path.join(tempdir,
"reporting_bench_%016x_" % random.getrandbits(64))
expected_output_file = "%s%s" % (prefix,
"TestReportingBenchmark.benchmarkReport1")
expected_output_file_2 = "%s%s" % (
prefix, "TestReportingBenchmark.custom_benchmark_name")
expected_output_file_3 = "%s%s" % (prefix,
"TestReportingBenchmark.op_benchmark")
try:
self.assertFalse(gfile.Exists(expected_output_file))
# Run benchmark but without env, shouldn't write anything
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should run without writing anything
self.assertFalse(gfile.Exists(expected_output_file))
      # Run benchmark with env, should write
os.environ[benchmark.TEST_REPORTER_TEST_ENV] = prefix
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should write
reporting.benchmarkReport2() # This should write
benchmark_values3 = reporting.benchmark_times_an_op() # This should write
# Check the files were written
self.assertTrue(gfile.Exists(expected_output_file))
self.assertTrue(gfile.Exists(expected_output_file_2))
self.assertTrue(gfile.Exists(expected_output_file_3))
# Check the contents are correct
expected_1 = test_log_pb2.BenchmarkEntry()
expected_1.name = "TestReportingBenchmark.benchmarkReport1"
expected_1.iters = 1
expected_2 = test_log_pb2.BenchmarkEntry()
expected_2.name = "TestReportingBenchmark.custom_benchmark_name"
expected_2.iters = 2
expected_2.extras["number_key"].double_value = 3
expected_2.extras["other_key"].string_value = "string"
expected_3 = test_log_pb2.BenchmarkEntry()
expected_3.name = "TestReportingBenchmark.op_benchmark"
expected_3.iters = 1000
def read_benchmark_entry(f):
s = gfile.GFile(f, "rb").read()
entries = test_log_pb2.BenchmarkEntries.FromString(s)
self.assertEquals(1, len(entries.entry))
return entries.entry[0]
read_benchmark_1 = read_benchmark_entry(expected_output_file)
self.assertProtoEquals(expected_1, read_benchmark_1)
read_benchmark_2 = read_benchmark_entry(expected_output_file_2)
self.assertProtoEquals(expected_2, read_benchmark_2)
read_benchmark_3 = read_benchmark_entry(expected_output_file_3)
self.assertEquals(expected_3.name, read_benchmark_3.name)
self.assertEquals(expected_3.iters, read_benchmark_3.iters)
self.assertGreater(read_benchmark_3.wall_time, 0)
# Trace is not stored in benchmark entry. Instead we get it from
# return value of `run_op_benchmark` call.
full_trace = benchmark_values3["extras"]["full_trace_chrome_format"]
json_trace = json.loads(full_trace)
self.assertTrue(isinstance(json_trace, dict))
self.assertTrue("traceEvents" in json_trace.keys())
allocator_keys = [k for k in read_benchmark_3.extras.keys()
if k.startswith("allocator_maximum_num_bytes_")]
self.assertGreater(len(allocator_keys), 0)
for k in allocator_keys:
self.assertGreater(read_benchmark_3.extras[k].double_value, 0)
finally:
gfile.DeleteRecursively(tempdir)
if __name__ == "__main__":
test.main()
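# Note: benchmark methods above are only executed when their class/method name
# matches the regex handed to benchmark._run_benchmarks(), and results are only
# written to disk when the TEST_REPORTER_TEST_ENV environment variable points
# at an output path prefix -- both behaviours are exercised by the tests above.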
|
|
import os
from py2neo.cypher.error.schema import (IndexAlreadyExists,
ConstraintAlreadyExists)
from .exception import DoesNotExist
from .properties import Property, PropertyManager
from .signals import hooks
from .util import Database, deprecated, classproperty
DATABASE_URL = os.environ.get('NEO4J_REST_URL', 'http://localhost:7474/db/data/')
db = Database(DATABASE_URL)
def install_labels(cls):
# TODO when to execute this?
if not hasattr(db, 'session'):
db.new_session()
for key, prop in cls.defined_properties(aliases=False, rels=False).items():
if prop.index:
indexes = db.session.schema.get_indexes(cls.__label__)
if key not in indexes:
try:
db.cypher_query("CREATE INDEX on :{}({}); ".format(
cls.__label__, key))
except IndexAlreadyExists:
pass
elif prop.unique_index:
unique_const = db.session.schema.get_uniqueness_constraints(
cls.__label__)
if key not in unique_const:
try:
db.cypher_query("CREATE CONSTRAINT "
"on (n:{}) ASSERT n.{} IS UNIQUE; ".format(
cls.__label__, key))
except (ConstraintAlreadyExists, IndexAlreadyExists):
pass
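# Hedged sketch of what install_labels() produces for a node class. The
# `Person` class and StringProperty fields below are hypothetical and only
# illustrate how property flags map to schema statements:
#
#     class Person(StructuredNode):
#         uid = StringProperty(unique_index=True)   # CREATE CONSTRAINT ... IS UNIQUE
#         name = StringProperty(index=True)         # CREATE INDEX on :Person(name)
#
# Class creation runs through NodeMeta.__new__ below, which calls
# install_labels() for each concrete (non-abstract) node class it builds.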
class NodeMeta(type):
def __new__(mcs, name, bases, dct):
dct.update({'DoesNotExist': type('DoesNotExist', (DoesNotExist,), dct)})
inst = super(NodeMeta, mcs).__new__(mcs, name, bases, dct)
if hasattr(inst, '__abstract_node__'):
delattr(inst, '__abstract_node__')
else:
for key, value in dct.items():
if key == 'deleted':
raise ValueError("Class property called 'deleted' conflicts with neomodel internals")
if issubclass(value.__class__, Property):
value.name = key
value.owner = inst
# support for 'magic' properties
if hasattr(value, 'setup') and hasattr(
value.setup, '__call__'):
value.setup()
# cache the names of all required and unique_index properties
all_required = set(name for name, p in inst.defined_properties(aliases=False, rels=False).items()
if p.required or p.unique_index)
inst.__required_properties__ = tuple(all_required)
# cache all definitions
inst.__all_properties__ = tuple(inst.defined_properties(aliases=False, rels=False).items())
inst.__all_aliases__ = tuple(inst.defined_properties(properties=False, rels=False).items())
inst.__all_relationships__ = tuple(inst.defined_properties(aliases=False, properties=False).items())
if '__label__' in dct:
inst.__label__ = dct['__label__']
else:
inst.__label__ = inst.__name__
install_labels(inst)
from .index import NodeIndexManager
inst.index = NodeIndexManager(inst, inst.__label__)
return inst
NodeBase = NodeMeta('NodeBase', (PropertyManager,), {'__abstract_node__': True})
class StructuredNode(NodeBase):
__abstract_node__ = True
__required_properties__ = ()
""" Names of all required properties of this StructuredNode """
__all_properties__ = ()
""" Tuple of (name, property) of all regular properties """
__all_aliases__ = ()
""" Tuple of (name, property) of all aliases """
__all_relationships__ = ()
""" Tuple of (name, property) of all relationships """
@classproperty
def nodes(cls):
from .match import NodeSet
return NodeSet(cls)
def __init__(self, *args, **kwargs):
if 'deleted' in kwargs:
raise ValueError("deleted property is reserved for neomodel")
for key, val in self.__all_relationships__:
self.__dict__[key] = val.build_manager(self, key)
super(StructuredNode, self).__init__(*args, **kwargs)
def __eq__(self, other):
if not isinstance(other, (StructuredNode,)):
return False
if hasattr(self, '_id') and hasattr(other, '_id'):
return self._id == other._id
return False
def __ne__(self, other):
return not self.__eq__(other)
def labels(self):
self._pre_action_check('labels')
return self.cypher("MATCH n WHERE id(n)={self} "
"RETURN labels(n)")[0][0][0]
def cypher(self, query, params=None):
self._pre_action_check('cypher')
params = params or {}
params.update({'self': self._id})
return db.cypher_query(query, params)
@classmethod
def inherited_labels(cls):
return [scls.__label__ for scls in cls.mro()
if hasattr(scls, '__label__') and not hasattr(
scls, '__abstract_node__')]
@classmethod
@deprecated("Category nodes are now deprecated, the functionality is "
"emulated using labels")
def category(cls):
return FakeCategory(cls)
@hooks
def save(self):
# create or update instance node
if hasattr(self, '_id'):
# update
params = self.deflate(self.__properties__, self)
query = "MATCH n WHERE id(n)={self} \n"
query += "\n".join(["SET n.{} = {{{}}}".format(key, key) + "\n"
for key in params.keys()])
for label in self.inherited_labels():
query += "SET n:`{}`\n".format(label)
self.cypher(query, params)
elif hasattr(self, 'deleted') and self.deleted:
raise ValueError("{}.save() attempted on deleted node".format(
self.__class__.__name__))
else: # create
self._id = self.create(self.__properties__)[0]._id
return self
def _pre_action_check(self, action):
if hasattr(self, 'deleted') and self.deleted:
raise ValueError("{}.{}() attempted on deleted node".format(
self.__class__.__name__, action))
if not hasattr(self, '_id'):
raise ValueError("{}.{}() attempted on unsaved node".format(
self.__class__.__name__, action))
@hooks
def delete(self):
self._pre_action_check('delete')
self.cypher("MATCH self WHERE id(self)={self} "
"OPTIONAL MATCH (self)-[r]-()"
" DELETE r, self")
del self.__dict__['_id']
self.deleted = True
return True
def refresh(self):
"""Reload this object from its node id in the database"""
self._pre_action_check('refresh')
if hasattr(self, '_id'):
node = self.inflate(self.cypher("MATCH n WHERE id(n)={self}"
" RETURN n")[0][0][0])
for key, val in node.__properties__.items():
setattr(self, key, val)
else:
raise ValueError("Can't refresh unsaved node")
@classmethod
def _build_create_query(cls, create_params, lazy=False):
"""
Get a tuple of a CYPHER query and a params dict for the specified CREATE query.
:param create_params: A list of the target nodes parameters.
:type create_params: list of dict
:rtype: tuple
"""
# create mapped query
query = "CREATE (n:{} {{create_params}})".format(':'.join(cls.inherited_labels()))
# close query
if lazy:
query += " RETURN id(n)"
else:
query += " RETURN n"
return query, dict(create_params=create_params)
@classmethod
def _build_merge_query(cls, merge_params, update_existing=False, lazy=False, relationship=None):
"""
Get a tuple of a CYPHER query and a params dict for the specified MERGE query.
:param merge_params: The target node match parameters, each node must have a "create" key and optional "update".
:type merge_params: list of dict
:param update_existing: True to update properties of existing nodes, default False to keep existing values.
:type update_existing: bool
:rtype: tuple
"""
query_params = dict(merge_params=merge_params)
n_merge = "(n:{} {{{}}})".format(':'.join(cls.inherited_labels()),
", ".join("{0}: params.create.{0}".format(p) for p in cls.__required_properties__))
if relationship is None:
# create "simple" unwind query
query = "UNWIND {{merge_params}} as params\n MERGE {}\n ".format(n_merge)
else:
# validate relationship
if not isinstance(relationship.source, StructuredNode):
raise ValueError("relationship source [%s] is not a StructuredNode" % repr(relationship.source))
relation_type = relationship.definition.get('relation_type')
if not relation_type:
raise ValueError('No relation_type is specified on provided relationship')
query_params["source_id"] = relationship.source._id
query = "MATCH (source:{}) WHERE ID(source) = {{source_id}}\n ".format(relationship.source.__label__)
query += "WITH source\n UNWIND {merge_params} as params \n "
query += "MERGE (source)-[:{}]->{} \n ".format(relation_type, n_merge)
query += "ON CREATE SET n = params.create\n "
# if update_existing, write properties on match as well
if update_existing is True:
query += "ON MATCH SET n += params.update\n"
# close query
if lazy:
query += "RETURN id(n)"
else:
query += "RETURN n"
return query, query_params
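    # For illustration only: for a hypothetical class whose inherited labels are
    # ['Person'] and whose only required property is 'name', _build_merge_query()
    # without a relationship builds roughly the following Cypher (parameters use
    # the legacy {param} placeholder style seen throughout this module):
    #
    #   UNWIND {merge_params} as params
    #    MERGE (n:Person {name: params.create.name})
    #    ON CREATE SET n = params.create
    #   RETURN n
    #
    # With update_existing=True an "ON MATCH SET n += params.update" clause is
    # added before the RETURN.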
@classmethod
def _stream_nodes(cls, results, lazy=False):
"""
        Yields the inflated results of a streamed query.
:rtype: generator
"""
post_create = not lazy and hasattr(cls, 'post_create')
        # iterate over the results, calling post_create() (when defined) and inflating each node
for n in results:
if post_create:
n[0].post_create()
yield cls.inflate(n[0])
@classmethod
def create(cls, *props, **kwargs):
"""
        Call to CREATE with a parameters map. A new instance will be created and saved for each dict.
        When streaming=True, the operation is not run in the current transaction, if one exists.
        :param props: List of dict arguments to create the entities with.
:type props: tuple
:param streaming: Optional, Specify streaming=True to get a results generator instead of a list.
:param lazy: False by default, specify True to get nodes with id only without the parameters.
:rtype: list
"""
lazy = kwargs.get('lazy', False)
# build create query
create_params = [cls.deflate(p, skip_empty=True) for p in props]
query, params = cls._build_create_query(create_params, lazy=lazy)
if kwargs.get('streaming', False) is True:
return cls._stream_nodes(db.cypher_stream_query(query, params), lazy=lazy)
else:
# fetch and build instance for each result
results = db.cypher_query(query, params)
if not lazy and hasattr(cls, 'post_create'):
for r in results[0]:
r[0].post_create()
return [cls.inflate(r[0]) for r in results[0]]
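    # Usage sketch (Person is a hypothetical StructuredNode subclass with a
    # required 'name' property; it is not defined in this module):
    #
    #   people = Person.create({'name': 'Alice'}, {'name': 'Bob'})  # list of Person
    #   ids_only = Person.create({'name': 'Carol'}, lazy=True)      # id-only nodes
    #
    # Each positional argument is one dict of properties; pass streaming=True to
    # receive a generator instead of a list.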
@classmethod
def get_or_create(cls, *props, **kwargs):
"""
        Call to MERGE with a parameters map. A new instance will be created and saved if it does not
        already exist; this is an atomic operation.
        Parameters must contain all required properties; any non-required properties will be set on newly created nodes only.
        When streaming=True, the operation is not run in the current transaction, if one exists.
:param props: List of dict arguments to get or create the entities with.
:type props: tuple
:param relationship: Optional, relationship to get/create on when new entity is created.
:param streaming: Optional, Specify streaming=True to get a results generator instead of a list.
:param lazy: False by default, specify True to get nodes with id only without the parameters.
:rtype: list
"""
lazy = kwargs.get('lazy', False)
relationship = kwargs.get('relationship')
# build merge query
get_or_create_params = [{"create": cls.deflate(p, skip_empty=True)} for p in props]
query, params = cls._build_merge_query(get_or_create_params, relationship=relationship, lazy=lazy)
if kwargs.get('streaming', False) is True:
return cls._stream_nodes(db.cypher_stream_query(query, params), lazy=lazy)
else:
# fetch and build instance for each result
results = db.cypher_query(query, params)
            # TODO: if a node was created (rather than matched), call post_create() on it
return [cls.inflate(r[0]) for r in results[0]]
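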
@classmethod
def create_or_update(cls, *props, **kwargs):
"""
        Call to MERGE with a parameters map. A new instance will be created and saved if it does not
        already exist; this is an atomic operation. If an instance already exists, all optional properties specified will be updated.
        When streaming=True, the operation is not run in the current transaction, if one exists.
:param props: List of dict arguments to get or create the entities with.
:type props: tuple
:param relationship: Optional, relationship to get/create on when new entity is created.
:param streaming: Optional, Specify streaming=True to get a results generator instead of a list.
:param lazy: False by default, specify True to get nodes with id only without the parameters.
:rtype: list
"""
lazy = kwargs.get('lazy', False)
relationship = kwargs.get('relationship')
# build merge query, make sure to update only explicitly specified properties
create_or_update_params = []
for specified, deflated in [(p, cls.deflate(p, skip_empty=True)) for p in props]:
create_or_update_params.append({"create": deflated,
"update": dict((k, v) for k, v in deflated.items() if k in specified)})
query, params = cls._build_merge_query(create_or_update_params, update_existing=True, relationship=relationship,
lazy=lazy)
if kwargs.get('streaming', False) is True:
return cls._stream_nodes(db.cypher_stream_query(query, params), lazy=lazy)
else:
# fetch and build instance for each result
results = db.cypher_query(query, params)
            # TODO: if a node was created (rather than matched), call post_create() on it
return [cls.inflate(r[0]) for r in results[0]]
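    # Usage sketch contrasting the two MERGE helpers above (same hypothetical
    # Person class as in the create() sketch, with 'name' as the only required
    # property):
    #
    #   alice, = Person.get_or_create({'name': 'Alice'})
    #   # matches on the required properties; existing nodes are left untouched
    #
    #   alice, = Person.create_or_update({'name': 'Alice', 'age': 30})
    #   # matches on the required properties; on existing nodes only the
    #   # explicitly passed properties are updated
    #
    # Both also accept the streaming=, lazy= and relationship= keyword arguments.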
@classmethod
def inflate(cls, node):
# support lazy loading
if isinstance(node, int):
snode = cls()
snode._id = node
else:
props = {}
for key, prop in cls.__all_properties__:
# map property name from database to object property
db_property = prop.db_property or key
if db_property in node.properties:
props[key] = prop.inflate(node.properties[db_property], node)
elif prop.has_default:
props[key] = prop.default_value()
else:
props[key] = None
snode = cls(**props)
snode._id = node._id
return snode
class FakeCategory(object):
"""
Category nodes are no longer required with the introduction of labels.
    This class behaves like the old category nodes used in earlier versions of neomodel,
    but uses labels under the hood, calling the traversal API.
"""
def __init__(self, cls):
self.instance = FakeInstanceRel(cls)
def cypher(self, *args, **kwargs):
        raise NotImplementedError("the cypher method on category nodes is no longer supported")
class FakeInstanceRel(object):
"""
Fake rel manager for our fake category node
"""
def __init__(self, cls):
from .match import NodeSet
self._node_set = NodeSet(cls)
def __len__(self):
return self._node_set.query_cls(self._node_set)._count()
def __bool__(self):
return len(self) > 0
def __nonzero__(self):
return len(self) > 0
def count(self):
return self.__len__()
def all(self):
return self._node_set.all()
def search(self, **kwargs):
ns = self._node_set
for field, value in kwargs.items():
ns.filter(**{field: value})
return self._node_set.all()
def get(self, **kwargs):
result = self.search(**kwargs)
if len(result) == 1:
return result[0]
if len(result) > 1:
raise Exception("Multiple items returned, use search?")
if not result:
raise DoesNotExist("No items exist for the specified arguments")
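# Usage sketch for the deprecated category emulation above (Person is a
# hypothetical StructuredNode subclass):
#
#   people = Person.category()           # FakeCategory wrapping a label NodeSet
#   people.instance.count()              # number of Person nodes
#   people.instance.search(name='Bob')   # filter via the label-based NodeSet
#   people.instance.get(name='Bob')      # exactly one match or an exception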
|
|
# -*- coding: utf-8 -*-
__author__ = 'fyabc'
# Standard libraries.
from copy import deepcopy
from functools import partial
# Dependent libraries.
import pygame
# Local modules.
from config.gameConfig import CELL_SIZE, AllColors, FPS_MAIN, GAME_SCREEN_SIZE
from shift.utils.basicUtils import hitTestByDistance
from shift.utils.timer import ShiftTimer
from shift.gameObjects.mapObjects import Character, Door, Trap, Arrow, Key, Block, Mosaic, Lamp, GameText
from shift.gameObjects.mapGroups import ShiftGroup
import GVar
def getRealColor(logicColor):
if logicColor is True:
return AllColors['white']
else:
return AllColors['black']
class GameMap:
"""the class of the game map.
It contains a matrix of map and some Sprites.
self.matrix : the matrix of True(white) or False(black)
self.character : a Sprite of game character
self.door : a Sprite of game destination(a door)
self.rotateArrows : a list of Sprites of rotate Arrows
self.traps : a list of Sprites of traps
self.keys : a list of Sprites of keys and blocks
self.blocks :
self.lamps : a list of Sprites of lamps and mosaics
self.mosaics :
self.texts : a list of Sprites of texts
"""
allCommands = {
'noOp': 0,
'jump': 1,
'left': 2,
'leftStop': 3,
'right': 4,
'rightStop': 5,
'shift': 6,
}
def __init__(self, levelData, surface):
self.surface = surface
self.rowNum = levelData.rowNum
self.matrix = deepcopy(levelData.matrix)
self.character = Character(self, location=levelData.records['S'][0])
self.door = Door(
self, location=levelData.records['D'][0][:2],
angle=levelData.records['D'][0][2]
)
self.arrows = ShiftGroup(
Arrow(self, location=r[:2], angle=r[2])
for r in levelData.records['A']
)
self.traps = ShiftGroup(*[
Trap(self, location=r[:2], angle=r[2])
for r in levelData.records['T']
])
self.blocks = ShiftGroup(*[
Block(self, Id=r[4], start=r[:2], length=r[2], angle=r[3])
for r in levelData.records['B']
])
# keys must be initialized after blocks
self.keys = ShiftGroup(
Key(self, location=r[:2], blockIds=r[2:], angle=0)
for r in levelData.records['K']
)
self.mosaics = ShiftGroup(
Mosaic(self, Id=r[2], location=r[:2])
for r in levelData.records['M']
)
# lamps must be initialized after mosaics
self.lamps = ShiftGroup(
Lamp(self, mosaicIds=r[2:], location=r[:2])
for r in levelData.records['L']
)
self.texts = ShiftGroup(
GameText(self, text=r[3], location=r[:2], angle=r[2])
for r in levelData.records['Text']
)
self.staticObjects = ShiftGroup(
self.texts, self.door, self.arrows, self.keys, self.mosaics, self.lamps, self.traps, self.blocks,
)
# Start the timer of this game.
self.timer = ShiftTimer()
def getRotateCoordinate(self, coordinate, angle):
if angle == 0:
return coordinate
elif angle == 90:
return coordinate[1], self.rowNum - 1 - coordinate[0]
elif angle == 180:
return self.rowNum - 1 - coordinate[0], self.rowNum - 1 - coordinate[1]
elif angle == 270:
return self.rowNum - 1 - coordinate[1], coordinate[0]
return coordinate
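    # Worked example (assuming an 8x8 map, i.e. rowNum == 8): rotating the logic
    # coordinate (x=2, y=1) gives
    #   90  -> (1, 5)    # (y, rowNum - 1 - x)
    #   180 -> (5, 6)    # (rowNum - 1 - x, rowNum - 1 - y)
    #   270 -> (6, 2)    # (rowNum - 1 - y, x)
    # so four successive 90-degree rotations return the original coordinate.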
def getCellColor(self, location):
return self.matrix[location[1]][location[0]]
def drawBackground(self, surface):
for h in range(len(self.matrix)):
for w in range(len(self.matrix[h])):
surface.fill(getRealColor(self.matrix[h][w]),
pygame.Rect(w * CELL_SIZE, h * CELL_SIZE, CELL_SIZE, CELL_SIZE)
)
self.staticObjects.draw(surface)
def draw(self, surface):
self.drawBackground(surface)
self.character.draw(surface)
def win(self):
return self.character.isQuiet() and self.door.angle == 0 and \
pygame.sprite.collide_rect_ratio(0.4)(self.character, self.door)
def lose(self):
return pygame.sprite.spritecollideany(self.character, self.traps,
collided=partial(hitTestByDistance, ratio=0.5)) is not None
def update(self, command):
"""Update the GameMap,
such as change the location of character, handle hit objects, etc.
Note: This method does NOT call pygame.display.update.
"""
if command == GameMap.allCommands['left']:
self.character.toLeft()
elif command == GameMap.allCommands['right']:
self.character.toRight()
elif command == GameMap.allCommands['leftStop']:
self.character.toLeftStop()
elif command == GameMap.allCommands['rightStop']:
self.character.toRightStop()
elif command == GameMap.allCommands['jump']:
self.character.toJump()
elif command == GameMap.allCommands['shift']:
if self.character.canShift():
self.shiftMap()
# Then special events below.
# update the character here.
self.character.update()
# hitArrow here.
hitArrow = pygame.sprite.spritecollideany(self.character, self.arrows,
collided=partial(hitTestByDistance, ratio=0.4))
if hitArrow is not None:
angle = -hitArrow.angle % 360
if angle != 0:
self.rotateCartoon(angle)
self.rotateMap(angle)
self.character.verticalSpeed = 0 # after rotating, do not jump.
# self.character.toStop()
# hitKey here.
hitKey = pygame.sprite.spritecollideany(self.character, self.keys,
collided=partial(hitTestByDistance, ratio=0.55))
if hitKey is not None:
hitKey.visible = False
for block in hitKey.controlBlocks:
block.rotateFromKey()
hitKey.kill()
# hitLamp here.
hitLamp = pygame.sprite.spritecollideany(self.character, self.lamps,
collided=partial(hitTestByDistance, ratio=0.55))
if hitLamp is not None:
hitLamp.visible = False
for mosaic in hitLamp.controlMosaics:
mosaic.disappearCartoon()
mosaic.kill()
hitLamp.kill()
if self.win():
return 1
elif self.lose():
self.character.deathCartoon()
return -1
return 0
def rotateCartoon(self, angle, origSurface=None):
from config.gameConfig import MAP_ROTATE_SPEED
if angle > 180:
angle -= 360
AnglePerStep = -MAP_ROTATE_SPEED
else:
AnglePerStep = MAP_ROTATE_SPEED
if origSurface is None:
origSurface = pygame.Surface(GAME_SCREEN_SIZE)
self.draw(origSurface)
for currAngle in range(0, angle, AnglePerStep):
GVar.GlobalTimer.tick(FPS_MAIN)
self.surface.fill(AllColors['white'])
rotateSurface = pygame.transform.rotate(origSurface, currAngle).convert_alpha()
rotateRect = rotateSurface.get_rect()
rotateRect.center = self.surface.get_rect().center
self.surface.blit(rotateSurface, rotateRect)
pygame.display.update()
def covered(self, location):
"""test if the input logic location is covered by any block or mosaic.
"""
for block in self.blocks:
if block.cover(location):
return True
for mosaic in self.mosaics:
if mosaic.cover(location):
return True
return False
def shiftMap(self):
# Update character image and location.
self.character.bgColor = not self.character.bgColor
self.character.image = self.character.getImage(self.character.bgColor)
# flip the image when rotating
self.character.image = pygame.transform.flip(self.character.image, False, True)
self.character.rect.top += CELL_SIZE
# the cartoon of the flipping of character should be here.
# todo
self.rotateCartoon(180)
self.rotateMap(180)
self.character.toStop() # after shifting, do not move.
# reset the image after rotating
self.character.image = pygame.transform.flip(self.character.image, False, True)
def rotateMap(self, angle):
"""rotate the logic structure of the map.
"""
newMatrix = [[None for _ in range(self.rowNum)] for _ in range(self.rowNum)]
for y in range(self.rowNum):
for x in range(self.rowNum):
newCoor = self.getRotateCoordinate((x, y), angle)
newMatrix[newCoor[1]][newCoor[0]] = self.matrix[y][x]
self.matrix = newMatrix
self.character.rotate(angle)
for obj in self.staticObjects:
obj.rotate(angle)
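# A minimal sketch (assuming the caller owns the pygame event loop and a GameMap
# instance named gameMap; key bindings are illustrative only) of how key events
# might be translated into the allCommands protocol consumed by GameMap.update():
#
#   for event in pygame.event.get():
#       if event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:
#           result = gameMap.update(GameMap.allCommands['left'])
#       elif event.type == pygame.KEYUP and event.key == pygame.K_LEFT:
#           result = gameMap.update(GameMap.allCommands['leftStop'])
#       # 'right'/'rightStop', 'jump' and 'shift' are handled in the same way
#   # update() returns 1 on win, -1 on lose and 0 otherwise.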
|
|
# Copyright (c) 2015-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import binascii
import collections
import json
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
import six
from six.moves.urllib import parse as urlparse
from swift import gettext_ as _
from swift.common.exceptions import EncryptionException, UnknownSecretIdError
from swift.common.swob import HTTPInternalServerError
from swift.common.utils import get_logger
from swift.common.wsgi import WSGIContext
from cgi import parse_header
CRYPTO_KEY_CALLBACK = 'swift.callback.fetch_crypto_keys'
class Crypto(object):
"""
Used by middleware: Calls cryptography library
"""
cipher = 'AES_CTR_256'
# AES will accept several key sizes - we are using 256 bits i.e. 32 bytes
key_length = 32
iv_length = algorithms.AES.block_size // 8
def __init__(self, conf=None):
self.logger = get_logger(conf, log_route="crypto")
# memoize backend to avoid repeated iteration over entry points
self.backend = default_backend()
def create_encryption_ctxt(self, key, iv):
"""
Creates a crypto context for encrypting
:param key: 256-bit key
:param iv: 128-bit iv or nonce used for encryption
:raises ValueError: on invalid key or iv
:returns: an instance of an encryptor
"""
self.check_key(key)
engine = Cipher(algorithms.AES(key), modes.CTR(iv),
backend=self.backend)
return engine.encryptor()
def create_decryption_ctxt(self, key, iv, offset):
"""
Creates a crypto context for decrypting
:param key: 256-bit key
:param iv: 128-bit iv or nonce used for decryption
:param offset: offset into the message; used for range reads
:returns: an instance of a decryptor
"""
self.check_key(key)
if offset < 0:
raise ValueError('Offset must not be negative')
if offset:
# Adjust IV so that it is correct for decryption at offset.
# The CTR mode offset is incremented for every AES block and taken
# modulo 2^128.
offset_blocks, offset_in_block = divmod(offset, self.iv_length)
ivl = int(binascii.hexlify(iv), 16) + offset_blocks
ivl %= 1 << algorithms.AES.block_size
iv = bytes(bytearray.fromhex(format(
ivl, '0%dx' % (2 * self.iv_length))))
else:
offset_in_block = 0
engine = Cipher(algorithms.AES(key), modes.CTR(iv),
backend=self.backend)
dec = engine.decryptor()
# Adjust decryption boundary within current AES block
dec.update(b'*' * offset_in_block)
return dec
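    # Worked example of the offset handling above: with the 16 byte IV used here
    # and offset=40, divmod(40, 16) gives offset_blocks=2 and offset_in_block=8,
    # so the IV counter is advanced by 2 (mod 2**128) and 8 dummy bytes are fed
    # through the decryptor, leaving the CTR keystream aligned with byte 40 of
    # the original plaintext.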
def create_iv(self):
return os.urandom(self.iv_length)
def create_crypto_meta(self):
# create a set of parameters
return {'iv': self.create_iv(), 'cipher': self.cipher}
def check_crypto_meta(self, meta):
"""
Check that crypto meta dict has valid items.
:param meta: a dict
:raises EncryptionException: if an error is found in the crypto meta
"""
try:
if meta['cipher'] != self.cipher:
raise EncryptionException('Bad crypto meta: Cipher must be %s'
% self.cipher)
if len(meta['iv']) != self.iv_length:
raise EncryptionException(
'Bad crypto meta: IV must be length %s bytes'
% self.iv_length)
except KeyError as err:
raise EncryptionException(
'Bad crypto meta: Missing %s' % err)
def create_random_key(self):
# helper method to create random key of correct length
return os.urandom(self.key_length)
def wrap_key(self, wrapping_key, key_to_wrap):
        # We don't use an RFC 3394 key wrap algorithm such as cryptography's
        # aes_wrap_key because it's slower, and we have IV material readily
        # available, so we don't need a deterministic algorithm.
iv = self.create_iv()
encryptor = Cipher(algorithms.AES(wrapping_key), modes.CTR(iv),
backend=self.backend).encryptor()
return {'key': encryptor.update(key_to_wrap), 'iv': iv}
def unwrap_key(self, wrapping_key, context):
# unwrap a key from dict of form returned by wrap_key
# check the key length early - unwrapping won't change the length
self.check_key(context['key'])
decryptor = Cipher(algorithms.AES(wrapping_key),
modes.CTR(context['iv']),
backend=self.backend).decryptor()
return decryptor.update(context['key'])
def check_key(self, key):
if len(key) != self.key_length:
raise ValueError("Key must be length %s bytes" % self.key_length)
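# Round-trip sketch for the key wrapping helpers above (all names are local to
# this example):
#
#   crypto = Crypto()
#   wrapping_key = crypto.create_random_key()
#   key_to_wrap = crypto.create_random_key()
#   wrapped = crypto.wrap_key(wrapping_key, key_to_wrap)  # {'key': ..., 'iv': ...}
#   assert crypto.unwrap_key(wrapping_key, wrapped) == key_to_wrap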
class CryptoWSGIContext(WSGIContext):
"""
Base class for contexts used by crypto middlewares.
"""
def __init__(self, crypto_app, server_type, logger):
super(CryptoWSGIContext, self).__init__(crypto_app.app)
self.crypto = crypto_app.crypto
self.logger = logger
self.server_type = server_type
def get_keys(self, env, required=None, key_id=None):
# Get the key(s) from the keymaster
required = required if required is not None else [self.server_type]
try:
fetch_crypto_keys = env[CRYPTO_KEY_CALLBACK]
except KeyError:
self.logger.exception(_('ERROR get_keys() missing callback'))
raise HTTPInternalServerError(
"Unable to retrieve encryption keys.")
err = None
try:
keys = fetch_crypto_keys(key_id=key_id)
except UnknownSecretIdError as err:
self.logger.error('get_keys(): unknown key id: %s', err)
raise
except Exception as err: # noqa
self.logger.exception('get_keys(): from callback: %s', err)
raise HTTPInternalServerError(
"Unable to retrieve encryption keys.")
for name in required:
try:
key = keys[name]
self.crypto.check_key(key)
continue
except KeyError:
self.logger.exception(_("Missing key for %r") % name)
except TypeError:
self.logger.exception(_("Did not get a keys dict"))
except ValueError as e:
# don't include the key in any messages!
self.logger.exception(_("Bad key for %(name)r: %(err)s") %
{'name': name, 'err': e})
raise HTTPInternalServerError(
"Unable to retrieve encryption keys.")
return keys
def get_multiple_keys(self, env):
# get a list of keys from the keymaster containing one dict of keys for
# each of the keymaster root secret ids
keys = [self.get_keys(env)]
active_key_id = keys[0]['id']
for other_key_id in keys[0].get('all_ids', []):
if other_key_id == active_key_id:
continue
keys.append(self.get_keys(env, key_id=other_key_id))
return keys
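# For reference, the callback registered under CRYPTO_KEY_CALLBACK is expected to
# return a dict containing at least one key per server type plus the secret-id
# bookkeeping used by get_multiple_keys(); the shape below is inferred from the
# code above and the byte values are illustrative only:
#
#   {'object': b'<32 byte key>', 'container': b'<32 byte key>',
#    'id': <active secret id info>, 'all_ids': [<secret id info>, ...]}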
def dump_crypto_meta(crypto_meta):
"""
Serialize crypto meta to a form suitable for including in a header value.
The crypto-meta is serialized as a json object. The iv and key values are
random bytes and as a result need to be base64 encoded before sending over
    the wire. Base64 encoding returns a bytes object in py3; to future-proof
    the code, decode this data to produce a string, which is what the
    json.dumps function expects.
:param crypto_meta: a dict containing crypto meta items
:returns: a string serialization of a crypto meta dict
"""
def b64_encode_meta(crypto_meta):
return {
name: (base64.b64encode(value).decode() if name in ('iv', 'key')
else b64_encode_meta(value) if isinstance(value, dict)
else value)
for name, value in crypto_meta.items()}
# use sort_keys=True to make serialized form predictable for testing
return urlparse.quote_plus(
json.dumps(b64_encode_meta(crypto_meta), sort_keys=True))
def load_crypto_meta(value, b64decode=True):
"""
Build the crypto_meta from the json object.
Note that json.loads always produces unicode strings; to ensure the
resultant crypto_meta matches the original object:
* cast all keys to str (effectively a no-op on py3),
* base64 decode 'key' and 'iv' values to bytes, and
* encode remaining string values as UTF-8 on py2 (while leaving them
as native unicode strings on py3).
:param value: a string serialization of a crypto meta dict
:param b64decode: decode the 'key' and 'iv' values to bytes, default True
:returns: a dict containing crypto meta items
:raises EncryptionException: if an error occurs while parsing the
crypto meta
"""
def b64_decode_meta(crypto_meta):
return {
str(name): (
base64.b64decode(val) if name in ('iv', 'key') and b64decode
else b64_decode_meta(val) if isinstance(val, dict)
else val.encode('utf8') if six.PY2 else val)
for name, val in crypto_meta.items()}
try:
if not isinstance(value, six.string_types):
raise ValueError('crypto meta not a string')
val = json.loads(urlparse.unquote_plus(value))
if not isinstance(val, collections.Mapping):
raise ValueError('crypto meta not a Mapping')
return b64_decode_meta(val)
except (KeyError, ValueError, TypeError) as err:
msg = 'Bad crypto meta %r: %s' % (value, err)
raise EncryptionException(msg)
def append_crypto_meta(value, crypto_meta):
"""
Serialize and append crypto metadata to an encrypted value.
:param value: value to which serialized crypto meta will be appended.
:param crypto_meta: a dict of crypto meta
:return: a string of the form <value>; swift_meta=<serialized crypto meta>
"""
if not isinstance(value, str):
raise ValueError
return '%s; swift_meta=%s' % (value, dump_crypto_meta(crypto_meta))
def extract_crypto_meta(value):
"""
Extract and deserialize any crypto meta from the end of a value.
:param value: string that may have crypto meta at end
:return: a tuple of the form:
(<value without crypto meta>, <deserialized crypto meta> or None)
"""
swift_meta = None
value, meta = parse_header(value)
if 'swift_meta' in meta:
swift_meta = load_crypto_meta(meta['swift_meta'])
return value, swift_meta
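# Round-trip sketch for the serialization helpers above:
#
#   meta = {'iv': os.urandom(16), 'cipher': 'AES_CTR_256'}
#   header = append_crypto_meta('etag-value', meta)
#   value, recovered = extract_crypto_meta(header)
#   # value == 'etag-value'; recovered['iv'] is restored to the original bytes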
|
|
from pandac.PandaModules import *
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownBattleGlobals
from toontown.battle import SuitBattleGlobals
from toontown.toonbase import TTLocalizer
import HolidayDecorator
import HalloweenHolidayDecorator
import CrashedLeaderBoardDecorator
from direct.interval.IntervalGlobal import *
import calendar
from copy import deepcopy
from toontown.suit import SuitDNA
decorationHolidays = [ToontownGlobals.WINTER_DECORATIONS,
ToontownGlobals.WACKY_WINTER_DECORATIONS,
ToontownGlobals.HALLOWEEN_PROPS,
ToontownGlobals.SPOOKY_PROPS,
ToontownGlobals.HALLOWEEN_COSTUMES,
ToontownGlobals.SPOOKY_COSTUMES,
ToontownGlobals.CRASHED_LEADERBOARD]
promotionalSpeedChatHolidays = []
class NewsManager(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('NewsManager')
neverDisable = 1
YearlyHolidayType = 1
OncelyHolidayType = 2
RelativelyHolidayType = 3
OncelyMultipleStartHolidayType = 4
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.population = 0
self.invading = 0
forcedHolidayDecorations = base.config.GetString('force-holiday-decorations', '')
self.decorationHolidayIds = []
if forcedHolidayDecorations != '':
forcedHolidayDecorations = forcedHolidayDecorations.split(',')
for HID in forcedHolidayDecorations:
try:
self.decorationHolidayIds.append(decorationHolidays[int(HID)])
                except (ValueError, IndexError):
                    print 'holidayId value error: "%s"... skipping' % HID
self.holidayDecorator = None
self.holidayIdList = []
base.cr.newsManager = self
base.localAvatar.inventory.setInvasionCreditMultiplier(1)
self.weeklyCalendarHolidays = []
return
def delete(self):
self.cr.newsManager = None
if self.holidayDecorator:
self.holidayDecorator.exit()
DistributedObject.DistributedObject.delete(self)
return
def setPopulation(self, population):
self.population = population
messenger.send('newPopulation', [population])
def getPopulation(self):
return self.population
def sendSystemMessage(self, message, style):
base.localAvatar.setSystemMessage(style, message)
def setInvasionStatus(self, msgType, suitType, remaining, flags):
if suitType in SuitDNA.suitHeadTypes:
suitName = SuitBattleGlobals.SuitAttributes[suitType]['name']
suitNamePlural = SuitBattleGlobals.SuitAttributes[suitType]['pluralname']
elif suitType in SuitDNA.suitDepts:
suitName = SuitDNA.getDeptFullname(suitType)
suitNamePlural = SuitDNA.getDeptFullnameP(suitType)
messages = []
if msgType == ToontownGlobals.SuitInvasionBegin:
messages.append(TTLocalizer.SuitInvasionBegin1)
messages.append(TTLocalizer.SuitInvasionBegin2 % suitNamePlural)
self.invading = 1
elif msgType == ToontownGlobals.SuitInvasionEnd:
messages.append(TTLocalizer.SuitInvasionEnd1 % suitName)
messages.append(TTLocalizer.SuitInvasionEnd2)
self.invading = 0
elif msgType == ToontownGlobals.SuitInvasionUpdate:
messages.append(TTLocalizer.SuitInvasionUpdate1)
messages.append(TTLocalizer.SuitInvasionUpdate2)
self.invading = 1
elif msgType == ToontownGlobals.SuitInvasionBulletin:
messages.append(TTLocalizer.SuitInvasionBulletin1)
messages.append(TTLocalizer.SuitInvasionBulletin2 % suitNamePlural)
self.invading = 1
elif msgType == ToontownGlobals.SkelecogInvasionBegin:
messages.append(TTLocalizer.SkelecogInvasionBegin1)
messages.append(TTLocalizer.SkelecogInvasionBegin2)
messages.append(TTLocalizer.SkelecogInvasionBegin3)
self.invading = 1
elif msgType == ToontownGlobals.SkelecogInvasionEnd:
messages.append(TTLocalizer.SkelecogInvasionEnd1)
messages.append(TTLocalizer.SkelecogInvasionEnd2)
self.invading = 0
elif msgType == ToontownGlobals.SkelecogInvasionBulletin:
messages.append(TTLocalizer.SkelecogInvasionBulletin1)
messages.append(TTLocalizer.SkelecogInvasionBulletin2)
messages.append(TTLocalizer.SkelecogInvasionBulletin3)
self.invading = 1
elif msgType == ToontownGlobals.WaiterInvasionBegin:
messages.append(TTLocalizer.WaiterInvasionBegin1)
messages.append(TTLocalizer.WaiterInvasionBegin2)
self.invading = 1
elif msgType == ToontownGlobals.WaiterInvasionEnd:
messages.append(TTLocalizer.WaiterInvasionEnd1)
messages.append(TTLocalizer.WaiterInvasionEnd2)
self.invading = 0
elif msgType == ToontownGlobals.WaiterInvasionBulletin:
messages.append(TTLocalizer.WaiterInvasionBulletin1)
messages.append(TTLocalizer.WaiterInvasionBulletin2)
messages.append(TTLocalizer.WaiterInvasionBulletin3)
self.invading = 1
elif msgType == ToontownGlobals.V2InvasionBegin:
messages.append(TTLocalizer.V2InvasionBegin1)
messages.append(TTLocalizer.V2InvasionBegin2)
messages.append(TTLocalizer.V2InvasionBegin3)
self.invading = 1
elif msgType == ToontownGlobals.V2InvasionEnd:
messages.append(TTLocalizer.V2InvasionEnd1)
messages.append(TTLocalizer.V2InvasionEnd2)
self.invading = 0
elif msgType == ToontownGlobals.V2InvasionBulletin:
messages.append(TTLocalizer.V2InvasionBulletin1)
messages.append(TTLocalizer.V2InvasionBulletin2)
messages.append(TTLocalizer.V2InvasionBulletin3)
self.invading = 1
elif msgType == ToontownGlobals.VirtualInvasionBegin:
messages.append(TTLocalizer.VirtualInvasionBegin1)
messages.append(TTLocalizer.VirtualInvasionBegin2)
messages.append(TTLocalizer.VirtualInvasionBegin3)
self.invading = 1
elif msgType == ToontownGlobals.VirtualInvasionEnd:
messages.append(TTLocalizer.VirtualInvasionEnd1)
messages.append(TTLocalizer.VirtualInvasionEnd2)
self.invading = 0
elif msgType == ToontownGlobals.VirtualInvasionBulletin:
messages.append(TTLocalizer.VirtualInvasionBulletin1)
messages.append(TTLocalizer.VirtualInvasionBulletin2)
messages.append(TTLocalizer.VirtualInvasionBulletin3)
self.invading = 1
elif msgType == ToontownGlobals.RentalInvasionBegin:
messages.append(TTLocalizer.RentalInvasionBegin1)
messages.append(TTLocalizer.RentalInvasionBegin2)
messages.append(TTLocalizer.RentalInvasionBegin3)
self.invading = 1
elif msgType == ToontownGlobals.RentalInvasionEnd:
messages.append(TTLocalizer.RentalInvasionEnd1)
messages.append(TTLocalizer.RentalInvasionEnd2)
self.invading = 0
elif msgType == ToontownGlobals.RentalInvasionBulletin:
messages.append(TTLocalizer.RentalInvasionBulletin1)
messages.append(TTLocalizer.RentalInvasionBulletin2)
messages.append(TTLocalizer.RentalInvasionBulletin3)
self.invading = 1
else:
self.notify.warning('setInvasionStatus: invalid msgType: %s' % msgType)
return
multiplier = 1
if self.invading:
multiplier = ToontownBattleGlobals.getInvasionMultiplier()
base.localAvatar.inventory.setInvasionCreditMultiplier(multiplier)
track = Sequence(name='newsManagerWait', autoPause=1)
for i, message in enumerate(messages):
if i == 0:
track.append(Wait(1))
else:
track.append(Wait(5))
track.append(Func(base.localAvatar.setSystemMessage, 0, message))
track.start()
def getInvading(self):
return self.invading
def startHoliday(self, holidayId):
if holidayId not in self.holidayIdList:
self.notify.info('setHolidayId: Starting Holiday %s' % holidayId)
self.holidayIdList.append(holidayId)
if holidayId in decorationHolidays:
self.decorationHolidayIds.append(holidayId)
if holidayId == ToontownGlobals.HALLOWEEN_PROPS:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addHalloweenMenu()
self.setHalloweenPropsHolidayStart()
elif holidayId == ToontownGlobals.SPOOKY_PROPS:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addHalloweenMenu()
self.setSpookyPropsHolidayStart()
elif holidayId == ToontownGlobals.WINTER_DECORATIONS:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addWinterMenu()
self.setWinterDecorationsStart()
elif holidayId == ToontownGlobals.WACKY_WINTER_DECORATIONS:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addWinterMenu()
self.setWackyWinterDecorationsStart()
if hasattr(base.cr.playGame, 'dnaStore') and hasattr(base.cr.playGame, 'hood') and hasattr(base.cr.playGame.hood, 'loader'):
if holidayId == ToontownGlobals.HALLOWEEN_COSTUMES or holidayId == ToontownGlobals.SPOOKY_COSTUMES:
self.holidayDecorator = HalloweenHolidayDecorator.HalloweenHolidayDecorator()
elif holidayId == ToontownGlobals.CRASHED_LEADERBOARD:
self.holidayDecorator = CrashedLeaderBoardDecorator.CrashedLeaderBoardDecorator()
else:
self.holidayDecorator = HolidayDecorator.HolidayDecorator()
self.holidayDecorator.decorate()
messenger.send('decorator-holiday-%d-starting' % holidayId)
elif holidayId in promotionalSpeedChatHolidays:
if hasattr(base, 'TTSCPromotionalMenu'):
base.TTSCPromotionalMenu.startHoliday(holidayId)
elif holidayId == ToontownGlobals.MORE_XP_HOLIDAY:
self.setMoreXpHolidayStart()
elif holidayId == ToontownGlobals.JELLYBEAN_DAY:
pass
elif holidayId == ToontownGlobals.CIRCUIT_RACING_EVENT:
self.setGrandPrixWeekendStart()
elif holidayId == ToontownGlobals.HYDRANT_ZERO_HOLIDAY:
self.setHydrantZeroHolidayStart()
elif holidayId == ToontownGlobals.APRIL_FOOLS_COSTUMES:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addAprilToonsMenu()
elif holidayId == ToontownGlobals.WINTER_CAROLING:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addCarolMenu()
self.setWinterCarolingStart()
elif holidayId == ToontownGlobals.WACKY_WINTER_CAROLING:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addCarolMenu()
elif holidayId == ToontownGlobals.VALENTINES_DAY:
messenger.send('ValentinesDayStart')
base.localAvatar.setSystemMessage(0, TTLocalizer.ValentinesDayStart)
elif holidayId == ToontownGlobals.SILLY_CHATTER_ONE:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addSillyPhaseOneMenu()
elif holidayId == ToontownGlobals.SILLY_CHATTER_TWO:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addSillyPhaseTwoMenu()
elif holidayId == ToontownGlobals.SILLY_CHATTER_THREE:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addSillyPhaseThreeMenu()
elif holidayId == ToontownGlobals.SILLY_CHATTER_FOUR:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addSillyPhaseFourMenu()
elif holidayId == ToontownGlobals.SILLY_CHATTER_FIVE:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addSillyPhaseFiveMenu()
elif holidayId == ToontownGlobals.VICTORY_PARTY_HOLIDAY:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addVictoryPartiesMenu()
elif holidayId == ToontownGlobals.SELLBOT_NERF_HOLIDAY:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setSellbotNerfHolidayStart()
base.localAvatar.chatMgr.chatInputSpeedChat.addSellbotNerfMenu()
elif holidayId == ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY or holidayId == ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY_MONTH:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addJellybeanJamMenu(TTSCJellybeanJamMenu.JellybeanJamPhases.TROLLEY)
elif holidayId == ToontownGlobals.JELLYBEAN_FISHING_HOLIDAY or holidayId == ToontownGlobals.JELLYBEAN_FISHING_HOLIDAY_MONTH:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addJellybeanJamMenu(TTSCJellybeanJamMenu.JellybeanJamPhases.FISHING)
elif holidayId == ToontownGlobals.JELLYBEAN_PARTIES_HOLIDAY:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setJellybeanPartiesHolidayStart()
elif holidayId == ToontownGlobals.JELLYBEAN_PARTIES_HOLIDAY_MONTH:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setJellybeanMonthHolidayStart()
elif holidayId == ToontownGlobals.BANK_UPGRADE_HOLIDAY:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setBankUpgradeHolidayStart()
elif holidayId == ToontownGlobals.BLACK_CAT_DAY:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setBlackCatHolidayStart()
elif holidayId == ToontownGlobals.SPOOKY_BLACK_CAT:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setSpookyBlackCatHolidayStart()
elif holidayId == ToontownGlobals.TOP_TOONS_MARATHON:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setTopToonsMarathonStart()
elif holidayId == ToontownGlobals.SELLBOT_INVASION:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addSellbotInvasionMenu()
elif holidayId == ToontownGlobals.SELLBOT_FIELD_OFFICE:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.addSellbotFieldOfficeMenu()
elif holidayId == ToontownGlobals.IDES_OF_MARCH:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setIdesOfMarchStart()
base.localAvatar.chatMgr.chatInputSpeedChat.addIdesOfMarchMenu()
elif holidayId == ToontownGlobals.EXPANDED_CLOSETS:
self.setExpandedClosetsStart()
elif holidayId == ToontownGlobals.KARTING_TICKETS_HOLIDAY:
self.setKartingTicketsHolidayStart()
def endHoliday(self, holidayId):
if holidayId in self.holidayIdList:
self.notify.info('setHolidayId: Ending Holiday %s' % holidayId)
self.holidayIdList.remove(holidayId)
if holidayId in self.decorationHolidayIds:
self.decorationHolidayIds.remove(holidayId)
if holidayId == ToontownGlobals.HALLOWEEN_PROPS:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeHalloweenMenu()
self.setHalloweenPropsHolidayEnd()
elif holidayId == ToontownGlobals.SPOOKY_PROPS:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeHalloweenMenu()
self.setSpookyPropsHolidayEnd()
elif holidayId == ToontownGlobals.WINTER_DECORATIONS:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeWinterMenu()
self.setWinterDecorationsEnd()
elif holidayId == ToontownGlobals.WACKY_WINTER_DECORATIONS:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeWinterMenu()
if hasattr(base.cr.playGame, 'dnaStore') and hasattr(base.cr.playGame, 'hood') and hasattr(base.cr.playGame.hood, 'loader'):
if holidayId == ToontownGlobals.HALLOWEEN_COSTUMES or holidayId == ToontownGlobals.SPOOKY_COSTUMES:
self.holidayDecorator = HalloweenHolidayDecorator.HalloweenHolidayDecorator()
elif holidayId == ToontownGlobals.CRASHED_LEADERBOARD:
self.holidayDecorator = CrashedLeaderBoardDecorator.CrashedLeaderBoardDecorator()
else:
self.holidayDecorator = HolidayDecorator.HolidayDecorator()
self.holidayDecorator.undecorate()
messenger.send('decorator-holiday-%d-ending' % holidayId)
elif holidayId in promotionalSpeedChatHolidays:
if hasattr(base, 'TTSCPromotionalMenu'):
base.TTSCPromotionalMenu.endHoliday(holidayId)
elif holidayId == ToontownGlobals.MORE_XP_HOLIDAY:
self.setMoreXpHolidayEnd()
elif holidayId == ToontownGlobals.JELLYBEAN_DAY:
pass
elif holidayId == ToontownGlobals.CIRCUIT_RACING_EVENT:
self.setGrandPrixWeekendEnd()
elif holidayId == ToontownGlobals.APRIL_FOOLS_COSTUMES:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeAprilToonsMenu()
elif holidayId == ToontownGlobals.VALENTINES_DAY:
messenger.send('ValentinesDayStop')
base.localAvatar.setSystemMessage(0, TTLocalizer.ValentinesDayEnd)
elif holidayId == ToontownGlobals.SILLY_CHATTER_ONE:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeSillyPhaseOneMenu()
elif holidayId == ToontownGlobals.SILLY_CHATTER_TWO:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeSillyPhaseTwoMenu()
elif holidayId == ToontownGlobals.SILLY_CHATTER_THREE:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeSillyPhaseThreeMenu()
elif holidayId == ToontownGlobals.SILLY_CHATTER_FOUR:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeSillyPhaseFourMenu()
elif holidayId == ToontownGlobals.SILLY_CHATTER_FIVE:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeSillyPhaseFiveMenu()
elif holidayId == ToontownGlobals.VICTORY_PARTY_HOLIDAY:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeVictoryPartiesMenu()
elif holidayId == ToontownGlobals.WINTER_CAROLING:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeCarolMenu()
elif holidayId == ToontownGlobals.WACKY_WINTER_CAROLING:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeCarolMenu()
elif holidayId == ToontownGlobals.SELLBOT_NERF_HOLIDAY:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setSellbotNerfHolidayEnd()
base.localAvatar.chatMgr.chatInputSpeedChat.removeSellbotNerfMenu()
elif holidayId == ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY or holidayId == ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY_MONTH:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeJellybeanJamMenu()
elif holidayId == ToontownGlobals.JELLYBEAN_FISHING_HOLIDAY or holidayId == ToontownGlobals.JELLYBEAN_FISHING_HOLIDAY_MONTH:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeJellybeanJamMenu()
elif holidayId == ToontownGlobals.JELLYBEAN_PARTIES_HOLIDAY or holidayId == ToontownGlobals.JELLYBEAN_PARTIES_HOLIDAY_MONTH:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setJellybeanPartiesHolidayEnd()
base.localAvatar.chatMgr.chatInputSpeedChat.removeJellybeanJamMenu()
elif holidayId == ToontownGlobals.BLACK_CAT_DAY:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setBlackCatHolidayEnd()
elif holidayId == ToontownGlobals.SPOOKY_BLACK_CAT:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setSpookyBlackCatHolidayEnd()
elif holidayId == ToontownGlobals.TOP_TOONS_MARATHON:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
self.setTopToonsMarathonEnd()
elif holidayId == ToontownGlobals.SELLBOT_INVASION:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeSellbotInvasionMenu()
elif holidayId == ToontownGlobals.SELLBOT_FIELD_OFFICE:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeSellbotFieldOfficeMenu()
elif holidayId == ToontownGlobals.IDES_OF_MARCH:
if hasattr(base, 'localAvatar') and base.localAvatar and hasattr(base.localAvatar, 'chatMgr') and base.localAvatar.chatMgr:
base.localAvatar.chatMgr.chatInputSpeedChat.removeIdesOfMarchMenu()
def setHolidayIdList(self, holidayIdList):
def isEnding(id):
return id not in holidayIdList
def isStarting(id):
return id not in self.holidayIdList
toEnd = filter(isEnding, self.holidayIdList)
for endingHolidayId in toEnd:
self.endHoliday(endingHolidayId)
toStart = filter(isStarting, holidayIdList)
for startingHolidayId in toStart:
self.startHoliday(startingHolidayId)
messenger.send('setHolidayIdList', [holidayIdList])
def getDecorationHolidayId(self):
return self.decorationHolidayIds
def getHolidayIdList(self):
return self.holidayIdList
def setBingoWin(self, zoneId):
base.localAvatar.setSystemMessage(0, 'Bingo congrats!')
def setBingoStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.FishBingoStart)
def setBingoOngoing(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.FishBingoOngoing)
def setBingoEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.FishBingoEnd)
def setCircuitRaceStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.CircuitRaceStart)
def setCircuitRaceOngoing(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.CircuitRaceOngoing)
def setCircuitRaceEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.CircuitRaceEnd)
def setTrolleyHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.TrolleyHolidayStart)
def setTrolleyHolidayOngoing(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.TrolleyHolidayOngoing)
def setTrolleyHolidayEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.TrolleyHolidayEnd)
def setTrolleyWeekendStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.TrolleyWeekendStart)
def setTrolleyWeekendEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.TrolleyWeekendEnd)
def setRoamingTrialerWeekendStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.RoamingTrialerWeekendStart)
base.roamingTrialers = True
def setRoamingTrialerWeekendOngoing(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.RoamingTrialerWeekendOngoing)
base.roamingTrialers = True
def setRoamingTrialerWeekendEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.RoamingTrialerWeekendEnd)
base.roamingTrialers = False
def setMoreXpHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.MoreXpHolidayStart)
def setMoreXpHolidayOngoing(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.MoreXpHolidayOngoing)
def setMoreXpHolidayEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.MoreXpHolidayEnd)
def setJellybeanDayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.JellybeanDayHolidayStart)
def setJellybeanDayEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.JellybeanDayHolidayEnd)
def setGrandPrixWeekendStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.GrandPrixWeekendHolidayStart)
def setGrandPrixWeekendEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.GrandPrixWeekendHolidayEnd)
def setHydrantZeroHolidayStart(self):
messenger.send('HydrantZeroIsRunning', [True])
def setSellbotNerfHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.SellbotNerfHolidayStart)
def setSellbotNerfHolidayEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.SellbotNerfHolidayEnd)
def setJellybeanTrolleyHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.JellybeanTrolleyHolidayStart)
def setJellybeanTrolleyHolidayEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.JellybeanTrolleyHolidayEnd)
def setJellybeanFishingHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.JellybeanFishingHolidayStart)
def setJellybeanFishingHolidayEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.JellybeanFishingHolidayEnd)
def setJellybeanPartiesHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.JellybeanPartiesHolidayStart)
def setJellybeanMonthHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.JellybeanMonthHolidayStart)
def setJellybeanPartiesHolidayEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.JellybeanPartiesHolidayEnd)
def setBankUpgradeHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.BankUpgradeHolidayStart)
def setHalloweenPropsHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.HalloweenPropsHolidayStart)
def setHalloweenPropsHolidayEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.HalloweenPropsHolidayEnd)
def setSpookyPropsHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.SpookyPropsHolidayStart)
def setSpookyPropsHolidayEnd(self):
pass
def setBlackCatHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.BlackCatHolidayStart)
def setBlackCatHolidayEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.BlackCatHolidayEnd)
def setSpookyBlackCatHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.SpookyBlackCatHolidayStart)
for currToon in base.cr.toons.values():
currToon.setDNA(currToon.style.clone())
def setSpookyBlackCatHolidayEnd(self):
for currToon in base.cr.toons.values():
currToon.setDNA(currToon.style.clone())
def setTopToonsMarathonStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.TopToonsMarathonStart)
def setTopToonsMarathonEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.TopToonsMarathonEnd)
def setWinterDecorationsStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.WinterDecorationsStart)
def setWinterDecorationsEnd(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.WinterDecorationsEnd)
def setWackyWinterDecorationsStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.WackyWinterDecorationsStart)
def setWinterCarolingStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.WinterCarolingStart)
def setExpandedClosetsStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.ExpandedClosetsStart)
def setKartingTicketsHolidayStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.KartingTicketsHolidayStart)
def setIdesOfMarchStart(self):
base.localAvatar.setSystemMessage(0, TTLocalizer.IdesOfMarchStart)
def holidayNotify(self):
for id in self.holidayIdList:
if id == 19:
self.setBingoOngoing()
elif id == 20:
self.setCircuitRaceOngoing()
elif id == 21:
self.setTrolleyHolidayOngoing()
elif id == 22:
self.setRoamingTrialerWeekendOngoing()
def setWeeklyCalendarHolidays(self, weeklyCalendarHolidays):
self.weeklyCalendarHolidays = weeklyCalendarHolidays
def getHolidaysForWeekday(self, day):
result = []
for item in self.weeklyCalendarHolidays:
if item[1] == day:
result.append(item[0])
return result
def setYearlyCalendarHolidays(self, yearlyCalendarHolidays):
self.yearlyCalendarHolidays = yearlyCalendarHolidays
def getYearlyHolidaysForDate(self, theDate):
result = []
for item in self.yearlyCalendarHolidays:
if item[1][0] == theDate.month and item[1][1] == theDate.day:
newItem = [self.YearlyHolidayType] + list(item)
result.append(tuple(newItem))
continue
if item[2][0] == theDate.month and item[2][1] == theDate.day:
newItem = [self.YearlyHolidayType] + list(item)
result.append(tuple(newItem))
return result
def setMultipleStartHolidays(self, multipleStartHolidays):
self.multipleStartHolidays = multipleStartHolidays
def getMultipleStartHolidaysForDate(self, theDate):
result = []
for theHoliday in self.multipleStartHolidays:
times = theHoliday[1:]
tempTimes = times[0]
for startAndStopTimes in tempTimes:
startTime = startAndStopTimes[0]
endTime = startAndStopTimes[1]
if startTime[0] == theDate.year and startTime[1] == theDate.month and startTime[2] == theDate.day:
fakeOncelyHoliday = [theHoliday[0], startTime, endTime]
newItem = [self.OncelyMultipleStartHolidayType] + fakeOncelyHoliday
result.append(tuple(newItem))
continue
if endTime[0] == theDate.year and endTime[1] == theDate.month and endTime[2] == theDate.day:
fakeOncelyHoliday = [theHoliday[0], startTime, endTime]
newItem = [self.OncelyMultipleStartHolidayType] + fakeOncelyHoliday
result.append(tuple(newItem))
return result
def setOncelyCalendarHolidays(self, oncelyCalendarHolidays):
self.oncelyCalendarHolidays = oncelyCalendarHolidays
def getOncelyHolidaysForDate(self, theDate):
result = []
for item in self.oncelyCalendarHolidays:
if item[1][0] == theDate.year and item[1][1] == theDate.month and item[1][2] == theDate.day:
newItem = [self.OncelyHolidayType] + list(item)
result.append(tuple(newItem))
continue
if item[2][0] == theDate.year and item[2][1] == theDate.month and item[2][2] == theDate.day:
newItem = [self.OncelyHolidayType] + list(item)
result.append(tuple(newItem))
return result
def setRelativelyCalendarHolidays(self, relativelyCalendarHolidays):
self.relativelyCalendarHolidays = relativelyCalendarHolidays
def getRelativelyHolidaysForDate(self, theDate):
result = []
self.weekDaysInMonth = []
self.numDaysCorMatrix = [(28, 0), (29, 1), (30, 2), (31, 3)]
for i in xrange(7):
self.weekDaysInMonth.append((i, 4))
for holidayItem in self.relativelyCalendarHolidays:
item = deepcopy(holidayItem)
newItem = []
newItem.append(item[0])
i = 1
while i < len(item):
sRepNum = item[i][1]
sWeekday = item[i][2]
eWeekday = item[i+1][2]
while 1:
eRepNum = item[i+1][1]
self.initRepMatrix(theDate.year, item[i][0])
while self.weekDaysInMonth[sWeekday][1] < sRepNum:
sRepNum -= 1
sDay = self.dayForWeekday(theDate.year, item[i][0], sWeekday, sRepNum)
self.initRepMatrix(theDate.year, item[i+1][0])
while self.weekDaysInMonth[eWeekday][1] < eRepNum:
eRepNum -= 1
nDay = self.dayForWeekday(theDate.year, item[i+1][0], eWeekday, eRepNum)
if ((nDay > sDay and
item[i+1][0] == item[i][0] and
(item[i+1][1] - item[i][1]) <= (nDay - sDay + abs(eWeekday - sWeekday))/7) or
item[i+1][0] != item[i][0]):
break
if self.weekDaysInMonth[eWeekday][1] > eRepNum:
eRepNum += 1
else:
item[i+1][0] += 1
item[i+1][1] = 1
newItem.append([item[i][0], sDay, item[i][3], item[i][4], item[i][5]])
newItem.append([item[i+1][0], nDay, item[i+1][3], item[i+1][4], item[i+1][5]])
i += 2
if item[1][0] == theDate.month and newItem[1][1] == theDate.day:
nItem = [self.RelativelyHolidayType] + list(newItem)
result.append(tuple(nItem))
continue
if item[2][0] == theDate.month and newItem[2][1] == theDate.day:
nItem = [self.RelativelyHolidayType] + list(newItem)
result.append(tuple(nItem))
return result
def dayForWeekday(self, year, month, weekday, repNum):
monthDays = calendar.monthcalendar(year, month)
if monthDays[0][weekday] == 0:
repNum += 1
return monthDays[repNum - 1][weekday]
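    # Worked example (illustrative dates only): calendar.monthcalendar() starts
    # its weeks on Monday, so dayForWeekday(2023, 11, 3, 4) walks to the 4th
    # Thursday of November 2023 and returns 23, while asking for the 1st Monday
    # (weekday 0, repNum 1) hits the leading zero entry, bumps repNum and
    # returns 6.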
def initRepMatrix(self, year, month):
for i in xrange(7):
self.weekDaysInMonth[i] = (i, 4)
startingWeekDay, numDays = calendar.monthrange(year, month)
if startingWeekDay > 6:
import pdb
pdb.set_trace()
for i in xrange(4):
if numDays == self.numDaysCorMatrix[i][0]:
break
for j in xrange(self.numDaysCorMatrix[i][1]):
self.weekDaysInMonth[startingWeekDay] = (self.weekDaysInMonth[startingWeekDay][0], self.weekDaysInMonth[startingWeekDay][1] + 1)
startingWeekDay = (startingWeekDay + 1) % 7
def isHolidayRunning(self, holidayId):
result = holidayId in self.holidayIdList
return result
|
|
#!/usr/bin/python
# Copyright MetaCommunications, Inc. 2003-2007
# Copyright Redshift Software, Inc. 2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import glob
import optparse
import os
import os.path
import platform
import sys
import time
#~ Place holder for xsl_reports/util module
utils = None
repo_root = {
'anon' : 'http://svn.boost.org/svn/boost/',
'user' : 'https://svn.boost.org/svn/boost/'
}
repo_path = {
'trunk' : 'trunk',
'release' : 'branches/release',
'build' : 'trunk/tools/build/v2',
'jam' : 'trunk/tools/build/engine',
'regression' : 'trunk/tools/regression',
'boost-build.jam' : 'trunk/boost-build.jam'
}
class runner:
def __init__(self,root):
commands = map(
lambda m: m[8:].replace('_','-'),
filter(
lambda m: m.startswith('command_'),
runner.__dict__.keys())
)
commands.sort()
commands = "commands: %s" % ', '.join(commands)
opt = optparse.OptionParser(
usage="%prog [options] [commands]",
description=commands)
#~ Base Options:
opt.add_option( '--runner',
help="runner ID (e.g. 'Metacomm')" )
opt.add_option( '--comment',
help="an HTML comment file to be inserted in the reports" )
opt.add_option( '--tag',
help="the tag for the results" )
opt.add_option( '--toolsets',
help="comma-separated list of toolsets to test with" )
opt.add_option( '--libraries',
help="comma separated list of libraries to test")
opt.add_option( '--incremental',
help="do incremental run (do not remove previous binaries)",
action='store_true' )
opt.add_option( '--timeout',
help="specifies the timeout, in minutes, for a single test run/compilation",
type='int' )
opt.add_option( '--bjam-options',
help="options to pass to the regression test" )
opt.add_option( '--bjam-toolset',
help="bootstrap toolset for 'bjam' executable" )
opt.add_option( '--pjl-toolset',
help="bootstrap toolset for 'process_jam_log' executable" )
opt.add_option( '--platform' )
#~ Source Options:
opt.add_option( '--user',
help="Boost SVN user ID" )
opt.add_option( '--local',
help="the name of the boost tarball" )
opt.add_option( '--force-update',
help="do an SVN update (if applicable) instead of a clean checkout, even when performing a full run",
action='store_true' )
opt.add_option( '--have-source',
help="do neither a tarball download nor an SVN update; used primarily for testing script changes",
action='store_true' )
#~ Connection Options:
opt.add_option( '--ftp',
help="FTP URL to upload results to." )
opt.add_option( '--proxy',
help="HTTP proxy server address and port (e.g.'http://www.someproxy.com:3128')" )
opt.add_option( '--ftp-proxy',
help="FTP proxy server (e.g. 'ftpproxy')" )
opt.add_option( '--dart-server',
help="the dart server to send results to" )
#~ Debug Options:
opt.add_option( '--debug-level',
help="debugging level; controls the amount of debugging output printed",
type='int' )
opt.add_option( '--send-bjam-log',
help="send full bjam log of the regression run",
action='store_true' )
opt.add_option( '--mail',
help="email address to send run notification to" )
opt.add_option( '--smtp-login',
help="SMTP server address/login information, in the following form: <user>:<password>@<host>[:<port>]" )
opt.add_option( '--skip-tests',
help="do not run bjam; used for testing script changes",
action='store_true' )
#~ Defaults
self.runner = None
self.comment='comment.html'
self.tag='trunk'
self.toolsets=None
self.libraries=None
self.incremental=False
self.timeout=5
self.bjam_options=''
self.bjam_toolset=''
self.pjl_toolset=''
self.platform=self.platform_name()
self.user='anonymous'
self.local=None
self.force_update=False
self.have_source=False
self.ftp=None
self.proxy=None
self.ftp_proxy=None
self.dart_server=None
self.debug_level=0
self.send_bjam_log=False
self.mail=None
self.smtp_login=None
self.skip_tests=False
( _opt_, self.actions ) = opt.parse_args(None,self)
if not self.actions or self.actions == []:
self.actions = [ 'regression' ]
#~ Initialize option dependent values.
self.regression_root = root
self.boost_root = os.path.join( self.regression_root, 'boost' )
self.regression_results = os.path.join( self.regression_root, 'results' )
if self.pjl_toolset != 'python':
self.regression_log = os.path.join( self.regression_results, 'bjam.log' )
else:
self.regression_log = os.path.join( self.regression_results, 'bjam.xml' )
self.tools_bb_root = os.path.join( self.regression_root,'tools_bb' )
self.tools_bjam_root = os.path.join( self.regression_root,'tools_bjam' )
self.tools_regression_root = os.path.join( self.regression_root,'tools_regression' )
self.xsl_reports_dir = os.path.join( self.tools_regression_root, 'xsl_reports' )
self.timestamp_path = os.path.join( self.regression_root, 'timestamp' )
if sys.platform == 'win32':
self.patch_boost = 'patch_boost.bat'
self.bjam = { 'name' : 'bjam.exe' }
self.process_jam_log = { 'name' : 'process_jam_log.exe' }
elif sys.platform == 'cygwin':
self.patch_boost = 'patch_boost'
self.bjam = { 'name' : 'bjam.exe' }
self.process_jam_log = { 'name' : 'process_jam_log.exe' }
else:
self.patch_boost = 'patch_boost'
self.bjam = { 'name' : 'bjam' }
self.process_jam_log = { 'name' : 'process_jam_log' }
self.bjam = {
'name' : self.bjam['name'],
'build_cmd' : self.bjam_build_cmd,
'path' : os.path.join(self.regression_root,self.bjam['name']),
'source_dir' : self.tools_bjam_root,
'build_dir' : self.tools_bjam_root,
'build_args' : ''
}
self.process_jam_log = {
'name' : self.process_jam_log['name'],
'build_cmd' : self.bjam_cmd,
'path' : os.path.join(self.regression_root,self.process_jam_log['name']),
'source_dir' : os.path.join(self.tools_regression_root,'build'),
'build_dir' : os.path.join(self.tools_regression_root,'build'),
'build_args' : 'process_jam_log -d2'
}
if self.debug_level > 0:
self.log('Regression root = %s'%self.regression_root)
self.log('Boost root = %s'%self.boost_root)
self.log('Regression results = %s'%self.regression_results)
self.log('Regression log = %s'%self.regression_log)
self.log('BB root = %s'%self.tools_bb_root)
self.log('Bjam root = %s'%self.tools_bjam_root)
self.log('Tools root = %s'%self.tools_regression_root)
self.log('XSL reports dir = %s'%self.xsl_reports_dir)
self.log('Timestamp = %s'%self.timestamp_path)
self.log('Patch Boost script = %s'%self.patch_boost)
if self.libraries is not None:
self.libraries = self.libraries.split(",")
# The Boost.Build tests depend on the Boost.Any tests having run first
if "build" in self.libraries and "any" not in self.libraries:
self.libraries += ["any"]
self.bjam_options += ' "--limit-tests=' + \
"|".join(lib for lib in self.libraries if lib != "build") + '"'
self.main()
#~ The various commands that make up the testing sequence...
def command_cleanup(self,*args):
if not args or args == None or args == []: args = [ 'source', 'bin' ]
if 'source' in args:
self.log( 'Cleaning up "%s" directory ...' % self.boost_root )
self.rmtree( self.boost_root )
if 'bin' in args:
boost_bin_dir = os.path.join( self.boost_root, 'bin' )
self.log( 'Cleaning up "%s" directory ...' % boost_bin_dir )
self.rmtree( boost_bin_dir )
boost_binv2_dir = os.path.join( self.boost_root, 'bin.v2' )
self.log( 'Cleaning up "%s" directory ...' % boost_binv2_dir )
self.rmtree( boost_binv2_dir )
self.log( 'Cleaning up "%s" directory ...' % self.regression_results )
self.rmtree( self.regression_results )
def command_get_tools(self):
#~ Get Boost.Build v2...
self.log( 'Getting Boost.Build v2...' )
if self.user and self.user != '':
os.chdir( os.path.dirname(self.tools_bb_root) )
self.svn_command( 'co %s %s' % (
self.svn_repository_url(repo_path['build']),
os.path.basename(self.tools_bb_root) ) )
else:
self.retry( lambda: self.download_tarball(
os.path.basename(self.tools_bb_root)+".tar.bz2",
self.tarball_url(repo_path['build']) ) )
self.unpack_tarball(
self.tools_bb_root+".tar.bz2",
os.path.basename(self.tools_bb_root) )
#~ Get Boost.Jam...
self.log( 'Getting Boost.Jam...' )
if self.user and self.user != '':
os.chdir( os.path.dirname(self.tools_bjam_root) )
self.svn_command( 'co %s %s' % (
self.svn_repository_url(repo_path['jam']),
os.path.basename(self.tools_bjam_root) ) )
else:
self.retry( lambda: self.download_tarball(
os.path.basename(self.tools_bjam_root)+".tar.bz2",
self.tarball_url(repo_path['jam']) ) )
self.unpack_tarball(
self.tools_bjam_root+".tar.bz2",
os.path.basename(self.tools_bjam_root) )
#~ Get the regression tools and utilities...
self.log( 'Getting regression tools and utilities...' )
if self.user and self.user != '':
os.chdir( os.path.dirname(self.tools_regression_root) )
self.svn_command( 'co %s %s' % (
self.svn_repository_url(repo_path['regression']),
os.path.basename(self.tools_regression_root) ) )
else:
self.retry( lambda: self.download_tarball(
os.path.basename(self.tools_regression_root)+".tar.bz2",
self.tarball_url(repo_path['regression']) ) )
self.unpack_tarball(
self.tools_regression_root+".tar.bz2",
os.path.basename(self.tools_regression_root) )
#~ We get a boost-build.jam to make the tool build work even if there's
#~ an existing boost-build.jam above the testing root.
self.log( 'Getting boost-build.jam...' )
self.http_get(
self.svn_repository_url(repo_path['boost-build.jam']),
os.path.join( self.regression_root, 'boost-build.jam' ) )
def command_get_source(self):
self.refresh_timestamp()
self.log( 'Getting sources (%s)...' % self.timestamp() )
if self.user and self.user != '':
self.retry( self.svn_checkout )
else:
self.retry( self.get_tarball )
pass
def command_update_source(self):
if self.user and self.user != '' \
or os.path.exists( os.path.join( self.boost_root, '.svn' ) ):
open( self.timestamp_path, 'w' ).close()
self.log( 'Updating sources from SVN (%s)...' % self.timestamp() )
self.retry( self.svn_update )
else:
self.command_get_source( )
pass
def command_patch(self):
self.import_utils()
patch_boost_path = os.path.join( self.regression_root, self.patch_boost )
if os.path.exists( patch_boost_path ):
self.log( 'Found patch file "%s". Executing it.' % patch_boost_path )
os.chdir( self.regression_root )
utils.system( [ patch_boost_path ] )
pass
def command_setup(self):
self.command_patch()
self.build_if_needed(self.bjam,self.bjam_toolset)
if self.pjl_toolset != 'python':
self.build_if_needed(self.process_jam_log,self.pjl_toolset)
def command_test(self, *args):
if not args or args == None or args == []: args = [ "test", "process" ]
self.import_utils()
self.log( 'Making "%s" directory...' % self.regression_results )
utils.makedirs( self.regression_results )
results_libs = os.path.join( self.regression_results, 'libs' )
results_status = os.path.join( self.regression_results, 'status' )
if "clean" in args:
self.command_test_clean()
if "test" in args:
self.command_test_run()
self.command_test_boost_build()
if "process" in args:
if self.pjl_toolset != 'python':
self.command_test_process()
def command_test_clean(self):
results_libs = os.path.join( self.regression_results, 'libs' )
results_status = os.path.join( self.regression_results, 'status' )
self.rmtree( results_libs )
self.rmtree( results_status )
def command_test_run(self):
self.import_utils()
if self.pjl_toolset != 'python':
test_cmd = '%s -d2 preserve-test-targets=off --dump-tests %s "--build-dir=%s" >>"%s" 2>&1' % (
self.bjam_cmd( self.toolsets ),
self.bjam_options,
self.regression_results,
self.regression_log )
else:
test_cmd = '%s -d1 preserve-test-targets=off --dump-tests --verbose-test %s "--build-dir=%s" "--out-xml=%s"' % (
self.bjam_cmd( self.toolsets ),
self.bjam_options,
self.regression_results,
self.regression_log )
self.log( 'Starting tests (%s)...' % test_cmd )
cd = os.getcwd()
os.chdir( os.path.join( self.boost_root, 'status' ) )
utils.system( [ test_cmd ] )
os.chdir( cd )
def command_test_boost_build(self):
if self.libraries is not None and "build" not in self.libraries:
return
self.import_utils()
self.log( 'Running Boost.Build tests' )
# Find the true names of the toolsets used for testing
toolsets = os.listdir(os.path.join(self.regression_results,
"boost/bin.v2/libs/any/test/any_test.test"));
for t in toolsets:
d = os.path.join(self.regression_results, ("boost-build-%s" % (t)))
utils.makedirs (d)
fn = os.path.join(d, "test_log.xml")
cd = os.getcwd()
try:
os.chdir (os.path.join (self.boost_root, 'tools/build/test'));
bjam_path = os.path.dirname (self.tool_path( self.bjam ))
self.log( "Using bjam binary in '%s'" % (bjam_path))
os.putenv('PATH', bjam_path + os.pathsep + os.environ['PATH'])
utils.system ( [ '"%s" test_all.py --default-bjam --xml %s > %s' % (sys.executable, t, fn) ] )
finally:
os.chdir( cd )
def command_test_process(self):
self.import_utils()
self.log( 'Getting test case results out of "%s"...' % self.regression_log )
cd = os.getcwd()
os.chdir( os.path.join( self.boost_root, 'status' ) )
utils.checked_system( [
'"%s" "%s" <"%s"' % (
self.tool_path(self.process_jam_log),
self.regression_results,
self.regression_log )
] )
os.chdir( cd )
def command_collect_logs(self):
self.import_utils()
comment_path = os.path.join( self.regression_root, self.comment )
if not os.path.exists( comment_path ):
self.log( 'Comment file "%s" not found; creating default comment.' % comment_path )
f = open( comment_path, 'w' )
f.write( '<p>Tests are run on %s platform.</p>' % self.platform_name() )
f.close()
source = 'tarball'
revision = ''
svn_root_file = os.path.join( self.boost_root, '.svn' )
svn_info_file = os.path.join( self.boost_root, 'svn_info.txt' )
if os.path.exists( svn_root_file ):
source = 'SVN'
self.svn_command( 'info --xml "%s" >"%s"' % (self.boost_root,svn_info_file) )
if os.path.exists( svn_info_file ):
f = open( svn_info_file, 'r' )
svn_info = f.read()
f.close()
i = svn_info.find( 'Revision:' )
if i < 0: i = svn_info.find( 'revision=' ) # --xml format
if i >= 0:
i += 10
while svn_info[i] >= '0' and svn_info[i] <= '9':
revision += svn_info[i]
i += 1
if self.pjl_toolset != 'python':
from collect_and_upload_logs import collect_logs
if self.incremental:
run_type = 'incremental'
else:
run_type = 'full'
collect_logs(
self.regression_results,
self.runner, self.tag, self.platform, comment_path,
self.timestamp_path,
self.user,
source, run_type,
self.dart_server, self.proxy,
revision )
else:
from process_jam_log import BJamLog2Results
if self.incremental:
run_type = '--incremental'
else:
run_type = ''
BJamLog2Results([
'--output='+os.path.join(self.regression_results,self.runner+'.xml'),
'--runner='+self.runner,
'--comment='+comment_path,
'--tag='+self.tag,
'--platform='+self.platform,
'--source='+source,
'--revision='+revision,
run_type,
self.regression_log
])
self.compress_file(
os.path.join(self.regression_results,self.runner+'.xml'),
os.path.join(self.regression_results,self.runner+'.zip')
)
def command_upload_logs(self):
self.import_utils()
from collect_and_upload_logs import upload_logs
if self.ftp:
self.retry(
lambda:
upload_logs(
self.regression_results,
self.runner, self.tag,
self.user,
self.ftp_proxy,
self.debug_level, self.send_bjam_log,
self.timestamp_path,
self.dart_server,
ftp_url = self.ftp )
)
else:
self.retry(
lambda:
upload_logs(
self.regression_results,
self.runner, self.tag,
self.user,
self.ftp_proxy,
self.debug_level, self.send_bjam_log,
self.timestamp_path,
self.dart_server )
)
def command_regression(self):
import socket
import string
try:
mail_subject = 'Boost regression for %s on %s' % ( self.tag,
string.split(socket.gethostname(), '.')[0] )
start_time = time.localtime()
if self.mail:
self.log( 'Sending start notification to "%s"' % self.mail )
self.send_mail(
'%s started at %s.' % ( mail_subject, format_time( start_time ) )
)
self.command_get_tools()
if self.local is not None:
self.log( 'Using local file "%s"' % self.local )
b = os.path.basename( self.local )
tag = b[ 0: b.find( '.' ) ]
self.log( 'Tag: "%s"' % tag )
self.unpack_tarball( self.local, self.boost_root )
elif self.have_source:
if not self.incremental: self.command_cleanup( 'bin' )
else:
if self.incremental or self.force_update:
if not self.incremental: self.command_cleanup( 'bin' )
else:
self.command_cleanup()
self.command_get_source()
self.command_setup()
# Not specifying --toolset on the command line is not enough:
# that would mean using the Boost.Build default toolsets.
# We can skip the tests only if we were explicitly told, via an
# empty "--toolset=" on the command line, to use no toolsets.
if self.toolsets != '': # --toolset=,
if not self.skip_tests:
self.command_test()
self.command_collect_logs()
self.command_upload_logs()
if self.mail:
self.log( 'Sending report to "%s"' % self.mail )
end_time = time.localtime()
self.send_mail(
'%s completed successfully at %s.' % ( mail_subject, format_time( end_time ) )
)
except:
if self.mail:
self.log( 'Sending report to "%s"' % self.mail )
traceback_ = '\n'.join( apply( traceback.format_exception, sys.exc_info() ) )
end_time = time.localtime()
self.send_mail(
'%s failed at %s.' % ( mail_subject, format_time( end_time ) ),
traceback_ )
raise
def command_show_revision(self):
modified = '$Date$'
revision = '$Revision$'
import re
re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' )
print '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 )
print '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 )
#~ Utilities...
def main(self):
for action in self.actions:
action_m = "command_"+action.replace('-','_')
if hasattr(self,action_m):
getattr(self,action_m)()
def platform_name(self):
# See http://article.gmane.org/gmane.comp.lib.boost.testing/933
if sys.platform == 'win32':
return 'Windows'
elif sys.platform == 'cygwin':
return 'Windows/Cygwin'
return platform.system()
def log(self,message):
sys.stdout.flush()
sys.stderr.flush()
sys.stderr.write( '# %s\n' % message )
sys.stderr.flush()
def rmtree(self,path):
if os.path.exists( path ):
import shutil
#~ shutil.rmtree( unicode( path ) )
if sys.platform == 'win32':
os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
shutil.rmtree( unicode( path ) )
else:
os.system( 'rm -f -r "%s"' % path )
def refresh_timestamp( self ):
if os.path.exists( self.timestamp_path ):
os.unlink( self.timestamp_path )
open( self.timestamp_path, 'w' ).close()
def timestamp( self ):
return time.strftime(
'%Y-%m-%dT%H:%M:%SZ',
time.gmtime( os.stat( self.timestamp_path ).st_mtime ) )
def retry( self, f, max_attempts=5, sleep_secs=10 ):
for attempts in range( max_attempts, -1, -1 ):
try:
return f()
except Exception, msg:
self.log( '%s failed with message "%s"' % ( f.__name__, msg ) )
if attempts == 0:
self.log( 'Giving up.' )
raise
self.log( 'Retrying (%d more attempts).' % attempts )
time.sleep( sleep_secs )
def http_get( self, source_url, destination_file ):
import urllib
proxies = None
if hasattr(self,'proxy') and self.proxy is not None:
proxies = { 'http' : self.proxy }
src = urllib.urlopen( source_url, proxies = proxies )
f = open( destination_file, 'wb' )
while True:
data = src.read( 16*1024 )
if len( data ) == 0: break
f.write( data )
f.close()
src.close()
def import_utils(self):
global utils
if utils is None:
sys.path.append( self.xsl_reports_dir )
import utils as utils_module
utils = utils_module
def build_if_needed( self, tool, toolset ):
self.import_utils()
if os.path.exists( tool[ 'path' ] ):
self.log( 'Found preinstalled "%s"; will use it.' % tool[ 'path' ] )
return
self.log( 'Preinstalled "%s" is not found; building one...' % tool[ 'path' ] )
if toolset is None:
if self.toolsets is not None:
toolset = string.split( self.toolsets, ',' )[0]
else:
toolset = tool[ 'default_toolset' ]
self.log( 'Warning: No bootstrap toolset for "%s" was specified.' % tool[ 'name' ] )
self.log( ' Using default toolset for the platform (%s).' % toolset )
if os.path.exists( tool[ 'source_dir' ] ):
self.log( 'Found "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) )
build_cmd = tool[ 'build_cmd' ]( toolset, tool['build_args'] )
self.log( 'Building "%s" (%s)...' % ( tool[ 'name'], build_cmd ) )
utils.system( [ 'cd "%s"' % tool[ 'source_dir' ], build_cmd ] )
else:
raise Exception( 'Could not find "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) )
if not tool.has_key( 'build_path' ):
tool[ 'build_path' ] = self.tool_path( tool )
if not os.path.exists( tool[ 'build_path' ] ):
raise Exception( 'Failed to find "%s" after build.' % tool[ 'build_path' ] )
self.log( '%s successfully built in "%s" location' % ( tool[ 'name' ], tool[ 'build_path' ] ) )
def tool_path( self, name_or_spec ):
if isinstance( name_or_spec, basestring ):
return os.path.join( self.regression_root, name_or_spec )
if os.path.exists( name_or_spec[ 'path' ] ):
return name_or_spec[ 'path' ]
if name_or_spec.has_key( 'build_path' ):
return name_or_spec[ 'build_path' ]
build_dir = name_or_spec[ 'build_dir' ]
self.log( 'Searching for "%s" in "%s"...' % ( name_or_spec[ 'name' ], build_dir ) )
for root, dirs, files in os.walk( build_dir ):
if name_or_spec[ 'name' ] in files:
return os.path.join( root, name_or_spec[ 'name' ] )
raise Exception( 'Cannot find "%s" in any of the following locations:\n%s' % (
name_or_spec[ 'name' ]
, '\n'.join( [ name_or_spec[ 'path' ], build_dir ] )
) )
def bjam_build_cmd( self, *rest ):
if sys.platform == 'win32':
cmd = 'build.bat %s' % self.bjam_toolset
else:
cmd = './build.sh %s' % self.bjam_toolset
env_setup_key = 'BJAM_ENVIRONMENT_SETUP'
if os.environ.has_key( env_setup_key ):
return '%s & %s' % ( os.environ[env_setup_key], cmd )
return cmd
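# For example, with self.bjam_toolset == 'msvc' on win32 and no
# BJAM_ENVIRONMENT_SETUP variable set, bjam_build_cmd() returns 'build.bat msvc'.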
def bjam_cmd( self, toolsets, args = '', *rest ):
build_path = self.regression_root
if build_path[-1] == '\\': build_path += '\\'
if self.timeout > 0:
args += ' -l%s' % (self.timeout*60)
cmd = '"%(bjam)s"' +\
' "-sBOOST_BUILD_PATH=%(bbpath)s"' +\
' "-sBOOST_ROOT=%(boost)s"' +\
' "--boost=%(boost)s"' +\
' "--boost-build=%(bb)s"' +\
' "--debug-configuration"' +\
' %(arg)s'
cmd %= {
'bjam' : self.tool_path( self.bjam ),
'bbpath' : os.pathsep.join([build_path,self.tools_bb_root]),
'bb' : self.tools_bb_root,
'boost' : self.boost_root,
'arg' : args }
if toolsets:
import string
cmd += ' ' + string.join(string.split( toolsets, ',' ), ' ' )
return cmd
def send_mail( self, subject, msg = '' ):
import smtplib
if not self.smtp_login:
server_name = 'mail.%s' % self.mail.split( '@' )[-1]
user_name = None
password = None
else:
server_name = self.smtp_login.split( '@' )[-1]
( user_name, password ) = self.smtp_login.split( '@' )[0].split( ':' )
self.log( ' Sending mail through "%s"...' % server_name )
smtp_server = smtplib.SMTP( server_name )
smtp_server.set_debuglevel( self.debug_level )
if user_name:
smtp_server.login( user_name, password )
smtp_server.sendmail( self.mail, [ self.mail ],
'Subject: %s\nTo: %s\n\n%s' % ( subject, self.mail, msg ) )
def compress_file( self, file_path, archive_path ):
self.import_utils()
utils.log( 'Compressing "%s"...' % file_path )
try:
import zipfile
z = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED )
z.write( file_path, os.path.basename( file_path ) )
z.close()
utils.log( 'Done writing "%s".'% archive_path )
except Exception, msg:
utils.log( 'Warning: Compressing failed (%s)' % msg )
utils.log( ' Trying to compress using a platform-specific tool...' )
try:
import zip_cmd
except ImportError:
script_dir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
utils.log( 'Could not find \'zip_cmd\' module in the script directory (%s).' % script_dir )
raise Exception( 'Compressing failed!' )
else:
if os.path.exists( archive_path ):
os.unlink( archive_path )
utils.log( 'Removing stale "%s".' % archive_path )
zip_cmd.main( file_path, archive_path )
utils.log( 'Done compressing "%s".' % archive_path )
#~ Downloading source from SVN...
def svn_checkout( self ):
os.chdir( self.regression_root )
self.svn_command( 'co %s %s' % (self.svn_repository_url(self.tag),'boost') )
def svn_update( self ):
os.chdir( self.boost_root )
self.svn_command( 'update' )
def svn_command( self, command ):
svn_anonymous_command_line = 'svn --non-interactive %(command)s'
svn_command_line = 'svn --non-interactive --username=%(user)s %(command)s'
if not hasattr(self,'user') or self.user is None or self.user == 'anonymous':
cmd = svn_anonymous_command_line % { 'command': command }
else:
cmd = svn_command_line % { 'user': self.user, 'command': command }
self.log( 'Executing SVN command "%s"' % cmd )
rc = os.system( cmd )
if rc != 0:
raise Exception( 'SVN command "%s" failed with code %d' % ( cmd, rc ) )
def svn_repository_url( self, path ):
if self.user != 'anonymous' and self.user != '':
return '%s%s' % (repo_root['user'],path)
else:
return '%s%s' % (repo_root['anon'],path)
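# For example, for an anonymous run, svn_repository_url(repo_path['build'])
# yields 'http://svn.boost.org/svn/boost/trunk/tools/build/v2'.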
#~ Downloading and extracting source archives, from tarballs or zipballs...
def get_tarball( self, *args ):
if not args or args == []:
args = [ 'download', 'unpack' ]
tarball_path = None
if hasattr(self,'local') and self.local is not None:
tarball_path = self.local
elif 'download' in args:
tarball_path = self.download_tarball(self.boost_tarball_name(),self.boost_tarball_url())
if not tarball_path:
tarball_path = os.path.join( self.regression_root, self.boost_tarball_name() )
if 'unpack' in args:
self.unpack_tarball( tarball_path, self.boost_root )
pass
def download_tarball( self, tarball_name, tarball_url ):
tarball_path = os.path.join( self.regression_root, tarball_name )
self.log( 'Downloading "%s" to "%s"...' % ( tarball_url, os.path.dirname( tarball_path ) ) )
if os.path.exists( tarball_path ):
os.unlink( tarball_path )
self.http_get( tarball_url, tarball_path )
return tarball_path
def tarball_url( self, path ):
return 'http://beta.boost.org/development/snapshot.php/%s' % path
def boost_tarball_name( self ):
return 'boost-%s.tar.bz2' % self.tag.split( '/' )[-1]
def boost_tarball_url( self ):
return self.tarball_url( self.tag )
def unpack_tarball( self, tarball_path, target_path ):
self.log( 'Looking for old unpacked archives...' )
old_boost_dirs = self.find_boost_dirs( )
for old_boost_dir in old_boost_dirs:
if old_boost_dir != tarball_path:
self.log( 'Deleting old directory %s.' % old_boost_dir )
self.rmtree( old_boost_dir )
self.log( 'Unpacking boost tarball ("%s")...' % tarball_path )
tarball_name = os.path.basename( tarball_path )
extension = tarball_name[ tarball_name.find( '.' ) : ]
if extension in ( ".tar.gz", ".tar.bz2" ):
import tarfile
import stat
mode = os.path.splitext( extension )[1][1:]
tar = tarfile.open( tarball_path, 'r:%s' % mode )
for tarinfo in tar:
tar.extract( tarinfo, self.regression_root )
if sys.platform == 'win32' and not tarinfo.isdir():
# workaround what appears to be a Win32-specific bug in 'tarfile'
# (modification times for extracted files are not set properly)
f = os.path.join( self.regression_root, tarinfo.name )
os.chmod( f, stat.S_IWRITE )
os.utime( f, ( tarinfo.mtime, tarinfo.mtime ) )
tar.close()
elif extension == ".zip":
import zipfile
z = zipfile.ZipFile( tarball_path, 'r', zipfile.ZIP_DEFLATED )
for f in z.infolist():
destination_file_path = os.path.join( self.regression_root, f.filename )
if destination_file_path[-1] == "/": # directory
if not os.path.exists( destination_file_path ):
os.makedirs( destination_file_path )
else: # file
result = open( destination_file_path, 'wb' )
result.write( z.read( f.filename ) )
result.close()
z.close()
else:
raise Exception( 'Do not know how to unpack archives with extension "%s"' % extension )
boost_dir = self.find_boost_dirs()[0]
self.log( ' Unpacked into directory "%s"' % boost_dir )
if os.path.exists( target_path ):
self.log( 'Deleting "%s" directory...' % target_path )
self.rmtree( target_path )
self.log( 'Renaming "%s" into "%s"' % ( boost_dir, target_path ) )
os.rename( boost_dir, target_path )
def find_boost_dirs( self ):
return [
x for x in
glob.glob( os.path.join( self.regression_root, 'boost[-_]*' ) )
if os.path.isdir( x )
]
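# A minimal sketch of how this class is typically driven; the wrapper below is
# an assumption (the real entry point normally lives in a separate script that
# determines the regression root), not part of this file:
#
#   if __name__ == '__main__':
#       runner( os.path.abspath( os.path.dirname( sys.argv[0] ) ) )
#
# Instantiating 'runner' parses the command line and immediately executes the
# requested commands via self.main().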
|
|
"""Support for monitoring plants."""
from collections import deque
from datetime import datetime, timedelta
import logging
import voluptuous as vol
from homeassistant.components.recorder.models import States
from homeassistant.components.recorder.util import execute, session_scope
from homeassistant.const import (
ATTR_TEMPERATURE,
ATTR_UNIT_OF_MEASUREMENT,
CONDUCTIVITY,
CONF_SENSORS,
STATE_OK,
STATE_PROBLEM,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
UNIT_PERCENTAGE,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_state_change_event
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "plant"
READING_BATTERY = "battery"
READING_TEMPERATURE = ATTR_TEMPERATURE
READING_MOISTURE = "moisture"
READING_CONDUCTIVITY = "conductivity"
READING_BRIGHTNESS = "brightness"
ATTR_PROBLEM = "problem"
ATTR_SENSORS = "sensors"
PROBLEM_NONE = "none"
ATTR_MAX_BRIGHTNESS_HISTORY = "max_brightness"
# we're not returning only one value, we're returning a dict here. So we need
# to have a separate literal for it to avoid confusion.
ATTR_DICT_OF_UNITS_OF_MEASUREMENT = "unit_of_measurement_dict"
CONF_MIN_BATTERY_LEVEL = f"min_{READING_BATTERY}"
CONF_MIN_TEMPERATURE = f"min_{READING_TEMPERATURE}"
CONF_MAX_TEMPERATURE = f"max_{READING_TEMPERATURE}"
CONF_MIN_MOISTURE = f"min_{READING_MOISTURE}"
CONF_MAX_MOISTURE = f"max_{READING_MOISTURE}"
CONF_MIN_CONDUCTIVITY = f"min_{READING_CONDUCTIVITY}"
CONF_MAX_CONDUCTIVITY = f"max_{READING_CONDUCTIVITY}"
CONF_MIN_BRIGHTNESS = f"min_{READING_BRIGHTNESS}"
CONF_MAX_BRIGHTNESS = f"max_{READING_BRIGHTNESS}"
CONF_CHECK_DAYS = "check_days"
CONF_SENSOR_BATTERY_LEVEL = READING_BATTERY
CONF_SENSOR_MOISTURE = READING_MOISTURE
CONF_SENSOR_CONDUCTIVITY = READING_CONDUCTIVITY
CONF_SENSOR_TEMPERATURE = READING_TEMPERATURE
CONF_SENSOR_BRIGHTNESS = READING_BRIGHTNESS
DEFAULT_MIN_BATTERY_LEVEL = 20
DEFAULT_MIN_MOISTURE = 20
DEFAULT_MAX_MOISTURE = 60
DEFAULT_MIN_CONDUCTIVITY = 500
DEFAULT_MAX_CONDUCTIVITY = 3000
DEFAULT_CHECK_DAYS = 3
SCHEMA_SENSORS = vol.Schema(
{
vol.Optional(CONF_SENSOR_BATTERY_LEVEL): cv.entity_id,
vol.Optional(CONF_SENSOR_MOISTURE): cv.entity_id,
vol.Optional(CONF_SENSOR_CONDUCTIVITY): cv.entity_id,
vol.Optional(CONF_SENSOR_TEMPERATURE): cv.entity_id,
vol.Optional(CONF_SENSOR_BRIGHTNESS): cv.entity_id,
}
)
PLANT_SCHEMA = vol.Schema(
{
vol.Required(CONF_SENSORS): vol.Schema(SCHEMA_SENSORS),
vol.Optional(
CONF_MIN_BATTERY_LEVEL, default=DEFAULT_MIN_BATTERY_LEVEL
): cv.positive_int,
vol.Optional(CONF_MIN_TEMPERATURE): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMPERATURE): vol.Coerce(float),
vol.Optional(CONF_MIN_MOISTURE, default=DEFAULT_MIN_MOISTURE): cv.positive_int,
vol.Optional(CONF_MAX_MOISTURE, default=DEFAULT_MAX_MOISTURE): cv.positive_int,
vol.Optional(
CONF_MIN_CONDUCTIVITY, default=DEFAULT_MIN_CONDUCTIVITY
): cv.positive_int,
vol.Optional(
CONF_MAX_CONDUCTIVITY, default=DEFAULT_MAX_CONDUCTIVITY
): cv.positive_int,
vol.Optional(CONF_MIN_BRIGHTNESS): cv.positive_int,
vol.Optional(CONF_MAX_BRIGHTNESS): cv.positive_int,
vol.Optional(CONF_CHECK_DAYS, default=DEFAULT_CHECK_DAYS): cv.positive_int,
}
)
DOMAIN = "plant"
CONFIG_SCHEMA = vol.Schema({DOMAIN: {cv.string: PLANT_SCHEMA}}, extra=vol.ALLOW_EXTRA)
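# An illustrative YAML configuration that satisfies CONFIG_SCHEMA above (the
# entity IDs are hypothetical placeholders; omitted thresholds fall back to the
# DEFAULT_* values via the schema defaults):
#
#   plant:
#     lemon_tree:
#       sensors:
#         moisture: sensor.lemon_tree_moisture
#         battery: sensor.lemon_tree_battery
#       min_moisture: 25
#       max_moisture: 60
#       min_battery: 17
#       check_days: 3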
# Flag for enabling/disabling the loading of the history from the database.
# This feature is turned off right now as its tests are not 100% stable.
ENABLE_LOAD_HISTORY = False
async def async_setup(hass, config):
"""Set up the Plant component."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entities = []
for plant_name, plant_config in config[DOMAIN].items():
_LOGGER.info("Added plant %s", plant_name)
entity = Plant(plant_name, plant_config)
entities.append(entity)
await component.async_add_entities(entities)
return True
class Plant(Entity):
"""Plant monitors the well-being of a plant.
It also checks the measurements against
configurable min and max values.
"""
READINGS = {
READING_BATTERY: {
ATTR_UNIT_OF_MEASUREMENT: UNIT_PERCENTAGE,
"min": CONF_MIN_BATTERY_LEVEL,
},
READING_TEMPERATURE: {
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
"min": CONF_MIN_TEMPERATURE,
"max": CONF_MAX_TEMPERATURE,
},
READING_MOISTURE: {
ATTR_UNIT_OF_MEASUREMENT: UNIT_PERCENTAGE,
"min": CONF_MIN_MOISTURE,
"max": CONF_MAX_MOISTURE,
},
READING_CONDUCTIVITY: {
ATTR_UNIT_OF_MEASUREMENT: CONDUCTIVITY,
"min": CONF_MIN_CONDUCTIVITY,
"max": CONF_MAX_CONDUCTIVITY,
},
READING_BRIGHTNESS: {
ATTR_UNIT_OF_MEASUREMENT: "lux",
"min": CONF_MIN_BRIGHTNESS,
"max": CONF_MAX_BRIGHTNESS,
},
}
def __init__(self, name, config):
"""Initialize the Plant component."""
self._config = config
self._sensormap = {}
self._readingmap = {}
self._unit_of_measurement = {}
for reading, entity_id in config["sensors"].items():
self._sensormap[entity_id] = reading
self._readingmap[reading] = entity_id
self._state = None
self._name = name
self._battery = None
self._moisture = None
self._conductivity = None
self._temperature = None
self._brightness = None
self._problems = PROBLEM_NONE
self._conf_check_days = DEFAULT_CHECK_DAYS  # default check interval (3 days)
if CONF_CHECK_DAYS in self._config:
self._conf_check_days = self._config[CONF_CHECK_DAYS]
self._brightness_history = DailyHistory(self._conf_check_days)
@callback
def _state_changed_event(self, event):
"""Sensor state change event."""
self.state_changed(event.data.get("entity_id"), event.data.get("new_state"))
@callback
def state_changed(self, entity_id, new_state):
"""Update the sensor status."""
if new_state is None:
return
value = new_state.state
_LOGGER.debug("Received callback from %s with value %s", entity_id, value)
if value == STATE_UNKNOWN:
return
reading = self._sensormap[entity_id]
if reading == READING_MOISTURE:
if value != STATE_UNAVAILABLE:
value = int(float(value))
self._moisture = value
elif reading == READING_BATTERY:
if value != STATE_UNAVAILABLE:
value = int(float(value))
self._battery = value
elif reading == READING_TEMPERATURE:
if value != STATE_UNAVAILABLE:
value = float(value)
self._temperature = value
elif reading == READING_CONDUCTIVITY:
if value != STATE_UNAVAILABLE:
value = int(float(value))
self._conductivity = value
elif reading == READING_BRIGHTNESS:
if value != STATE_UNAVAILABLE:
value = int(float(value))
self._brightness = value
self._brightness_history.add_measurement(
self._brightness, new_state.last_updated
)
else:
raise HomeAssistantError(
f"Unknown reading from sensor {entity_id}: {value}"
)
if ATTR_UNIT_OF_MEASUREMENT in new_state.attributes:
self._unit_of_measurement[reading] = new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
)
self._update_state()
def _update_state(self):
"""Update the state of the class based on the sensor data."""
result = []
for sensor_name in self._sensormap.values():
params = self.READINGS[sensor_name]
value = getattr(self, f"_{sensor_name}")
if value is not None:
if value == STATE_UNAVAILABLE:
result.append(f"{sensor_name} unavailable")
else:
if sensor_name == READING_BRIGHTNESS:
result.append(
self._check_min(
sensor_name, self._brightness_history.max, params
)
)
else:
result.append(self._check_min(sensor_name, value, params))
result.append(self._check_max(sensor_name, value, params))
result = [r for r in result if r is not None]
if result:
self._state = STATE_PROBLEM
self._problems = ", ".join(result)
else:
self._state = STATE_OK
self._problems = PROBLEM_NONE
_LOGGER.debug("New data processed")
self.async_write_ha_state()
def _check_min(self, sensor_name, value, params):
"""If configured, check the value against the defined minimum value."""
if "min" in params and params["min"] in self._config:
min_value = self._config[params["min"]]
if value < min_value:
return f"{sensor_name} low"
def _check_max(self, sensor_name, value, params):
"""If configured, check the value against the defined maximum value."""
if "max" in params and params["max"] in self._config:
max_value = self._config[params["max"]]
if value > max_value:
return f"{sensor_name} high"
return None
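# Illustrative outcome of the two checks above (thresholds and readings are
# hypothetical): with min_moisture = 20 and max_temperature = 30 configured, a
# plant reporting moisture 12 and temperature 34 gets _problems set to
# "moisture low, temperature high" and its state set to STATE_PROBLEM by
# _update_state().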
async def async_added_to_hass(self):
"""After being added to hass, load from history."""
if ENABLE_LOAD_HISTORY and "recorder" in self.hass.config.components:
# only use the database if it's configured
self.hass.async_add_job(self._load_history_from_db)
async_track_state_change_event(
self.hass, list(self._sensormap), self._state_changed_event
)
for entity_id in self._sensormap:
state = self.hass.states.get(entity_id)
if state is not None:
self.state_changed(entity_id, state)
async def _load_history_from_db(self):
"""Load the history of the brightness values from the database.
This only needs to be done once during startup.
"""
start_date = datetime.now() - timedelta(days=self._conf_check_days)
entity_id = self._readingmap.get(READING_BRIGHTNESS)
if entity_id is None:
_LOGGER.debug(
"Not reading the history from the database as "
"there is no brightness sensor configured"
)
return
_LOGGER.debug("Initializing values for %s from the database", self._name)
with session_scope(hass=self.hass) as session:
query = (
session.query(States)
.filter(
(States.entity_id == entity_id.lower())
& (States.last_updated > start_date)
)
.order_by(States.last_updated.asc())
)
states = execute(query, to_native=True, validate_entity_ids=False)
for state in states:
# filter out all None, NaN and "unknown" states
# only keep real values
try:
self._brightness_history.add_measurement(
int(state.state), state.last_updated
)
except ValueError:
pass
_LOGGER.debug("Initializing from database completed")
self.async_write_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def state_attributes(self):
"""Return the attributes of the entity.
Provide the individual measurements from the
sensor in the attributes of the device.
"""
attrib = {
ATTR_PROBLEM: self._problems,
ATTR_SENSORS: self._readingmap,
ATTR_DICT_OF_UNITS_OF_MEASUREMENT: self._unit_of_measurement,
}
for reading in self._sensormap.values():
attrib[reading] = getattr(self, f"_{reading}")
if self._brightness_history.max is not None:
attrib[ATTR_MAX_BRIGHTNESS_HISTORY] = self._brightness_history.max
return attrib
class DailyHistory:
"""Stores one measurement per day for a maximum number of days.
At the moment only the maximum value per day is kept.
"""
def __init__(self, max_length):
"""Create new DailyHistory with a maximum length of the history."""
self.max_length = max_length
self._days = None
self._max_dict = {}
self.max = None
def add_measurement(self, value, timestamp=None):
"""Add a new measurement for a certain day."""
day = (timestamp or datetime.now()).date()
if not isinstance(value, (int, float)):
return
if self._days is None:
self._days = deque()
self._add_day(day, value)
else:
current_day = self._days[-1]
if day == current_day:
self._max_dict[day] = max(value, self._max_dict[day])
elif day > current_day:
self._add_day(day, value)
else:
_LOGGER.warning("Received old measurement, not storing it")
self.max = max(self._max_dict.values())
def _add_day(self, day, value):
"""Add a new day to the history.
Deletes the oldest day, if the queue becomes too long.
"""
if len(self._days) == self.max_length:
oldest = self._days.popleft()
del self._max_dict[oldest]
self._days.append(day)
if not isinstance(value, (int, float)):
return
self._max_dict[day] = value
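# A minimal usage sketch of DailyHistory (the numbers are illustrative):
#
#   history = DailyHistory(3)        # keep at most three days of maxima
#   history.add_measurement(120)     # first reading of the day
#   history.add_measurement(400)     # today's stored maximum becomes 400
#   assert history.max == 400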
|
|
import getpass
import os
import socket
from datetime import date, datetime
from glob import glob
from logging import getLogger, DEBUG
from logging.handlers import RotatingFileHandler
from typing import List, Optional
import click
from scrapy.crawler import CrawlerProcess
from scrapy.utils.log import get_scrapy_root_handler, configure_logging
import config
from webweeder import cli_vars
from webweeder import configutils
from webweeder.spiders.monster import MonsterSpider
from webweeder.stats import StatsMonitor
from webweeder.utils import delete_directory, find_duplicates, MEGABYTES
from webweeder.weeders.monster import MonsterWeeder
_logger = getLogger(__name__)
@click.command()
@click.argument('domains', nargs=-1)
@click.option('--alldomains', is_flag=True, help=cli_vars.HELP_ALLDOMAINS)
@click.option('--clean', is_flag=True, help=cli_vars.HELP_CLEAN_CRAWL)
@click.option('--outdir', default=config.OUTPUT_DIRECTORY, type=click.Path(file_okay=False), help=cli_vars.HELP_OUTDIR)
@click.option('--useragent', default=config.USER_AGENT, type=str, help=cli_vars.HELP_USERAGENT)
@click.option('--statsinterval', default=config.STATS_INTERVAL, type=click.IntRange(min=-1),
help=cli_vars.HELP_STATSINTERVAL)
@click.option('--parser', default=config.HTML_PARSER, type=click.Choice(cli_vars.CHOICE_PARSERS),
help=cli_vars.HELP_PARSER)
@click.option('--loglevel', default=config.LOG_LEVEL, type=click.Choice(cli_vars.CHOICE_LOGLEVEL),
help=cli_vars.HELP_LOGLEVEL)
@click.option('--logdir', default=config.LOG_DIRECTORY, type=click.Path(file_okay=False), help=cli_vars.HELP_LOGDIR)
def crawl(domains, alldomains, clean, outdir, useragent, statsinterval, parser, loglevel, logdir):
# TODO: docstring for command-line help and example usage
msg = 'crawl: domains=%r, alldomains=%r, clean=%r, outdir=%r, useragent=%r, statsinterval=%r, parser=%r, ' \
'loglevel=%r, logdir=%r' \
% (domains, alldomains, clean, outdir, useragent, statsinterval, parser, loglevel, logdir)
_configure(outdir, useragent, statsinterval, parser, loglevel, logdir)
_log_system_info()
_logger.debug(msg)
click.echo()
# Clean the output directory if it's the ONLY thing we need to do
if clean:
if len(domains) == 0 and (not alldomains):
click.echo(cli_vars.MSG_CLEANING_AND_EXITING)
delete_directory(outdir, preserve_dir=True)
return
# Ensure the user has entered valid domains to crawl
domains = _validate_domains(domains, alldomains)
if domains is None:
return
# Clean the output directory if necessary
if clean:
click.echo(cli_vars.MSG_CLEANING)
delete_directory(outdir, preserve_dir=True)
click.echo()
# Confirm that the user wants to start crawling
click.echo('You are about to crawl these domains: %r' % domains)
click.confirm('Continue crawling %d domains?' % len(domains), abort=True)
click.echo()
# Keeps track of statistics
stats = StatsMonitor()
# Create and start the spiders specified by the user
process = CrawlerProcess(config.SCRAPY_SETTINGS)
for domain in domains:
MonsterSpider.next_instance_domain = configutils.get_config_for_domain(domain)
MonsterSpider.next_instance_callback = stats.on_page_crawled
process.crawl(MonsterSpider)
process.start() # Blocks until the spiders finish
@click.command()
@click.option('--clean', is_flag=True, help=cli_vars.HELP_CLEAN_WEED)
@click.option('--outdir', default=config.OUTPUT_DIRECTORY, type=click.Path(file_okay=False), help=cli_vars.HELP_OUTDIR)
@click.option('--parser', default=config.HTML_PARSER, type=click.Choice(cli_vars.CHOICE_PARSERS),
help=cli_vars.HELP_PARSER)
@click.option('--loglevel', default=config.LOG_LEVEL, type=click.Choice(cli_vars.CHOICE_LOGLEVEL),
help=cli_vars.HELP_LOGLEVEL)
@click.option('--logdir', default=config.LOG_DIRECTORY, type=click.Path(file_okay=False), help=cli_vars.HELP_LOGDIR)
def weed(clean, outdir, parser, loglevel, logdir):
# TODO: docstring for command-line help and example usage
msg = 'weed: clean=%r, outdir=%r, parser=%r, loglevel=%r, logdir=%r' \
% (clean, outdir, parser, loglevel, logdir)
_configure(outdir, config.USER_AGENT, config.STATS_INTERVAL, parser, loglevel, logdir)
_log_system_info()
_logger.debug(msg)
click.echo()
# Clean the output directory if necessary
if clean:
click.echo(cli_vars.MSG_CLEANING)
file_pattern = os.path.join(config.OUTPUT_DIRECTORY, '**', 'plaintext_article.txt')
for file in glob(file_pattern, recursive=True):
_logger.debug('Cleaning file: %s' % file)
os.remove(file)
click.echo()
weeder = MonsterWeeder()
_logger.info('Collecting pages to weed...')
metadatas: List[str] = weeder.find_page_metadatas(config.OUTPUT_DIRECTORY)
metadatas.sort()
_logger.debug('Collected %d pages to weed: %r' % (len(metadatas), metadatas))
if len(metadatas) == 0:
click.echo(cli_vars.ERROR_NOTHING_TO_WEED)
click.echo()
return
# Confirm that the user wants to start weeding
click.echo('You are about to weed %d pages' % len(metadatas))
click.confirm('Continue weeding %d pages?' % len(metadatas), abort=True)
click.echo()
for (i, metadata) in enumerate(metadatas):
log_info = (_out_of_str(i + 1, len(metadatas)), metadata)
_logger.info('Weeding page %s: %s' % log_info)
try:
weeder.weed_page(config.OUTPUT_DIRECTORY, metadata)
except Exception:
# Just log the failure with its stack trace
_logger.exception('Failed to weed page %s: %s\n' % log_info)
def _configure(outdir, useragent, statsinterval, parser, loglevel, logdir):
config.OUTPUT_DIRECTORY = outdir
config.USER_AGENT = useragent
config.STATS_INTERVAL = statsinterval
config.HTML_PARSER = parser
config.LOG_LEVEL = loglevel
config.LOG_DIRECTORY = logdir
config.SCRAPY_SETTINGS = {
'BOT_NAME': 'WebWeeder',
'ROBOTSTXT_OBEY': True,
'LOG_LEVEL': loglevel,
'USER_AGENT': useragent
}
os.makedirs(outdir, exist_ok=True)
os.makedirs(logdir, exist_ok=True)
configure_logging(config.SCRAPY_SETTINGS)
log_file_path = os.path.join(config.LOG_DIRECTORY, date.today().strftime('%Y-%m-%d.log'))
file_handler = RotatingFileHandler(filename=log_file_path, maxBytes=(50 * MEGABYTES), backupCount=100)
file_handler.setFormatter(get_scrapy_root_handler().formatter)
file_handler.setLevel(DEBUG)
getLogger().addHandler(file_handler)
def _validate_domains(domains: List[str], alldomains: bool) -> Optional[List[str]]:
"""
:param domains: List of domains the user has specified to crawl
:param alldomains: True if the user is trying to crawl every configured domain
:return: A list of valid domains to crawl, or "None" if there was a validation problem
"""
if alldomains:
if len(domains) != 0:
click.echo(cli_vars.ERROR_ALLDOMAINS_WITH_LIST)
return None
domains = [domain_config.name for domain_config in config.DOMAINS]
if len(domains) == 0:
click.echo(cli_vars.ERROR_NO_DOMAIN_SPECIFIED)
return None
domains = sorted(domains)
duplicates = find_duplicates(domains)
for duplicate in duplicates:
click.echo(cli_vars.ERROR_DUPLICATE_DOMAINS % duplicate)
if len(duplicates) != 0:
return None
all_configured = True
for domain in domains:
domain_config = configutils.get_config_for_domain(domain)
if domain_config is None:
click.echo(cli_vars.ERROR_UNCONFIGURED_DOMAIN % domain)
all_configured = False
if not all_configured:
return None
return domains
def _log_system_info():
try:
hostname = socket.gethostname()
except:
hostname = ''
try:
username = getpass.getuser()
except:
username = ''
_logger.debug('')
_logger.debug(' ------------------------------------')
_logger.debug(' | APPLICATION LAUNCH |')
_logger.debug(' | %s |' % datetime.now().isoformat())
_logger.debug(' ------------------------------------')
_logger.debug(' Hostname: %s' % hostname)
_logger.debug(' Username: %s' % username)
_logger.debug(' Directory: %s' % os.getcwd())
_logger.debug('')
def _out_of_str(n1: int, n2: int) -> str:
"""
:return: A string in the format [n1 / n2], where "n1" and "n2" are the passed integers padded to the same length
"""
width = len(str(max(n1, n2)))
return '[%s / %s]' % (str(n1).rjust(width), str(n2).rjust(width))
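# For example, _out_of_str(3, 120) returns '[  3 / 120]'; both numbers are
# right-justified to the width of the larger one.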
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Document matcher for Full Text Search API stub.
DocumentMatcher provides an approximation of the Full Text Search API's query
matching.
"""
import logging
from google.appengine.datastore import document_pb
from google.appengine._internal.antlr3 import tree
from google.appengine.api.search import query_parser
from google.appengine.api.search import QueryParser
from google.appengine.api.search import search_util
from google.appengine.api.search.stub import simple_tokenizer
from google.appengine.api.search.stub import tokens
class ExpressionTreeException(Exception):
"""An error occurred while analyzing/translating the expression parse tree."""
def __init__(self, msg):
Exception.__init__(self, msg)
class DocumentMatcher(object):
"""A class to match documents with a query."""
def __init__(self, query, inverted_index):
self._query = query
self._inverted_index = inverted_index
self._parser = simple_tokenizer.SimpleTokenizer()
def _PostingsForToken(self, token):
"""Returns the postings for the token."""
return self._inverted_index.GetPostingsForToken(token)
def _PostingsForFieldToken(self, field, value):
"""Returns postings for the value occurring in the given field."""
value = simple_tokenizer.NormalizeString(value)
return self._PostingsForToken(
tokens.Token(chars=value, field_name=field))
def _MatchPhrase(self, field, match, document):
"""Match a textual field with a phrase query node."""
field_text = field.value().string_value()
phrase_text = query_parser.GetPhraseQueryNodeText(match)
if field.value().type() == document_pb.FieldValue.ATOM:
return (field_text == phrase_text)
phrase = self._parser.TokenizeText(phrase_text)
field_text = self._parser.TokenizeText(field_text)
if not phrase:
return True
posting = None
for post in self._PostingsForFieldToken(field.name(), phrase[0].chars):
if post.doc_id == document.id():
posting = post
break
if not posting:
return False
def ExtractWords(token_list):
return (token.chars for token in token_list)
for position in posting.positions:
match_words = zip(ExtractWords(field_text[position:]),
ExtractWords(phrase))
if len(match_words) != len(phrase):
continue
match = True
for doc_word, match_word in match_words:
if doc_word != match_word:
match = False
if match:
return True
return False
def _MatchTextField(self, field, match, document):
"""Check if a textual field matches a query tree node."""
if match.getType() == QueryParser.VALUE:
if query_parser.IsPhrase(match):
return self._MatchPhrase(field, match, document)
if field.value().type() == document_pb.FieldValue.ATOM:
return (field.value().string_value() ==
query_parser.GetQueryNodeText(match))
query_tokens = self._parser.TokenizeText(
query_parser.GetQueryNodeText(match))
if not query_tokens:
return True
if len(query_tokens) > 1:
def QueryNode(token):
return query_parser.CreateQueryNode(token.chars, QueryParser.TEXT)
return all(self._MatchTextField(field, QueryNode(token), document)
for token in query_tokens)
token_text = query_tokens[0].chars
matching_docids = [
post.doc_id for post in self._PostingsForFieldToken(
field.name(), token_text)]
return document.id() in matching_docids
def ExtractGlobalEq(node):
if node.getType() == QueryParser.EQ and len(node.children) >= 2:
if node.children[0].getType() == QueryParser.GLOBAL:
return node.children[1]
return node
if match.getType() == QueryParser.CONJUNCTION:
return all(self._MatchTextField(field, ExtractGlobalEq(child), document)
for child in match.children)
if match.getType() == QueryParser.DISJUNCTION:
return any(self._MatchTextField(field, ExtractGlobalEq(child), document)
for child in match.children)
if match.getType() == QueryParser.NEGATION:
return not self._MatchTextField(
field, ExtractGlobalEq(match.children[0]), document)
return False
def _MatchDateField(self, field, match, operator, document):
"""Check if a date field matches a query tree node."""
return self._MatchComparableField(
field, match, search_util.DeserializeDate, operator, document)
def _MatchNumericField(self, field, match, operator, document):
"""Check if a numeric field matches a query tree node."""
return self._MatchComparableField(field, match, float, operator, document)
def _MatchComparableField(
self, field, match, cast_to_type, op, document):
"""A generic method to test matching for comparable types.
Comparable types are defined to be anything that supports <, >, <=, >=, ==.
For our purposes, this is numbers and dates.
Args:
field: The document_pb.Field to test
match: The query node to match against
cast_to_type: The type to cast the node string values to
op: The query node type representing the type of comparison to perform
document: The document that the field is in
Returns:
True iff the field matches the query.
Raises:
UnsupportedOnDevError: Raised when an unsupported operator is used, or
when the query node is of the wrong type.
ExpressionTreeException: Raised when a != inequality operator is used.
"""
field_val = cast_to_type(field.value().string_value())
if match.getType() == QueryParser.VALUE:
try:
match_val = cast_to_type(query_parser.GetQueryNodeText(match))
except ValueError:
return False
else:
return False
if op == QueryParser.EQ:
return field_val == match_val
if op == QueryParser.NE:
raise ExpressionTreeException('!= comparison operator is not available')
if op == QueryParser.GT:
return field_val > match_val
if op == QueryParser.GE:
return field_val >= match_val
if op == QueryParser.LESSTHAN:
return field_val < match_val
if op == QueryParser.LE:
return field_val <= match_val
raise search_util.UnsupportedOnDevError(
'Operator %s not supported for numerical fields on development server.'
% match.getText())
def _MatchField(self, field, match, operator, document):
"""Check if a field matches a query tree.
Args:
field: Either a string containing the name of a field, a query
node whose text is the name of the field, or a document_pb.Field.
match: A query node to match the field with.
operator: The query node type corresponding to the type of match to
perform (eg QueryParser.EQ, QueryParser.GT, etc).
document: The document to match.
"""
if isinstance(field, (basestring, tree.CommonTree)):
if isinstance(field, tree.CommonTree):
field = query_parser.GetQueryNodeText(field)
fields = search_util.GetAllFieldInDocument(document, field)
return any(self._MatchField(f, match, operator, document) for f in fields)
if field.value().type() in search_util.TEXT_DOCUMENT_FIELD_TYPES:
if operator != QueryParser.EQ:
return False
return self._MatchTextField(field, match, document)
if field.value().type() in search_util.NUMBER_DOCUMENT_FIELD_TYPES:
return self._MatchNumericField(field, match, operator, document)
if field.value().type() == document_pb.FieldValue.DATE:
return self._MatchDateField(field, match, operator, document)
type_name = document_pb.FieldValue.ContentType_Name(
field.value().type()).lower()
raise search_util.UnsupportedOnDevError(
'Matching fields of type %s is unsupported on dev server (searched for '
'field %s)' % (type_name, field.name()))
def _MatchGlobal(self, match, document):
for field in document.field_list():
try:
if self._MatchField(field.name(), match, QueryParser.EQ, document):
return True
except search_util.UnsupportedOnDevError:
pass
return False
def _CheckMatch(self, node, document):
"""Check if a document matches a query tree."""
if node.getType() == QueryParser.CONJUNCTION:
return all(self._CheckMatch(child, document) for child in node.children)
if node.getType() == QueryParser.DISJUNCTION:
return any(self._CheckMatch(child, document) for child in node.children)
if node.getType() == QueryParser.NEGATION:
return not self._CheckMatch(node.children[0], document)
if node.getType() in query_parser.COMPARISON_TYPES:
field, match = node.children
if field.getType() == QueryParser.GLOBAL:
return self._MatchGlobal(match, document)
return self._MatchField(field, match, node.getType(), document)
return False
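# For example (illustrative), a query such as 'rose OR tulip' typically parses
# into a DISJUNCTION node whose children are comparisons against the GLOBAL
# field, so _CheckMatch returns True for any document whose text fields
# contain either word.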
def Matches(self, document):
try:
return self._CheckMatch(self._query, document)
except search_util.UnsupportedOnDevError, e:
logging.warning(str(e))
return False
def FilterDocuments(self, documents):
return (doc for doc in documents if self.Matches(doc))
|
|
import numpy as np
from random import choice,randrange, shuffle
import time
from datetime import datetime
import json,simplejson
grid_size = 49
iterations = 200
perc_filled_sites = 0.5
'''probability not to copy best strategy'''
r = 0.05
'''probability to cooperate spontaneously (noise 1)'''
q = 0.05
'''Probability to Migrate'''
m = 1
'''probability of expelling'''
s = 1
'''Moore's Distance'''
M = 5
'''nghbs = self.search_for_sites(site,strategies,site_occupation="empty")
o_site = site.copy()
site = nghbs[np.random.randint(len(nghbs))]
#print "new site : %s (strategy : %s)"%(site,strategies[site])
#pay_off = self.pay_off(site,strategies)
chgDic = {'best_pay_off': 0, 'best_site': site, 'o_pay_off': 1.5, 'o_site': o_site }
strategies = self.move(chgDic,strategies)['strategies']
#print "new site : %s (strategy : %s)"%(site,strategies[site])
comparison = self.play_with_all_neighbors(site,strategies)
'''
'''
if plot:
pl.figure(1)
pl.plot(C['x'],C['y'],'-')
pl.xlabel("Monte Carlo Steps (MCS)")
pl.ylabel("proportion of cooperators")
pl.ylim(0,1)
pl.savefig("results/figures/simul%s_grid%s_%s_r%s_q%s_m%s_s%s_M%s.png"%(iterations,grid_size,datetime.strftime(init_timestamp,'%Y%m%d%H%M%S'),r,q,m,s,M))
pl.close()
if save:
J = json.dumps(dic)
f = open("results/json/simul%s_grid%s_%s_r%s_q%s_m%s_s%s_M%s.json"%(iterations,grid_size,datetime.strftime(init_timestamp,'%Y%m%d%H%M%S'),r,q,m,s,M),'wb')
f.write(J)
f.close()
'''
'''#T,R,P,S = (1.0,1.0,0.0,0.0) # cooperators = 0.6
#T,R,P,S = (1.05,1.0,0.0,0.0) # cooperators = 0.1 (found approx. 0.45)
#T,R,P,S = (1.1,1.0,0.0,0.0) # cooperators = 0.0 (found approx. 0.27)
#T,R,P,S = (1.2,1.0,0.0,0.0) # cooperators = 0.0 (found approx. 0.25)
#T,R,P,S = (1.3,1.0,0.0,0.0) # cooperators = 0.0 (found approx. 0.19)
#T,R,P,S = (1.7,1.0,0.0,0.0) # cooperators = 0.0 (found approx. 0.13)
'''
'''
strategies,cooperation,strategies_init = PG.simulate()
grid_init = np.array(strategies_init.values()).reshape([grid_size,grid_size])
print grid_init[0:20,0:20]
print "\n"
grid = np.array(strategies.values()).reshape([grid_size,grid_size])
print grid[0:20,0:20]
'''
def simulate2(self,verbose=0):
init_timestamp = datetime.now()
init_T = time.mktime(init_timestamp.timetuple())
strategies = strategies_init.copy()
strategi = {}
coop = np.array(strategies.values())
empty = len(coop[coop==-1])
C={'x':[],'y':[]}
dic = {'init_timestamp' : init_T,
'input': {
'grid_size' : grid_size,
'iterations' : iterations,
'filled_sites' : perc_filled_sites,
'strategies_init' : strategies_init,
'r':r,'q':q,'m':m,'s':s,'M':M,
}
}
cooperation = []
coop = np.array(strategies.values())
cooperation.append(float(np.sum(coop[coop>0]))/len(coop[coop>=0]))
print cooperation[0]
for i in range(MCS):
if i==range(MCS)[-iterations]:
strategies_step_before = strategies.copy()
'''pick an agent randomly'''
site = choice(strategies.keys())
if strategies[site]==-1:
'''if randomly chosen site is empty, continue'''
#print "empty site %s (%s)"%(site,strategies[site])
try:
cooperation.append(cooperation[-1])
continue
except:
cooperation.append(0)
continue
'''Migration'''
if np.random.rand() < m:
if np.random.rand() < s:
'''Migration to best possible site (property game)'''
chgDic = self.explore_neighborhood(site,strategies,site_occupation="all",forceMove=False,plot=False)
else:
'''Migration to an empty site'''
chgDic = self.explore_neighborhood(site,strategies,site_occupation="empty",forceMove=False,plot=False)
strategies = self.move(chgDic,strategies)['strategies']
site = chgDic['best_site']
comparison = self.play_with_all_neighbors(site,strategies)
else:
'''no movement, compare pay-off with neighbors'''
comparison = self.play_with_all_neighbors(site,strategies)
if not comparison.has_key('best_site'):
continue
'''Update strategy given comparison with neighbors'''
strategies = self.Dirk_update(comparison,strategies,r,q)
coop = np.array(strategies.values())
cooperation.append(float(np.sum(coop[coop>0]))/len(coop[coop>=0]))
n = 50000
if len(np.argwhere(np.array(cooperation[-n:])==cooperation[-1]))==n:
print "frozen situation, stop !\n"
break
if len(coop[coop==1])==0:
print "no cooperator left"
break
if len(coop[coop==0])==0:
print "no defector left"
break
if i%(iterations-1)==0 and np.max(cooperation) < 0.01:
print "lower threshold reached"
break
if i%iterations==0:
C['x'].append(i)
C['y'].append(cooperation[-1])
print "%s (%.2f perc.),%s,cooperation level : %.2f percent"%(i,float(i)/MCS*100,site,cooperation[-1]*100)
#strategi[i]= strategies
cooperation=[]
print "initial configuration: M=%s, r=%s, q=%s, m=%s, s=%s"%(M,r,q,m,s)
print "empty_sites : %s" %len(coop[coop==-1])
print "defectors : %s" %len(coop[coop==0])
print "cooperators : %s"%len(coop[coop==1])
self.crop_grid(0,strategies_init,10)
print "\n"
self.crop_grid(0,strategies,10)
now = datetime.now()
last_T = time.mktime(now.timetuple())
dic['last_timestamp'] = last_T
dic['duration'] = last_T - init_T
dic['output'] = {'filled_sites' : len(coop[coop!=-1]),
'iterations' : i,
'defectors' : len(coop[coop==0]),
'cooperators' : len(coop[coop==1]),
'strategies_final' : strategies,
'strategies_step_before': strategies_step_before,
'cooperation' : C
}
J = json.dumps(dic)
key = bucket.new_key("results/json/simul%s_grid%s_%s_r%s_q%s_m%s_s%s_M%s.json"%(iterations,grid_size,datetime.strftime(init_timestamp,'%Y%m%d%H%M%S'),r,q,m,s,M))
key.set_contents_from_string(J)
return dic
def initVariables(self):
global grid_size
global iterations
global MCS
global perc_filled_sites
global r
global q
global m
global s
global M
global strategies_init
        global grid
        global STRATEGY_SET
        STRATEGY_SET = {'C':1,'D':0}
grid_size = 49
iterations = 200
MCS = (grid_size**2)*iterations # Monte Carlo Steps
'''Grid Sparsity'''
perc_filled_sites = 0.5
'''probability not to copy best strategy'''
r = 0.05
'''probability to cooperate spontaneously (noise 1)'''
q = 0.05
'''Probability to Migrate'''
m = 1
'''probability of expelling'''
s = 1
'''Moore's Distance'''
M = 5
strategies_init = self.initialize_grid(STRATEGY_SET,size = grid_size, perc_filled_sites = perc_filled_sites)
grid = self.make_grid(strategies_init)
def testExpell(self):
self.initVariables()
global s
for s in [0.5]:
k=0
while k<3:
print "configuration: M=%s, r=%s, q=%s, m=%s, s=%s"%(M,r,q,m,s)
strategies_init = self.initialize_grid(STRATEGY_SET,size = grid_size, perc_filled_sites = perc_filled_sites)
                result = self.simulate2()
k+=1
|
|
from numpy import sign, nonzero, equal, zeros, mean, std
from numpy import array, concatenate, sqrt, diag, matrix
from numpy import log, pi, trace, logspace, log10
from numpy.linalg import det, pinv
from numpy import sum, arange, nan, unique, argsort, isnan, cumsum
from scipy import stats
def confusionMatrix(labels_test, labels_predicted):
"""Compute the matrix of predictions versus labels"""
if len(labels_test) != len(labels_predicted):
return 0
TP = 0; FP = 0; TN = 0; FN = 0
for i in range(0, len(labels_test)):
if labels_test[i] == 0 or labels_predicted[i] == 0:
return 0
if labels_test[i] > 0:
if labels_predicted[i] > 0: TP += 1
else: FN +=1
else:
if labels_predicted[i] > 0: FP += 1
else: TN += 1
return (TP, TN, FP, FN)
def accuracy(output, labels_test):
"""How many correct predictions?"""
if max(labels_test) > 1:
return accuracy_multiclass(output, labels_test)
else:
TP, TN, FP, FN = confusionMatrix(labels_test, sign(output))
return float(TP + TN) / (TP + TN + FP + FN)
def balanced_accuracy(output, labels):
"""How many correct predictions?, normalized by number of positives and negatives"""
assert all(unique(labels)==array([-1,1])), 'Binary classification only'
TP, TN, FP, FN = confusionMatrix(labels, sign(output))
return 0.5*TP/(TP+FN) + 0.5*TN/(FP+TN)
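# Illustrative sketch (not part of the original module): how the helpers above
# fit together on a toy prediction vector; labels are in {-1, +1} and outputs
# are real-valued scores.
def _example_accuracy_sketch():
    toy_outputs = array([0.9, -0.3, 0.4, -0.8])
    toy_labels = array([1, -1, -1, -1])
    # sign(toy_outputs) is [1, -1, 1, -1], so TP=1, TN=2, FP=1, FN=0
    print(confusionMatrix(toy_labels, sign(toy_outputs)))  # (1, 2, 1, 0)
    print(accuracy(toy_outputs, toy_labels))                # 0.75
    print(balanced_accuracy(toy_outputs, toy_labels))       # 0.5 + 0.5*(2/3)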
def balanced_accuracy_multitask(output, labels):
"""Balanced accuracy applied to each task separately"""
assert output.shape == labels.shape, 'Predictions and labels have different shape'
num_task = output.shape[0]
balacc = zeros(num_task)
for ix in range(num_task):
lab_idx = nonzero(labels[ix,:])
balacc[ix] = balanced_accuracy(output[ix,lab_idx].flatten(), labels[ix,lab_idx].flatten())
return balacc
def accuracy_multiclass(output, labels_test):
"""Multiclass accuracy"""
int_out = map(int, output)
int_lab = map(int, labels_test)
return 1.0*len(nonzero(equal(int_out, int_lab))[0])/len(output)
def rmse(output, labels):
"""Root mean squared error"""
if output.ndim == 1:
num_ex = len(output)
else:
num_ex = output.shape[1]
return sqrt(diag(matrix(output - labels)*matrix(output-labels).T))/num_ex
def rmse_multitask(output, labels):
"""rmse for each task separately"""
assert output.shape == labels.shape, 'Predictions and labels have different shape'
num_task = output.shape[0]
error = zeros(num_task)
for ix in range(num_task):
lab_idx = nonzero(labels[ix,:])
error[ix] = rmse(output[ix,lab_idx].flatten(), labels[ix,lab_idx].flatten())
return error
def differential_entropy(K):
"""The differential entropy for a multivariate normal distribution.
    Assume K is a positive definite matrix (so that det(K) > 0)."""
d = K.shape[0]
return (d/2)*(1+log(2*pi)) + 0.5*log(det(K))
def relative_entropy(K):
"""The relative entropy for a multivariate normal distribution,
compared to another Gaussian with the same mean and identity covariance.
"""
d = K.shape[0]
return 0.5*(log(det(pinv(K))) + trace(K) - d)
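# Illustrative sketch (not part of the original module): for an identity
# covariance, det(K) = 1 and trace(K) = d, so relative_entropy(K) is 0 and
# differential_entropy(K) reduces to its (d/2)*(1+log(2*pi)) term.
def _example_entropy_sketch(d=3):
    K = diag([1.0] * d)
    print(differential_entropy(K))  # (d/2)*(1 + log(2*pi))
    print(relative_entropy(K))      # 0.0 (up to floating-point error)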
def trapz(x, y, ylim0, ylim1):
"""Trapezoidal rule for integrating
the curve defined by x-y pairs.
    Assume x is in the range [0,1]; the curve is padded with the value
    ylim0 at x=0 and ylim1 at x=1 before integration.
"""
assert len(x) == len(y), 'x and y need to be of same length'
x = concatenate([x, array([0.0, 1.0])])
y = concatenate([y, array([ylim0, ylim1])])
sort_idx = argsort(x)
sx = x[sort_idx]
sy = y[sort_idx]
area = 0.0
for ix in range(len(x)-1):
area += 0.5*(sx[ix+1]-sx[ix])*(sy[ix+1]+sy[ix])
return area
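# Illustrative sketch (not part of the original module): trapz pads the curve
# with ylim0 at x=0 and ylim1 at x=1 before sorting, so a constant curve y=1
# integrates to 1.0 over [0, 1].
def _example_trapz_sketch():
    x = array([0.25, 0.5, 0.75])
    y = array([1.0, 1.0, 1.0])
    print(trapz(x, y, 1.0, 1.0))  # 1.0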
def stats_empirical(output, labels, interp=1000):
"""Compute some statistics for binary predictions.
tpr - true positive rate (recall)
fpr - false positive rate
ppv - positive predictive value (precision)
auc - area under the ROC curve
ap - area under the precision-recall curve (average precision)
    If there are more than interp=1000 examples, thresholds are taken at logspace intervals.
"""
assert len(output)==len(labels), 'Predictions and labels have different lengths'
assert all(unique(labels)==array([-1,1])), 'Labels are not binary {-1,+1}'
# Sort true labels according to predictions in ascending order
n = len(output)
sort_idx = argsort(output)
sorted_labels = labels[sort_idx]
tpr = []
fpr = []
ppv = []
if n > interp:
thresholds = list(range(100))+list(map(int, logspace(2, log10(n), interp).round()))
thresholds = (n-array(thresholds))[::-1]
else:
thresholds = range(n+1)
for thres in thresholds:
tp = 1.0*sum(sorted_labels[thres:]>0)
fn = 1.0*sum(sorted_labels[:thres]>0)
tn = 1.0*sum(sorted_labels[:thres]<0)
fp = 1.0*sum(sorted_labels[thres:]<0)
if tp+fn > 0.0:
tpr.append(tp/(tp+fn))
else:
tpr.append(nan)
if fp+tn > 0.0:
fpr.append(fp/(fp+tn))
else:
fpr.append(nan)
if tp+fp > 0.0:
ppv.append(tp/(tp+fp))
else:
ppv.append(nan)
tpr = array(tpr)
fpr = array(fpr)
ppv = array(ppv)
auc = trapz(fpr, tpr, 0.0, 1.0)
    idx = ~isnan(ppv)
apr = trapz(tpr[idx], ppv[idx], 1.0, 0.0)
return tpr, fpr, ppv, auc, apr
def stats_binormal(output, labels, step=0.001):
"""Compute some statistics for binary predictions.
tpr - true positive rate (recall)
fpr - false positive rate
ppv - positive predictive value (precision)
auc - area under the ROC curve
ap - area under the precision-recall curve (average precision)
Use the binormal assumption.
    step gives the smoothness of the curve.
"""
assert len(output)==len(labels), 'Predictions and labels have different lengths'
assert all(unique(labels)==array([-1,1])), 'Labels are not binary {-1,+1}'
# Estimate the binormal parameters
pos = output[labels>0]
neg = output[labels<0]
mu_pos = mean(pos)
mu_neg = mean(neg)
std_pos = std(pos)
std_neg = std(neg)
alpha = 1.0*len(pos)/len(output)
# Sanity checks
assert mu_pos > mu_neg, 'positive Gaussian is not to the right of negative'
assert (std_pos>0) and (std_neg>0), 'Variance is zero'
# Use Gaussian cdf to estimate scores
thres = arange(mu_neg-5.0*std_neg, mu_pos+5*std_pos, step)
tp = alpha*(1.0-stats.norm.cdf(thres, mu_pos, std_pos))
fp = (1.0-alpha)*(1.0-stats.norm.cdf(thres, mu_neg, std_neg))
fn = alpha*stats.norm.cdf(thres, mu_pos, std_pos)
tn = (1.0-alpha)*stats.norm.cdf(thres, mu_neg, std_neg)
tpr = tp/(tp+fn)
fpr = fp/(fp+tn)
ppv = tp/(tp+fp)
A = (mu_pos-mu_neg)/std_pos
B = std_neg/std_pos
auc = stats.norm.cdf(A/sqrt(1+B*B))
apr = trapz(tpr, ppv, 1.0, 0.0)
return tpr, fpr, ppv, auc, apr
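# Illustrative sketch (not part of the original module): under the binormal
# model the AUC depends only on A = (mu_pos - mu_neg)/std_pos and
# B = std_neg/std_pos through AUC = Phi(A/sqrt(1 + B**2)); for unit-variance
# classes whose means are one standard deviation apart this is about 0.76.
def _example_binormal_auc_sketch():
    A = 1.0  # (mu_pos - mu_neg) / std_pos
    B = 1.0  # std_neg / std_pos
    print(stats.norm.cdf(A / sqrt(1.0 + B * B)))  # ~0.7602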
def stats_binormal_multitask(output, labels):
"""stats_binormal applied to each row"""
assert output.shape == labels.shape, 'Predictions and labels have different shape'
num_task = output.shape[0]
tpr = []
fpr = []
ppv = []
auc = zeros(num_task)
apr = zeros(num_task)
for ix in range(num_task):
lab_idx = nonzero(labels[ix,:])
ctpr, cfpr, cppv, auc[ix], apr[ix] = stats_binormal(output[ix,lab_idx].flatten(),
labels[ix,lab_idx].flatten())
tpr.append(ctpr)
fpr.append(cfpr)
ppv.append(cppv)
return tpr, fpr, ppv, auc, apr
def auc(output, labels):
"""The area under the ROC curve,
estimated using the binormal approximation
"""
tpr, fpr, ppv, auc, apr = stats_empirical(output, labels)
return auc
def r2(output, labels):
"""The squared correlation coefficient"""
mu = mean(output)
numerator = sum((labels-output)*(labels-output))
denominator = sum((labels-mu)*(labels-mu))
return 1.-(numerator/denominator)
def spearman(output, labels):
"""Spearman's correlation coefficient (rho)"""
output_rank = score2rank(output)
labels_rank = score2rank(labels)
rho, pval = stats.pearsonr(output_rank, labels_rank)
return rho
def score2rank(orig_scores):
"""Convert an array of scores into an array of normalised ranks,
such that the highest score has the highest rank (1/num_ex)."""
scores = orig_scores.copy()
idx_sort = argsort(scores)[::-1]
unsort = argsort(idx_sort)
scores = scores[idx_sort]
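    # infer_ranks is assumed to be defined elsewhere in this module; it is
    # expected to map the descending-sorted scores to 1-based ranks
    # (tie handling is not shown in this excerpt).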
ranks = infer_ranks(scores)
assert(len(ranks) == len(scores))
ranks = ranks/(len(scores)+1.0)
ranks = ranks[unsort]
return ranks
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# IMPORTANT NOTE: If you make local mods to this file, you must run:
# % pnacl/build.sh driver
# in order for them to take effect in the scons build. This command
# updates the copy in the toolchain/ tree.
#
import re
import subprocess
from driver_tools import AddHostBinarySearchPath, DefaultOutputName, \
DriverChain, GetArch, ParseArgs, ParseTriple, Run, RunDriver, RunWithEnv, \
TempNameGen, UnrecognizedOption
from driver_env import env
from driver_log import DriverOpen, Log
import filetype
import pathtools
EXTRA_ENV = {
'ALLOW_TRANSLATE': '0', # Allow bitcode translation before linking.
# It doesn't normally make sense to do this.
'ALLOW_NATIVE' : '0', # Allow native objects (.S,.s,.o) to be in the
# linker line for .pexe generation.
# It doesn't normally make sense to do this.
# CXX_EH_MODE specifies how to deal with C++ exception handling:
# * 'none': Strips out use of C++ exception handling.
# * 'sjlj': Enables the setjmp()+longjmp()-based implementation of
# C++ exception handling. This is supported in PNaCl's stable
# ABI.
# * 'zerocost': Enables the zero-cost implementation of C++
# exception handling. This is not supported in PNaCl's stable
# ABI.
'CXX_EH_MODE': 'none',
'FORCE_INTERMEDIATE_LL': '0',
# Produce an intermediate .ll file
# Useful for debugging.
# NOTE: potentially different code paths and bugs
# might be triggered by this
'LANGUAGE' : '', # C or CXX (set by SetTool)
'INCLUDE_CXX_HEADERS': '0', # This is set by RunCC.
# Command-line options
'GCC_MODE' : '', # '' (default), '-E', '-c', or '-S'
'STDINC' : '1', # Include standard headers (-nostdinc sets to 0)
'STDINCCXX' : '1', # Include standard cxx headers (-nostdinc++ sets to 0)
'USE_STDLIB' : '1', # Include standard libraries (-nostdlib sets to 0)
'STDLIB' : '', # C++ Standard Library.
'STDLIB_TRUNC': '', # C++ Standard Library, truncated to pass as -lXXX.
'STDLIB_IDIR' : '', # C++ Standard Library include directory.
                      # Note: the above C++ Standard Library settings
                      # fall back to defaults if their values remain unset.
'DEFAULTLIBS' : '1', # Link with default libraries
'DIAGNOSTIC' : '0', # Diagnostic flag detected
'PIC' : '0', # Generate PIC
# TODO(robertm): Switch the default to 1
'NO_ASM' : '0', # Disallow use of inline assembler
'NEED_DASH_E' : '0', # Used for stdin inputs, which must have an explicit
# type set (using -x) unless -E is specified.
'VERBOSE' : '0', # Verbose (-v)
'SHOW_VERSION': '0', # Version (--version)
'PTHREAD' : '0', # use pthreads?
'INPUTS' : '', # Input files
'OUTPUT' : '', # Output file
'UNMATCHED' : '', # Unrecognized parameters
'BIAS_NONE' : '',
'BIAS_ARM' : '-D__arm__ -D__ARM_ARCH_7A__ -D__ARMEL__',
'BIAS_MIPS32' : '-D__MIPS__ -D__mips__ -D__MIPSEL__',
'BIAS_X8632' : '-D__i386__ -D__i386 -D__i686 -D__i686__ -D__pentium4__',
'BIAS_X8664' : '-D__amd64__ -D__amd64 -D__x86_64__ -D__x86_64 -D__core2__',
'BIAS_ARM_NONSFI': '${BIAS_ARM} -D__native_client_nonsfi__',
'BIAS_X8632_NONSFI': '${BIAS_X8632} -D__native_client_nonsfi__',
'FRONTEND_TRIPLE' : 'le32-unknown-nacl',
'OPT_LEVEL' : '', # Default for most tools is 0, but we need to know
# if it's explicitly set or not when the driver
# is only used for linking + translating.
'CC_FLAGS' : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0} ' +
'-fno-vectorize -fno-slp-vectorize ' +
'-fno-common ${PTHREAD ? -pthread} ' +
'-nostdinc ${BIAS_%BIAS%} ' +
# BUG: http://code.google.com/p/nativeclient/issues/detail?id=2345
# it would be better to detect asm use inside clang
# as some uses of asm are borderline legit, e.g.
# <prototype> asm("<function-name>");
'${NO_ASM ? -Dasm=ASM_FORBIDDEN -D__asm__=ASM_FORBIDDEN} ' +
'-target ${FRONTEND_TRIPLE}',
'ISYSTEM' : '${ISYSTEM_USER} ${STDINC ? ${ISYSTEM_BUILTIN}}',
'ISYSTEM_USER' : '', # System include directories specified by
# using the -isystem flag.
'ISYSTEM_BUILTIN':
'${BASE_USR}/local/include ' +
'${ISYSTEM_CLANG} ' +
'${ISYSTEM_CXX} ' +
'${BASE_USR}/include ' +
'${BASE_SDK}/include ',
'ISYSTEM_CLANG' : '${BASE_LLVM}/lib/clang/3.4/include',
'ISYSTEM_CXX' :
'${INCLUDE_CXX_HEADERS && STDINCCXX ? ${ISYSTEM_CXX_include_paths}}',
'ISYSTEM_CXX_include_paths' :
'${BASE_USR}/include/c++/${STDLIB_IDIR} ' +
'${BASE_USR}/include/c++/${STDLIB_IDIR}/arm-none-linux-gnueabi ' +
'${BASE_USR}/include/c++/${STDLIB_IDIR}/backward',
# Only propagate opt level to linker if explicitly set, so that the
# linker will know if an opt level was explicitly set or not.
'LD_FLAGS' : '${#OPT_LEVEL ? -O${OPT_LEVEL}} -static ' +
'${PIC ? -fPIC} ${@AddPrefix:-L:SEARCH_DIRS} ' +
'--pnacl-exceptions=${CXX_EH_MODE}',
'SEARCH_DIRS' : '', # Directories specified using -L
# Library Strings
'EMITMODE' : '${!USE_STDLIB ? nostdlib : static}',
# This is setup so that LD_ARGS_xxx is evaluated lazily.
'LD_ARGS' : '${LD_ARGS_%EMITMODE%}',
# ${ld_inputs} signifies where to place the objects and libraries
# provided on the command-line.
'LD_ARGS_nostdlib': '-nostdlib ${ld_inputs}',
'LD_ARGS_static':
'${CXX_EH_MODE==zerocost ? -l:crt1_for_eh.x : -l:crt1.x} ' +
'-l:crti.bc -l:crtbegin.bc '
'${CXX_EH_MODE==sjlj ? -l:sjlj_eh_redirect.bc : '
'${CXX_EH_MODE==none ? -l:unwind_stubs.bc}} ' +
'${ld_inputs} ' +
'--start-group ${STDLIBS} --end-group',
'LLVM_PASSES_TO_DISABLE': '',
# Flags for translating to native .o files.
'TRANSLATE_FLAGS' : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0}',
'STDLIBS' : '${DEFAULTLIBS ? '
'${LIBSTDCPP} ${LIBPTHREAD} ${LIBNACL} ${LIBC} ${LIBPNACLMM}}',
'LIBSTDCPP' : '${IS_CXX ? -l${STDLIB_TRUNC} -lm }',
'LIBC' : '-lc',
'LIBNACL' : '-lnacl',
'LIBPNACLMM': '-lpnaclmm',
# Enabled/disabled by -pthreads
'LIBPTHREAD': '${PTHREAD ? -lpthread}',
# IS_CXX is set by pnacl-clang and pnacl-clang++ programmatically
'CC' : '${IS_CXX ? ${CLANGXX} : ${CLANG}}',
'RUN_CC': '${CC} -emit-llvm ${mode} ${CC_FLAGS} ' +
'${@AddPrefix:-isystem :ISYSTEM} ' +
'-x${typespec} "${infile}" -o ${output}',
}
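# Notes on the template syntax used in EXTRA_ENV above (inferred from how the
# variables are written here, not a normative driver_env reference):
#   ${VAR}            - substitute a variable's value
#   ${VAR ? a : b}    - conditional substitution on VAR being true/non-empty
#   ${#VAR ? ...}     - test whether VAR has been given a value at all
#   %VAR%             - splice a value into another variable's *name*,
#                       e.g. LD_ARGS_%EMITMODE% or BIAS_%BIAS%
#   ${@Func:arg:VAR}  - apply a helper such as AddPrefix to a list variable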
def AddLLVMPassDisableFlag(*args):
env.append('LLVM_PASSES_TO_DISABLE', *args)
env.append('LD_FLAGS', *args)
def AddLDFlag(*args):
env.append('LD_FLAGS', *args)
def AddTranslatorFlag(*args):
# pass translator args to ld in case we go all the way to .nexe
env.append('LD_FLAGS', *['-Wt,' + a for a in args])
# pass translator args to translator in case we go to .o
env.append('TRANSLATE_FLAGS', *args)
def AddCCFlag(*args):
env.append('CC_FLAGS', *args)
def AddDiagnosticFlag(*args):
env.append('CC_FLAGS', *args)
env.set('DIAGNOSTIC', '1')
def SetTarget(*args):
arch = ParseTriple(args[0])
env.set('FRONTEND_TRIPLE', args[0])
AddLDFlag('--target=' + args[0])
def SetStdLib(*args):
"""Set the C++ Standard Library."""
lib = args[0]
assert lib == 'libc++' or lib == 'libstdc++', (
'Invalid C++ standard library: -stdlib=%s' % lib)
env.set('STDLIB', lib)
env.set('STDLIB_TRUNC', lib[3:])
if lib == 'libc++':
env.set('STDLIB_IDIR', 'v1')
if env.getbool('IS_CXX'):
# libc++ depends on pthread for C++11 features as well as some
# exception handling (which may get removed later by the PNaCl ABI
# simplification) and initialize-once.
env.set('PTHREAD', '1')
elif lib == 'libstdc++':
env.set('STDLIB_IDIR', '4.6.2')
def IsPortable():
return env.getone('FRONTEND_TRIPLE').startswith('le32-')
stdin_count = 0
def AddInputFileStdin():
global stdin_count
# When stdin is an input, -x or -E must be given.
forced_type = filetype.GetForcedFileType()
if not forced_type:
# Only allowed if -E is specified.
forced_type = 'c'
env.set('NEED_DASH_E', '1')
stdin_name = '__stdin%d__' % stdin_count
env.append('INPUTS', stdin_name)
filetype.ForceFileType(stdin_name, forced_type)
stdin_count += 1
def IsStdinInput(f):
return f.startswith('__stdin') and f.endswith('__')
def HandleDashX(arg):
if arg == 'none':
filetype.SetForcedFileType(None)
return
filetype.SetForcedFileType(filetype.GCCTypeToFileType(arg))
def AddVersionFlag(*args):
env.set('SHOW_VERSION', '1')
AddDiagnosticFlag(*args)
def AddBPrefix(prefix):
""" Add a path to the list searched for host binaries and include dirs. """
AddHostBinarySearchPath(prefix)
prefix = pathtools.normalize(prefix)
if pathtools.isdir(prefix) and not prefix.endswith('/'):
prefix += '/'
# Add prefix/ to the library search dir if it exists
if pathtools.isdir(prefix):
env.append('SEARCH_DIRS', prefix)
# Add prefix/include to isystem if it exists
include_dir = prefix + 'include'
if pathtools.isdir(include_dir):
env.append('ISYSTEM_USER', include_dir)
CustomPatterns = [
( '--driver=(.+)', "env.set('CC', pathtools.normalize($0))\n"),
( '--pnacl-allow-native', "env.set('ALLOW_NATIVE', '1')"),
( '--pnacl-allow-translate', "env.set('ALLOW_TRANSLATE', '1')"),
( '--pnacl-frontend-triple=(.+)', SetTarget),
( ('-target','(.+)'), SetTarget),
( ('--target=(.+)'), SetTarget),
( '--pnacl-exceptions=(none|sjlj|zerocost)', "env.set('CXX_EH_MODE', $0)"),
# TODO(mseaborn): Remove "--pnacl-allow-exceptions", which is
# superseded by "--pnacl-exceptions".
( '--pnacl-allow-exceptions', "env.set('CXX_EH_MODE', 'zerocost')"),
( '(--pnacl-allow-nexe-build-id)', AddLDFlag),
( '(--pnacl-disable-abi-check)', AddLDFlag),
( '(--pnacl-disable-pass=.+)', AddLLVMPassDisableFlag),
( '(--pnacl-allow-dev-intrinsics)', AddLDFlag),
]
GCCPatterns = [
( '-o(.+)', "env.set('OUTPUT', pathtools.normalize($0))"),
( ('-o', '(.+)'), "env.set('OUTPUT', pathtools.normalize($0))"),
( '-E', "env.set('GCC_MODE', '-E')"),
( '-S', "env.set('GCC_MODE', '-S')"),
( '-c', "env.set('GCC_MODE', '-c')"),
( '-allow-asm', "env.set('NO_ASM', '0')"),
( '-nostdinc', "env.set('STDINC', '0')"),
( '-nostdinc\+\+', "env.set('STDINCCXX', '0')"),
( '-nostdlib', "env.set('USE_STDLIB', '0')"),
( '-nodefaultlibs', "env.set('DEFAULTLIBS', '0')"),
( '-?-stdlib=(.*)', SetStdLib),
( ('-?-stdlib', '(.*)'), SetStdLib),
# Flags to pass to native linker
( '(-Wn,.*)', AddLDFlag),
( '-rdynamic', "env.append('LD_FLAGS', '-export-dynamic')"),
# Flags to pass to pnacl-translate
( '-Wt,(.*)', AddTranslatorFlag),
( ('-Xtranslator','(.*)'), AddTranslatorFlag),
# We don't care about -fPIC, but pnacl-ld and pnacl-translate do.
( '-fPIC', "env.set('PIC', '1')"),
  # We must include -l, -Xlinker, and -Wl options in INPUTS in the order
  # they appeared. This is exactly the behavior of gcc.
# For example: gcc foo.c -Wl,--start-group -lx -ly -Wl,--end-group
#
( '(-l.+)', "env.append('INPUTS', $0)"),
( ('(-l)','(.+)'), "env.append('INPUTS', $0+$1)"),
( ('-Xlinker','(.*)'), "env.append('INPUTS', '-Xlinker=' + $0)"),
( '(-Wl,.*)', "env.append('INPUTS', $0)"),
( '(-Bstatic)', "env.append('INPUTS', $0)"),
( '(-Bdynamic)', "env.append('INPUTS', $0)"),
( '-O([sz])', "env.set('OPT_LEVEL', $0)\n"),
( '-O([0-3])', "env.set('OPT_LEVEL', $0)\n"),
( '-O([0-9]+)', "env.set('OPT_LEVEL', '3')\n"),
( '-O', "env.set('OPT_LEVEL', '1')\n"),
( ('-isystem', '(.*)'),
"env.append('ISYSTEM_USER', pathtools.normalize($0))"),
( '-isystem(.+)',
"env.append('ISYSTEM_USER', pathtools.normalize($0))"),
( ('-I', '(.+)'), "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
( '-I(.+)', "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
# NOTE: the -iquote =DIR syntax (substitute = with sysroot) doesn't work.
# Clang just says: ignoring nonexistent directory "=DIR"
( ('-iquote', '(.+)'),
"env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
( ('-iquote(.+)'),
"env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
( ('-idirafter', '(.+)'),
"env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
( '-idirafter(.+)',
"env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
( ('(-include)','(.+)'), AddCCFlag),
( ('(-include.+)'), AddCCFlag),
( '(-g)', AddCCFlag),
( '(-W.*)', AddCCFlag),
( '(-w)', AddCCFlag),
( '(-std=.*)', AddCCFlag),
( '(-ansi)', AddCCFlag),
( ('(-D)','(.*)'), AddCCFlag),
( '(-D.+)', AddCCFlag),
( ('(-U)','(.*)'), AddCCFlag),
( '(-U.+)', AddCCFlag),
( '(-f.*)', AddCCFlag),
( '(-pedantic)', AddCCFlag),
( '(-pedantic-errors)', AddCCFlag),
( '(-g.*)', AddCCFlag),
( '(-v|--v)', "env.append('CC_FLAGS', $0)\n"
"env.set('VERBOSE', '1')"),
( '(-pthreads?)', "env.set('PTHREAD', '1')"),
# No-op: accepted for compatibility in case build scripts pass it.
( '-static', ""),
( ('-B','(.*)'), AddBPrefix),
( ('-B(.+)'), AddBPrefix),
( ('-L','(.+)'), "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
( '-L(.+)', "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
( '(-Wp,.*)', AddCCFlag),
( '(-Xpreprocessor .*)', AddCCFlag),
( ('(-Xclang)', '(.*)'), AddCCFlag),
# Accept and ignore default flags
( '-m32', ""),
( '-emit-llvm', ""),
( '(-MG)', AddCCFlag),
( '(-MMD)', AddCCFlag),
( '(-MM?)', "env.append('CC_FLAGS', $0)\n"
"env.set('GCC_MODE', '-E')"),
( '(-MP)', AddCCFlag),
( ('(-MQ)','(.*)'), AddCCFlag),
( '(-MD)', AddCCFlag),
( ('(-MT)','(.*)'), AddCCFlag),
( ('(-MF)','(.*)'), "env.append('CC_FLAGS', $0, pathtools.normalize($1))"),
( ('-x', '(.+)'), HandleDashX),
( '-x(.+)', HandleDashX),
( ('(-mllvm)', '(.+)'), AddCCFlag),
# Ignore these gcc flags
( '(-msse)', ""),
( '(-march=armv7-a)', ""),
( '(-pipe)', ""),
( '(-s)', AddLDFlag),
( '(--strip-all)', AddLDFlag),
( '(--strip-debug)', AddLDFlag),
# Ignore these assembler flags
( '(-Qy)', ""),
( ('(--traditional-format)', '.*'), ""),
( '(-gstabs)', ""),
( '(--gstabs)', ""),
( '(-gdwarf2)', ""),
( '(--gdwarf2)', ""),
( '(--fatal-warnings)', ""),
( '(-meabi=.*)', ""),
( '(-mfpu=.*)', ""),
( '(-mfloat-abi=.+)', AddCCFlag),
# GCC diagnostic mode triggers
( '(-print-.*)', AddDiagnosticFlag),
( '(--print.*)', AddDiagnosticFlag),
( '(-dumpspecs)', AddDiagnosticFlag),
( '(--version)', AddVersionFlag),
# These are preprocessor flags which should be passed to the frontend, but
# should not prevent the usual -i flags (which DIAGNOSTIC mode does)
( '(-d[DIMNU])', AddCCFlag),
( '(-d.*)', AddDiagnosticFlag),
# Catch all other command-line arguments
( '(-.+)', "env.append('UNMATCHED', $0)"),
# Standard input
( '-', AddInputFileStdin),
# Input Files
# Call ForceFileType for all input files at the time they are
# parsed on the command-line. This ensures that the gcc "-x"
# setting is correctly applied.
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))\n"
"filetype.ForceFileType(pathtools.normalize($0))"),
]
def CheckSetup():
if not env.has('IS_CXX'):
Log.Fatal('"pnacl-driver" cannot be used directly. '
'Use pnacl-clang or pnacl-clang++.')
def DriverOutputTypes(driver_flag, compiling_to_native):
output_type_map = {
('-E', False) : 'pp',
('-E', True) : 'pp',
('-c', False) : 'po',
('-c', True) : 'o',
('-S', False) : 'll',
('-S', True) : 's',
('', False) : 'pexe',
('', True) : 'nexe',
}
return output_type_map[(driver_flag, compiling_to_native)]
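# Illustrative note (not part of the original driver): per the table above,
# "-c" without -arch yields a bitcode object ('po') while "-c" with -arch
# yields a native object ('o'); with no mode flag the driver links to a
# 'pexe' (portable) or 'nexe' (native) executable.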
def ReadDriverRevision():
rev_file = env.getone('DRIVER_REV_FILE')
# Might be an SVN version or a GIT hash (depending on the NaCl src client)
nacl_ver = DriverOpen(rev_file, 'rb').readlines()[0]
m = re.search(r'\[SVN\].*/native_client:\s*(\d+)', nacl_ver)
if m:
return m.group(1)
m = re.search(r'\[GIT\].*/native_client.git:\s*(\w+)', nacl_ver)
if m:
return m.group(1)
# fail-fast: if the REV file exists but regex search failed,
# we need to fix the regex to get nacl-version.
if not m:
Log.Fatal('Failed to parse REV file to get nacl-version.')
def main(argv):
env.update(EXTRA_ENV)
CheckSetup()
ParseArgs(argv, CustomPatterns + GCCPatterns)
# "configure", especially when run as part of a toolchain bootstrap
# process, will invoke gcc with various diagnostic options and
# parse the output. In these cases we do not alter the incoming
# commandline. It is also important to not emit spurious messages.
if env.getbool('DIAGNOSTIC'):
if env.getbool('SHOW_VERSION'):
code, stdout, stderr = Run(env.get('CC') + env.get('CC_FLAGS'),
redirect_stdout=subprocess.PIPE)
out = stdout.split('\n')
nacl_version = ReadDriverRevision()
out[0] += ' nacl-version=%s' % nacl_version
stdout = '\n'.join(out)
print stdout,
else:
Run(env.get('CC') + env.get('CC_FLAGS'))
return 0
unmatched = env.get('UNMATCHED')
if len(unmatched) > 0:
UnrecognizedOption(*unmatched)
# If -arch was given, we are compiling directly to native code
compiling_to_native = GetArch() is not None
if env.getbool('ALLOW_NATIVE') and not compiling_to_native:
Log.Fatal("--pnacl-allow-native without -arch is not meaningful.")
if not env.get('STDLIB'):
# Default C++ Standard Library.
SetStdLib('libc++')
inputs = env.get('INPUTS')
output = env.getone('OUTPUT')
if len(inputs) == 0:
if env.getbool('VERBOSE'):
# -v can be invoked without any inputs. Runs the original
# command without modifying the commandline for this case.
Run(env.get('CC') + env.get('CC_FLAGS'))
return 0
else:
Log.Fatal('No input files')
gcc_mode = env.getone('GCC_MODE')
output_type = DriverOutputTypes(gcc_mode, compiling_to_native)
needs_linking = (gcc_mode == '')
if env.getbool('NEED_DASH_E') and gcc_mode != '-E':
Log.Fatal("-E or -x required when input is from stdin")
  # If no linking is being done, each input file produces its own output.
  # Handle this case separately.
if not needs_linking:
# Filter out flags
inputs = [f for f in inputs if not IsFlag(f)]
if output != '' and len(inputs) > 1:
Log.Fatal('Cannot have -o with -c, -S, or -E and multiple inputs: %s',
repr(inputs))
for f in inputs:
if IsFlag(f):
continue
intype = filetype.FileType(f)
if not filetype.IsSourceType(intype):
if ((output_type == 'pp' and intype != 'S') or
(output_type == 'll') or
(output_type == 'po' and intype != 'll') or
(output_type == 's' and intype not in ('ll','po','S')) or
(output_type == 'o' and intype not in ('ll','po','S','s'))):
Log.Fatal("%s: Unexpected type of file for '%s'",
pathtools.touser(f), gcc_mode)
if output == '':
f_output = DefaultOutputName(f, output_type)
else:
f_output = output
namegen = TempNameGen([f], f_output)
CompileOne(f, output_type, namegen, f_output)
return 0
# Linking case
assert(needs_linking)
assert(output_type in ('pso','so','pexe','nexe'))
if output == '':
output = pathtools.normalize('a.out')
namegen = TempNameGen(inputs, output)
# Compile all source files (c/c++/ll) to .po
for i in xrange(0, len(inputs)):
if IsFlag(inputs[i]):
continue
intype = filetype.FileType(inputs[i])
if filetype.IsSourceType(intype) or intype == 'll':
inputs[i] = CompileOne(inputs[i], 'po', namegen)
# Compile all .s/.S to .o
if env.getbool('ALLOW_NATIVE'):
for i in xrange(0, len(inputs)):
if IsFlag(inputs[i]):
continue
intype = filetype.FileType(inputs[i])
if intype in ('s','S'):
inputs[i] = CompileOne(inputs[i], 'o', namegen)
# We should only be left with .po and .o and libraries
for f in inputs:
if IsFlag(f):
continue
intype = filetype.FileType(f)
if intype in ('o','s','S') or filetype.IsNativeArchive(f):
if not env.getbool('ALLOW_NATIVE'):
Log.Fatal('%s: Native object files not allowed in link. '
'Use --pnacl-allow-native to override.', pathtools.touser(f))
assert(intype in ('po','o','so','ldscript') or filetype.IsArchive(f))
# Fix the user-specified linker arguments
ld_inputs = []
for f in inputs:
if f.startswith('-Xlinker='):
ld_inputs.append(f[len('-Xlinker='):])
elif f.startswith('-Wl,'):
ld_inputs += f[len('-Wl,'):].split(',')
else:
ld_inputs.append(f)
if env.getbool('ALLOW_NATIVE'):
ld_inputs.append('--pnacl-allow-native')
# Invoke the linker
env.set('ld_inputs', *ld_inputs)
ld_args = env.get('LD_ARGS')
ld_flags = env.get('LD_FLAGS')
RunDriver('ld', ld_flags + ld_args + ['-o', output])
return 0
def IsFlag(f):
return f.startswith('-')
def CompileOne(infile, output_type, namegen, output = None):
if output is None:
output = namegen.TempNameForInput(infile, output_type)
chain = DriverChain(infile, output, namegen)
SetupChain(chain, filetype.FileType(infile), output_type)
chain.run()
return output
def RunCC(infile, output, mode):
intype = filetype.FileType(infile)
typespec = filetype.FileTypeToGCCType(intype)
include_cxx_headers = (env.get('LANGUAGE') == 'CXX') or (intype == 'c++')
env.setbool('INCLUDE_CXX_HEADERS', include_cxx_headers)
if IsStdinInput(infile):
infile = '-'
RunWithEnv("${RUN_CC}", infile=infile, output=output,
mode=mode,
typespec=typespec)
def RunLLVMAS(infile, output):
if IsStdinInput(infile):
infile = '-'
# This is a bitcode only step - so get rid of "-arch xxx" which
# might be inherited from the current invocation
RunDriver('as', [infile, '-o', output], suppress_inherited_arch_args=True)
def RunNativeAS(infile, output):
if IsStdinInput(infile):
infile = '-'
RunDriver('as', [infile, '-o', output])
def RunTranslate(infile, output, mode):
if not env.getbool('ALLOW_TRANSLATE'):
Log.Fatal('%s: Trying to convert bitcode to an object file before '
'bitcode linking. This is supposed to wait until '
'translation. Use --pnacl-allow-translate to override.',
pathtools.touser(infile))
args = env.get('TRANSLATE_FLAGS') + [mode, '--allow-llvm-bitcode-input',
infile, '-o', output]
if env.getbool('PIC'):
args += ['-fPIC']
RunDriver('translate', args)
def RunOpt(infile, outfile, pass_list):
filtered_list = [pass_option for pass_option in pass_list
if pass_option not in env.get('LLVM_PASSES_TO_DISABLE')]
RunDriver('opt', filtered_list + [infile, '-o', outfile])
def SetupChain(chain, input_type, output_type):
assert(output_type in ('pp','ll','po','s','o'))
cur_type = input_type
# source file -> pp
if filetype.IsSourceType(cur_type) and output_type == 'pp':
chain.add(RunCC, 'cpp', mode='-E')
cur_type = 'pp'
if cur_type == output_type:
return
# source file -> ll
if (filetype.IsSourceType(cur_type) and
(env.getbool('FORCE_INTERMEDIATE_LL') or output_type == 'll')):
chain.add(RunCC, 'll', mode='-S')
cur_type = 'll'
if cur_type == output_type:
return
# ll -> po
if cur_type == 'll':
chain.add(RunLLVMAS, 'po')
cur_type = 'po'
if cur_type == output_type:
return
  # source file -> po (we also force native output to go through this phase)
if filetype.IsSourceType(cur_type) and output_type in ('po', 'o', 's'):
chain.add(RunCC, 'po', mode='-c')
cur_type = 'po'
if cur_type == output_type:
return
# po -> o
if (cur_type == 'po' and output_type == 'o'):
# If we aren't using biased bitcode, then at least -expand-byval
# must be run to work with the PPAPI shim calling convention.
if IsPortable():
chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
chain.add(RunTranslate, 'o', mode='-c')
cur_type = 'o'
if cur_type == output_type:
return
# po -> s
if cur_type == 'po':
# If we aren't using biased bitcode, then at least -expand-byval
# must be run to work with the PPAPI shim calling convention.
if IsPortable():
chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
chain.add(RunTranslate, 's', mode='-S')
cur_type = 's'
if cur_type == output_type:
return
# S -> s
if cur_type == 'S':
chain.add(RunCC, 's', mode='-E')
cur_type = 's'
if output_type == 'pp':
return
if cur_type == output_type:
return
# s -> o
if cur_type == 's' and output_type == 'o':
chain.add(RunNativeAS, 'o')
cur_type = 'o'
if cur_type == output_type:
return
Log.Fatal("Unable to compile .%s to .%s", input_type, output_type)
def get_help(argv):
tool = env.getone('SCRIPT_NAME')
if '--help-full' in argv:
# To get ${CC}, etc.
env.update(EXTRA_ENV)
code, stdout, stderr = Run('"${CC}" -help',
redirect_stdout=subprocess.PIPE,
redirect_stderr=subprocess.STDOUT,
errexit=False)
return stdout
else:
return """
This is a "GCC-compatible" driver using clang under the hood.
Usage: %s [options] <inputs> ...
BASIC OPTIONS:
-o <file> Output to <file>.
-E Only run the preprocessor.
-S Generate bitcode assembly.
-c Generate bitcode object.
-I <dir> Add header search path.
-L <dir> Add library search path.
-D<key>[=<val>] Add definition for the preprocessor.
-W<id> Toggle warning <id>.
-f<feature> Enable <feature>.
-Wl,<arg> Pass <arg> to the linker.
-Xlinker <arg> Pass <arg> to the linker.
-Wt,<arg> Pass <arg> to the translator.
-Xtranslator <arg> Pass <arg> to the translator.
-Wp,<arg> Pass <arg> to the preprocessor.
-Xpreprocessor,<arg> Pass <arg> to the preprocessor.
-x <language> Treat subsequent input files as having type <language>.
-static Produce a static executable (the default).
-Bstatic Link subsequent libraries statically.
-Bdynamic Link subsequent libraries dynamically.
-fPIC Ignored (only used by translator backend)
(accepted for compatibility).
-pipe Ignored (for compatibility).
  -O<n>              Optimization level <n>: 0, 1, 2, 3, 4 or s.
-g Generate complete debug information.
-gline-tables-only Generate debug line-information only
(allowing for stack traces).
-flimit-debug-info Generate limited debug information.
-save-temps Keep intermediate compilation results.
-v Verbose output / show commands.
-h | --help Show this help.
--help-full Show underlying clang driver's help message
(warning: not all options supported).
""" % (tool)
|
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import testtools
import time
import pytest
from kmip.core import enums
from kmip.core.factories import attributes as attribute_factory
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects
@pytest.mark.usefixtures("simple")
class TestProxyKmipClientIntegration(testtools.TestCase):
def setUp(self):
super(TestProxyKmipClientIntegration, self).setUp()
self.object_factory = factory.ObjectFactory()
self.attribute_factory = attribute_factory.AttributeFactory()
def tearDown(self):
super(TestProxyKmipClientIntegration, self).tearDown()
uuids = self.client.locate()
for uuid in uuids:
self.client.destroy(uid=uuid)
def test_symmetric_key_create_get_destroy(self):
"""
Test that the ProxyKmipClient can create, retrieve, and destroy a
symmetric key.
"""
uid = self.client.create(enums.CryptographicAlgorithm.AES, 256)
self.assertIsInstance(uid, six.string_types)
try:
key = self.client.get(uid)
self.assertIsInstance(key, objects.SymmetricKey)
self.assertEqual(
key.cryptographic_algorithm,
enums.CryptographicAlgorithm.AES)
self.assertEqual(key.cryptographic_length, 256)
finally:
self.client.destroy(uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.get, uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.destroy, uid)
def test_create_get_wrapped_destroy(self):
"""
Test that the ProxyKmipClient can create keys, retrieve a wrapped key,
and then destroy the keys for cleanup.
"""
key_id = self.client.create(enums.CryptographicAlgorithm.AES, 256)
wrapping_id = self.client.create(
enums.CryptographicAlgorithm.AES,
256,
cryptographic_usage_mask=[
enums.CryptographicUsageMask.WRAP_KEY,
enums.CryptographicUsageMask.UNWRAP_KEY,
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
self.client.activate(wrapping_id)
unwrapped_key = self.client.get(key_id)
wrapped_key = self.client.get(
key_id,
key_wrapping_specification={
'wrapping_method': enums.WrappingMethod.ENCRYPT,
'encryption_key_information': {
'unique_identifier': wrapping_id,
'cryptographic_parameters': {
'block_cipher_mode':
enums.BlockCipherMode.NIST_KEY_WRAP
}
},
'encoding_option': enums.EncodingOption.NO_ENCODING
}
)
self.assertNotEqual(unwrapped_key.value, wrapped_key.value)
self.client.revoke(
enums.RevocationReasonCode.CESSATION_OF_OPERATION,
wrapping_id
)
self.client.destroy(key_id)
self.client.destroy(wrapping_id)
def test_symmetric_key_register_get_destroy(self):
"""
Test that the ProxyKmipClient can register, retrieve, and destroy a
symmetric key.
"""
# Key encoding obtained from Section 14.2 of the KMIP 1.1 test
# documentation.
key = objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
128,
(b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E'
b'\x0F'),
name="Test Symmetric Key"
)
uid = self.client.register(key)
self.assertIsInstance(uid, six.string_types)
try:
result = self.client.get(uid)
self.assertIsInstance(result, objects.SymmetricKey)
self.assertEqual(
result, key, "expected {0}\nobserved {1}".format(result, key))
finally:
self.client.destroy(uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.get, uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.destroy, uid)
def test_register_wrapped_get_destroy(self):
"""
Test that a wrapped key can be registered with the server and that its
metadata is retrieved with the get operation.
"""
key = objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
128,
(b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E'
b'\x0F'),
key_wrapping_data={
'wrapping_method': enums.WrappingMethod.ENCRYPT,
'encryption_key_information': {
'unique_identifier': '42',
'cryptographic_parameters': {
'block_cipher_mode':
enums.BlockCipherMode.NIST_KEY_WRAP
}
},
'encoding_option': enums.EncodingOption.NO_ENCODING
}
)
key_id = self.client.register(key)
result = self.client.get(key_id)
key_wrapping_data = result.key_wrapping_data
self.assertIsInstance(key_wrapping_data, dict)
self.assertEqual(
enums.WrappingMethod.ENCRYPT,
key_wrapping_data.get('wrapping_method')
)
eki = key_wrapping_data.get('encryption_key_information')
self.assertIsInstance(eki, dict)
self.assertEqual('42', eki.get('unique_identifier'))
cp = eki.get('cryptographic_parameters')
self.assertIsInstance(cp, dict)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
cp.get('block_cipher_mode')
)
self.assertEqual(
enums.EncodingOption.NO_ENCODING,
key_wrapping_data.get('encoding_option')
)
def test_asymmetric_key_pair_create_get_destroy(self):
"""
Test that the ProxyKmipClient can create, retrieve, and destroy an
asymmetric key pair.
"""
public_uid, private_uid = self.client.create_key_pair(
enums.CryptographicAlgorithm.RSA,
2048,
public_usage_mask=[enums.CryptographicUsageMask.ENCRYPT],
private_usage_mask=[enums.CryptographicUsageMask.DECRYPT]
)
self.assertIsInstance(public_uid, six.string_types)
self.assertIsInstance(private_uid, six.string_types)
try:
public_key = self.client.get(public_uid)
self.assertIsInstance(public_key, objects.PublicKey)
self.assertEqual(
public_key.cryptographic_algorithm,
enums.CryptographicAlgorithm.RSA)
self.assertEqual(public_key.cryptographic_length, 2048)
private_key = self.client.get(private_uid)
self.assertIsInstance(private_key, objects.PrivateKey)
self.assertEqual(
private_key.cryptographic_algorithm,
enums.CryptographicAlgorithm.RSA)
self.assertEqual(private_key.cryptographic_length, 2048)
finally:
self.client.destroy(public_uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.get, public_uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.destroy,
public_uid)
self.client.destroy(private_uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.get, private_uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.destroy,
private_uid)
def test_public_key_register_get_destroy(self):
"""
Test that the ProxyKmipClient can register, retrieve, and destroy a
public key.
"""
# Key encoding obtained from Section 13.4 of the KMIP 1.1 test
# documentation.
key = objects.PublicKey(
enums.CryptographicAlgorithm.RSA,
2048,
(b'\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xAB\x7F\x16\x1C\x00\x42'
b'\x49\x6C\xCD\x6C\x6D\x4D\xAD\xB9\x19\x97\x34\x35\x35\x77\x76'
b'\x00\x3A\xCF\x54\xB7\xAF\x1E\x44\x0A\xFB\x80\xB6\x4A\x87\x55'
b'\xF8\x00\x2C\xFE\xBA\x6B\x18\x45\x40\xA2\xD6\x60\x86\xD7\x46'
b'\x48\x34\x6D\x75\xB8\xD7\x18\x12\xB2\x05\x38\x7C\x0F\x65\x83'
b'\xBC\x4D\x7D\xC7\xEC\x11\x4F\x3B\x17\x6B\x79\x57\xC4\x22\xE7'
b'\xD0\x3F\xC6\x26\x7F\xA2\xA6\xF8\x9B\x9B\xEE\x9E\x60\xA1\xD7'
b'\xC2\xD8\x33\xE5\xA5\xF4\xBB\x0B\x14\x34\xF4\xE7\x95\xA4\x11'
b'\x00\xF8\xAA\x21\x49\x00\xDF\x8B\x65\x08\x9F\x98\x13\x5B\x1C'
b'\x67\xB7\x01\x67\x5A\xBD\xBC\x7D\x57\x21\xAA\xC9\xD1\x4A\x7F'
b'\x08\x1F\xCE\xC8\x0B\x64\xE8\xA0\xEC\xC8\x29\x53\x53\xC7\x95'
b'\x32\x8A\xBF\x70\xE1\xB4\x2E\x7B\xB8\xB7\xF4\xE8\xAC\x8C\x81'
b'\x0C\xDB\x66\xE3\xD2\x11\x26\xEB\xA8\xDA\x7D\x0C\xA3\x41\x42'
b'\xCB\x76\xF9\x1F\x01\x3D\xA8\x09\xE9\xC1\xB7\xAE\x64\xC5\x41'
b'\x30\xFB\xC2\x1D\x80\xE9\xC2\xCB\x06\xC5\xC8\xD7\xCC\xE8\x94'
b'\x6A\x9A\xC9\x9B\x1C\x28\x15\xC3\x61\x2A\x29\xA8\x2D\x73\xA1'
b'\xF9\x93\x74\xFE\x30\xE5\x49\x51\x66\x2A\x6E\xDA\x29\xC6\xFC'
b'\x41\x13\x35\xD5\xDC\x74\x26\xB0\xF6\x05\x02\x03\x01\x00\x01'),
enums.KeyFormatType.PKCS_1)
uid = self.client.register(key)
self.assertIsInstance(uid, six.string_types)
try:
result = self.client.get(uid)
self.assertIsInstance(result, objects.PublicKey)
self.assertEqual(
result, key, "expected {0}\nobserved {1}".format(result, key))
finally:
self.client.destroy(uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.get, uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.destroy, uid)
def test_private_key_register_get_destroy(self):
"""
Test that the ProxyKmipClient can register, retrieve, and destroy a
private key.
"""
# Key encoding obtained from Section 13.4 of the KMIP 1.1 test
# documentation.
key = objects.PrivateKey(
enums.CryptographicAlgorithm.RSA,
2048,
(b'\x30\x82\x04\xA5\x02\x01\x00\x02\x82\x01\x01\x00\xAB\x7F\x16'
b'\x1C\x00\x42\x49\x6C\xCD\x6C\x6D\x4D\xAD\xB9\x19\x97\x34\x35'
b'\x35\x77\x76\x00\x3A\xCF\x54\xB7\xAF\x1E\x44\x0A\xFB\x80\xB6'
b'\x4A\x87\x55\xF8\x00\x2C\xFE\xBA\x6B\x18\x45\x40\xA2\xD6\x60'
b'\x86\xD7\x46\x48\x34\x6D\x75\xB8\xD7\x18\x12\xB2\x05\x38\x7C'
b'\x0F\x65\x83\xBC\x4D\x7D\xC7\xEC\x11\x4F\x3B\x17\x6B\x79\x57'
b'\xC4\x22\xE7\xD0\x3F\xC6\x26\x7F\xA2\xA6\xF8\x9B\x9B\xEE\x9E'
b'\x60\xA1\xD7\xC2\xD8\x33\xE5\xA5\xF4\xBB\x0B\x14\x34\xF4\xE7'
b'\x95\xA4\x11\x00\xF8\xAA\x21\x49\x00\xDF\x8B\x65\x08\x9F\x98'
b'\x13\x5B\x1C\x67\xB7\x01\x67\x5A\xBD\xBC\x7D\x57\x21\xAA\xC9'
b'\xD1\x4A\x7F\x08\x1F\xCE\xC8\x0B\x64\xE8\xA0\xEC\xC8\x29\x53'
b'\x53\xC7\x95\x32\x8A\xBF\x70\xE1\xB4\x2E\x7B\xB8\xB7\xF4\xE8'
b'\xAC\x8C\x81\x0C\xDB\x66\xE3\xD2\x11\x26\xEB\xA8\xDA\x7D\x0C'
b'\xA3\x41\x42\xCB\x76\xF9\x1F\x01\x3D\xA8\x09\xE9\xC1\xB7\xAE'
b'\x64\xC5\x41\x30\xFB\xC2\x1D\x80\xE9\xC2\xCB\x06\xC5\xC8\xD7'
b'\xCC\xE8\x94\x6A\x9A\xC9\x9B\x1C\x28\x15\xC3\x61\x2A\x29\xA8'
b'\x2D\x73\xA1\xF9\x93\x74\xFE\x30\xE5\x49\x51\x66\x2A\x6E\xDA'
b'\x29\xC6\xFC\x41\x13\x35\xD5\xDC\x74\x26\xB0\xF6\x05\x02\x03'
b'\x01\x00\x01\x02\x82\x01\x00\x3B\x12\x45\x5D\x53\xC1\x81\x65'
b'\x16\xC5\x18\x49\x3F\x63\x98\xAA\xFA\x72\xB1\x7D\xFA\x89\x4D'
b'\xB8\x88\xA7\xD4\x8C\x0A\x47\xF6\x25\x79\xA4\xE6\x44\xF8\x6D'
b'\xA7\x11\xFE\xC8\x50\xCD\xD9\xDB\xBD\x17\xF6\x9A\x44\x3D\x2E'
b'\xC1\xDD\x60\xD3\xC6\x18\xFA\x74\xCD\xE5\xFD\xAF\xAB\xD6\xBA'
b'\xA2\x6E\xB0\xA3\xAD\xB4\xDE\xF6\x48\x0F\xB1\x21\x8C\xD3\xB0'
b'\x83\xE2\x52\xE8\x85\xB6\xF0\x72\x9F\x98\xB2\x14\x4D\x2B\x72'
b'\x29\x3E\x1B\x11\xD7\x33\x93\xBC\x41\xF7\x5B\x15\xEE\x3D\x75'
b'\x69\xB4\x99\x5E\xD1\xA1\x44\x25\xDA\x43\x19\xB7\xB2\x6B\x0E'
b'\x8F\xEF\x17\xC3\x75\x42\xAE\x5C\x6D\x58\x49\xF8\x72\x09\x56'
b'\x7F\x39\x25\xA4\x7B\x01\x6D\x56\x48\x59\x71\x7B\xC5\x7F\xCB'
b'\x45\x22\xD0\xAA\x49\xCE\x81\x6E\x5B\xE7\xB3\x08\x81\x93\x23'
b'\x6E\xC9\xEF\xFF\x14\x08\x58\x04\x5B\x73\xC5\xD7\x9B\xAF\x38'
b'\xF7\xC6\x7F\x04\xC5\xDC\xF0\xE3\x80\x6A\xD9\x82\xD1\x25\x90'
b'\x58\xC3\x47\x3E\x84\x71\x79\xA8\x78\xF2\xC6\xB3\xBD\x96\x8F'
b'\xB9\x9E\xA4\x6E\x91\x85\x89\x2F\x36\x76\xE7\x89\x65\xC2\xAE'
b'\xD4\x87\x7B\xA3\x91\x7D\xF0\x7C\x5E\x92\x74\x74\xF1\x9E\x76'
b'\x4B\xA6\x1D\xC3\x8D\x63\xBF\x29\x02\x81\x81\x00\xD5\xC6\x9C'
b'\x8C\x3C\xDC\x24\x64\x74\x4A\x79\x37\x13\xDA\xFB\x9F\x1D\xBC'
b'\x79\x9F\xF9\x64\x23\xFE\xCD\x3C\xBA\x79\x42\x86\xBC\xE9\x20'
b'\xF4\xB5\xC1\x83\xF9\x9E\xE9\x02\x8D\xB6\x21\x2C\x62\x77\xC4'
b'\xC8\x29\x7F\xCF\xBC\xE7\xF7\xC2\x4C\xA4\xC5\x1F\xC7\x18\x2F'
b'\xB8\xF4\x01\x9F\xB1\xD5\x65\x96\x74\xC5\xCB\xE6\xD5\xFA\x99'
b'\x20\x51\x34\x17\x60\xCD\x00\x73\x57\x29\xA0\x70\xA9\xE5\x4D'
b'\x34\x2B\xEB\xA8\xEF\x47\xEE\x82\xD3\xA0\x1B\x04\xCE\xC4\xA0'
b'\x0D\x4D\xDB\x41\xE3\x51\x16\xFC\x22\x1E\x85\x4B\x43\xA6\x96'
b'\xC0\xE6\x41\x9B\x1B\x02\x81\x81\x00\xCD\x5E\xA7\x70\x27\x89'
b'\x06\x4B\x67\x35\x40\xCB\xFF\x09\x35\x6A\xD8\x0B\xC3\xD5\x92'
b'\x81\x2E\xBA\x47\x61\x0B\x9F\xAC\x6A\xEC\xEF\xE2\x2A\xCA\xE4'
b'\x38\x45\x9C\xDA\x74\xE5\x96\x53\xD8\x8C\x04\x18\x9D\x34\x39'
b'\x9B\xF5\xB1\x4B\x92\x0E\x34\xEF\x38\xA7\xD0\x9F\xE6\x95\x93'
b'\x39\x6E\x8F\xE7\x35\xE6\xF0\xA6\xAE\x49\x90\x40\x10\x41\xD8'
b'\xA4\x06\xB6\xFD\x86\xA1\x16\x1E\x45\xF9\x5A\x3E\xAA\x5C\x10'
b'\x12\xE6\x66\x2E\x44\xF1\x5F\x33\x5A\xC9\x71\xE1\x76\x6B\x2B'
b'\xB9\xC9\x85\x10\x99\x74\x14\x1B\x44\xD3\x7E\x1E\x31\x98\x20'
b'\xA5\x5F\x02\x81\x81\x00\xB2\x87\x12\x37\xBF\x9F\xAD\x38\xC3'
b'\x31\x6A\xB7\x87\x7A\x6A\x86\x80\x63\xE5\x42\xA7\x18\x6D\x43'
b'\x1E\x8D\x27\xC1\x9A\xC0\x41\x45\x84\x03\x39\x42\xE9\xFF\x6E'
b'\x29\x73\xBB\x7B\x2D\x8B\x0E\x94\xAD\x1E\xE8\x21\x58\x10\x8F'
b'\xBC\x86\x64\x51\x7A\x5A\x46\x7F\xB9\x63\x01\x4B\xD5\xDC\xC2'
b'\xB4\xFB\x08\x7C\x23\x03\x9D\x11\x92\x0D\xBE\x22\xFD\x9F\x16'
b'\xB4\xD8\x9E\x23\x22\x5C\xD4\x55\xAD\xBA\xF3\x2E\xF4\x3F\x18'
b'\x58\x64\xA3\x6D\x63\x03\x09\xD6\x85\x3F\x77\x14\xB3\x9A\xAE'
b'\x1E\xBE\xE3\x93\x8F\x87\xC2\x70\x7E\x17\x8C\x73\x9F\x9F\x02'
b'\x81\x81\x00\x96\x90\xBE\xD1\x4B\x2A\xFA\xA2\x6D\x98\x6D\x59'
b'\x22\x31\xEE\x27\xD7\x1D\x49\x06\x5B\xD2\xBA\x1F\x78\x15\x7E'
b'\x20\x22\x98\x81\xFD\x9D\x23\x22\x7D\x0F\x84\x79\xEA\xEF\xA9'
b'\x22\xFD\x75\xD5\xB1\x6B\x1A\x56\x1F\xA6\x68\x0B\x04\x0C\xA0'
b'\xBD\xCE\x65\x0B\x23\xB9\x17\xA4\xB1\xBB\x79\x83\xA7\x4F\xAD'
b'\x70\xE1\xC3\x05\xCB\xEC\x2B\xFF\x1A\x85\xA7\x26\xA1\xD9\x02'
b'\x60\xE4\xF1\x08\x4F\x51\x82\x34\xDC\xD3\xFE\x77\x0B\x95\x20'
b'\x21\x5B\xD5\x43\xBB\x6A\x41\x17\x71\x87\x54\x67\x6A\x34\x17'
b'\x16\x66\xA7\x9F\x26\xE7\x9C\x14\x9C\x5A\xA1\x02\x81\x81\x00'
b'\xA0\xC9\x85\xA0\xA0\xA7\x91\xA6\x59\xF9\x97\x31\x13\x4C\x44'
b'\xF3\x7B\x2E\x52\x0A\x2C\xEA\x35\x80\x0A\xD2\x72\x41\xED\x36'
b'\x0D\xFD\xE6\xE8\xCA\x61\x4F\x12\x04\x7F\xD0\x8B\x76\xAC\x4D'
b'\x13\xC0\x56\xA0\x69\x9E\x2F\x98\xA1\xCA\xC9\x10\x11\x29\x4D'
b'\x71\x20\x8F\x4A\xBA\xB3\x3B\xA8\x7A\xA0\x51\x7F\x41\x5B\xAC'
b'\xA8\x8D\x6B\xAC\x00\x60\x88\xFA\x60\x1D\x34\x94\x17\xE1\xF0'
b'\xC9\xB2\x3A\xFF\xA4\xD4\x96\x61\x8D\xBC\x02\x49\x86\xED\x69'
b'\x0B\xBB\x7B\x02\x57\x68\xFF\x9D\xF8\xAC\x15\x41\x6F\x48\x9F'
b'\x81\x29\xC3\x23\x41\xA8\xB4\x4F'),
enums.KeyFormatType.PKCS_8)
uid = self.client.register(key)
self.assertIsInstance(uid, six.string_types)
try:
result = self.client.get(uid)
self.assertIsInstance(result, objects.PrivateKey)
self.assertEqual(
result, key, "expected {0}\nobserved {1}".format(result, key))
finally:
self.client.destroy(uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.get, uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.destroy, uid)
def test_x509_certificate_register_get_destroy(self):
"""
Test that the ProxyKmipClient can register, retrieve, and destroy an
X.509 certificate.
"""
# Certificate encoding obtained from Section 13.2 of the KMIP 1.1 test
# documentation.
cert = objects.X509Certificate(
(b'\x30\x82\x03\x12\x30\x82\x01\xFA\xA0\x03\x02\x01\x02\x02\x01'
b'\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05'
b'\x00\x30\x3B\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55'
b'\x53\x31\x0D\x30\x0B\x06\x03\x55\x04\x0A\x13\x04\x54\x45\x53'
b'\x54\x31\x0E\x30\x0C\x06\x03\x55\x04\x0B\x13\x05\x4F\x41\x53'
b'\x49\x53\x31\x0D\x30\x0B\x06\x03\x55\x04\x03\x13\x04\x4B\x4D'
b'\x49\x50\x30\x1E\x17\x0D\x31\x30\x31\x31\x30\x31\x32\x33\x35'
b'\x39\x35\x39\x5A\x17\x0D\x32\x30\x31\x31\x30\x31\x32\x33\x35'
b'\x39\x35\x39\x5A\x30\x3B\x31\x0B\x30\x09\x06\x03\x55\x04\x06'
b'\x13\x02\x55\x53\x31\x0D\x30\x0B\x06\x03\x55\x04\x0A\x13\x04'
b'\x54\x45\x53\x54\x31\x0E\x30\x0C\x06\x03\x55\x04\x0B\x13\x05'
b'\x4F\x41\x53\x49\x53\x31\x0D\x30\x0B\x06\x03\x55\x04\x03\x13'
b'\x04\x4B\x4D\x49\x50\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86'
b'\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30'
b'\x82\x01\x0A\x02\x82\x01\x01\x00\xAB\x7F\x16\x1C\x00\x42\x49'
b'\x6C\xCD\x6C\x6D\x4D\xAD\xB9\x19\x97\x34\x35\x35\x77\x76\x00'
b'\x3A\xCF\x54\xB7\xAF\x1E\x44\x0A\xFB\x80\xB6\x4A\x87\x55\xF8'
b'\x00\x2C\xFE\xBA\x6B\x18\x45\x40\xA2\xD6\x60\x86\xD7\x46\x48'
b'\x34\x6D\x75\xB8\xD7\x18\x12\xB2\x05\x38\x7C\x0F\x65\x83\xBC'
b'\x4D\x7D\xC7\xEC\x11\x4F\x3B\x17\x6B\x79\x57\xC4\x22\xE7\xD0'
b'\x3F\xC6\x26\x7F\xA2\xA6\xF8\x9B\x9B\xEE\x9E\x60\xA1\xD7\xC2'
b'\xD8\x33\xE5\xA5\xF4\xBB\x0B\x14\x34\xF4\xE7\x95\xA4\x11\x00'
b'\xF8\xAA\x21\x49\x00\xDF\x8B\x65\x08\x9F\x98\x13\x5B\x1C\x67'
b'\xB7\x01\x67\x5A\xBD\xBC\x7D\x57\x21\xAA\xC9\xD1\x4A\x7F\x08'
b'\x1F\xCE\xC8\x0B\x64\xE8\xA0\xEC\xC8\x29\x53\x53\xC7\x95\x32'
b'\x8A\xBF\x70\xE1\xB4\x2E\x7B\xB8\xB7\xF4\xE8\xAC\x8C\x81\x0C'
b'\xDB\x66\xE3\xD2\x11\x26\xEB\xA8\xDA\x7D\x0C\xA3\x41\x42\xCB'
b'\x76\xF9\x1F\x01\x3D\xA8\x09\xE9\xC1\xB7\xAE\x64\xC5\x41\x30'
b'\xFB\xC2\x1D\x80\xE9\xC2\xCB\x06\xC5\xC8\xD7\xCC\xE8\x94\x6A'
b'\x9A\xC9\x9B\x1C\x28\x15\xC3\x61\x2A\x29\xA8\x2D\x73\xA1\xF9'
b'\x93\x74\xFE\x30\xE5\x49\x51\x66\x2A\x6E\xDA\x29\xC6\xFC\x41'
b'\x13\x35\xD5\xDC\x74\x26\xB0\xF6\x05\x02\x03\x01\x00\x01\xA3'
b'\x21\x30\x1F\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x04'
b'\xE5\x7B\xD2\xC4\x31\xB2\xE8\x16\xE1\x80\xA1\x98\x23\xFA\xC8'
b'\x58\x27\x3F\x6B\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01'
b'\x01\x05\x05\x00\x03\x82\x01\x01\x00\xA8\x76\xAD\xBC\x6C\x8E'
b'\x0F\xF0\x17\x21\x6E\x19\x5F\xEA\x76\xBF\xF6\x1A\x56\x7C\x9A'
b'\x13\xDC\x50\xD1\x3F\xEC\x12\xA4\x27\x3C\x44\x15\x47\xCF\xAB'
b'\xCB\x5D\x61\xD9\x91\xE9\x66\x31\x9D\xF7\x2C\x0D\x41\xBA\x82'
b'\x6A\x45\x11\x2F\xF2\x60\x89\xA2\x34\x4F\x4D\x71\xCF\x7C\x92'
b'\x1B\x4B\xDF\xAE\xF1\x60\x0D\x1B\xAA\xA1\x53\x36\x05\x7E\x01'
b'\x4B\x8B\x49\x6D\x4F\xAE\x9E\x8A\x6C\x1D\xA9\xAE\xB6\xCB\xC9'
b'\x60\xCB\xF2\xFA\xE7\x7F\x58\x7E\xC4\xBB\x28\x20\x45\x33\x88'
b'\x45\xB8\x8D\xD9\xAE\xEA\x53\xE4\x82\xA3\x6E\x73\x4E\x4F\x5F'
b'\x03\xB9\xD0\xDF\xC4\xCA\xFC\x6B\xB3\x4E\xA9\x05\x3E\x52\xBD'
b'\x60\x9E\xE0\x1E\x86\xD9\xB0\x9F\xB5\x11\x20\xC1\x98\x34\xA9'
b'\x97\xB0\x9C\xE0\x8D\x79\xE8\x13\x11\x76\x2F\x97\x4B\xB1\xC8'
b'\xC0\x91\x86\xC4\xD7\x89\x33\xE0\xDB\x38\xE9\x05\x08\x48\x77'
b'\xE1\x47\xC7\x8A\xF5\x2F\xAE\x07\x19\x2F\xF1\x66\xD1\x9F\xA9'
b'\x4A\x11\xCC\x11\xB2\x7E\xD0\x50\xF7\xA2\x7F\xAE\x13\xB2\x05'
b'\xA5\x74\xC4\xEE\x00\xAA\x8B\xD6\x5D\x0D\x70\x57\xC9\x85\xC8'
b'\x39\xEF\x33\x6A\x44\x1E\xD5\x3A\x53\xC6\xB6\xB6\x96\xF1\xBD'
b'\xEB\x5F\x7E\xA8\x11\xEB\xB2\x5A\x7F\x86'))
uid = self.client.register(cert)
self.assertIsInstance(uid, six.string_types)
try:
result = self.client.get(uid)
self.assertIsInstance(result, objects.X509Certificate)
self.assertEqual(
result, cert, "expected {0}\nobserved {1}".format(
result, cert))
finally:
self.client.destroy(uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.get, uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.destroy, uid)
def test_secret_data_register_get_destroy(self):
"""
Test that the ProxyKmipClient can register, retrieve, and destroy a
secret.
"""
# Secret encoding obtained from Section 3.1.5 of the KMIP 1.1 test
# documentation.
secret = objects.SecretData(
(b'\x53\x65\x63\x72\x65\x74\x50\x61\x73\x73\x77\x6F\x72\x64'),
enums.SecretDataType.PASSWORD)
uid = self.client.register(secret)
self.assertIsInstance(uid, six.string_types)
try:
result = self.client.get(uid)
self.assertIsInstance(result, objects.SecretData)
self.assertEqual(
result, secret, "expected {0}\nobserved {1}".format(
result, secret))
finally:
self.client.destroy(uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.get, uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.destroy, uid)
def test_opaque_object_register_get_destroy(self):
"""
Test that the ProxyKmipClient can register, retrieve, and destroy an
opaque object.
"""
# Object encoding obtained from Section 3.1.5 of the KMIP 1.1 test
# documentation.
obj = objects.OpaqueObject(
b'\x53\x65\x63\x72\x65\x74\x50\x61\x73\x73\x77\x6F\x72\x64',
enums.OpaqueDataType.NONE)
uid = self.client.register(obj)
self.assertIsInstance(uid, six.string_types)
try:
result = self.client.get(uid)
self.assertIsInstance(result, objects.OpaqueObject)
self.assertEqual(
result, obj, "expected {0}\nobserved {1}".format(result, obj))
finally:
self.client.destroy(uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.get, uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.destroy, uid)
def test_derive_key_using_pbkdf2(self):
"""
Test that the ProxyKmipClient can derive a new key using PBKDF2.
"""
password_id = self.client.register(
objects.SecretData(
b'password',
enums.SecretDataType.PASSWORD,
masks=[enums.CryptographicUsageMask.DERIVE_KEY]
)
)
key_id = self.client.derive_key(
enums.ObjectType.SYMMETRIC_KEY,
[password_id],
enums.DerivationMethod.PBKDF2,
{
'cryptographic_parameters': {
'hashing_algorithm': enums.HashingAlgorithm.SHA_1
},
'salt': b'salt',
'iteration_count': 4096
},
cryptographic_length=160,
cryptographic_algorithm=enums.CryptographicAlgorithm.AES
)
key = self.client.get(key_id)
self.assertEqual(
(
b'\x4b\x00\x79\x01\xb7\x65\x48\x9a'
b'\xbe\xad\x49\xd9\x26\xf7\x21\xd0'
b'\x65\xa4\x29\xc1'
),
key.value
)
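# Note: the expected key above is the RFC 6070 PBKDF2-HMAC-SHA1 test vector
# (password=b'password', salt=b'salt', 4096 iterations, 20-byte output). On a
# modern Python it can be cross-checked locally with:
#     hashlib.pbkdf2_hmac('sha1', b'password', b'salt', 4096, dklen=20)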
attribute_list = self.client.get_attribute_list(key_id)
self.assertIn('Cryptographic Algorithm', attribute_list)
self.assertIn('Cryptographic Length', attribute_list)
result_id, attribute_list = self.client.get_attributes(
uid=key_id,
attribute_names=['Cryptographic Algorithm', 'Cryptographic Length']
)
self.assertEqual(key_id, result_id)
self.assertEqual(2, len(attribute_list))
attribute = attribute_list[0]
self.assertEqual(
'Cryptographic Algorithm',
attribute.attribute_name.value
)
self.assertEqual(
enums.CryptographicAlgorithm.AES,
attribute.attribute_value.value
)
attribute = attribute_list[1]
self.assertEqual(
'Cryptographic Length',
attribute.attribute_name.value
)
self.assertEqual(160, attribute.attribute_value.value)
def test_derive_key_using_encryption(self):
"""
Test that the ProxyKmipClient can derive a new key using encryption.
"""
key_id = self.client.register(
objects.SymmetricKey(
enums.CryptographicAlgorithm.BLOWFISH,
128,
(
b'\x01\x23\x45\x67\x89\xAB\xCD\xEF'
b'\xF0\xE1\xD2\xC3\xB4\xA5\x96\x87'
),
masks=[enums.CryptographicUsageMask.DERIVE_KEY]
)
)
secret_id = self.client.derive_key(
enums.ObjectType.SECRET_DATA,
[key_id],
enums.DerivationMethod.ENCRYPT,
{
'cryptographic_parameters': {
'block_cipher_mode': enums.BlockCipherMode.CBC,
'padding_method': enums.PaddingMethod.PKCS5,
'cryptographic_algorithm':
enums.CryptographicAlgorithm.BLOWFISH
},
'initialization_vector': b'\xFE\xDC\xBA\x98\x76\x54\x32\x10',
'derivation_data': (
b'\x37\x36\x35\x34\x33\x32\x31\x20'
b'\x4E\x6F\x77\x20\x69\x73\x20\x74'
b'\x68\x65\x20\x74\x69\x6D\x65\x20'
b'\x66\x6F\x72\x20\x00'
)
},
cryptographic_length=256
)
secret = self.client.get(secret_id)
self.assertEqual(
(
b'\x6B\x77\xB4\xD6\x30\x06\xDE\xE6'
b'\x05\xB1\x56\xE2\x74\x03\x97\x93'
b'\x58\xDE\xB9\xE7\x15\x46\x16\xD9'
b'\x74\x9D\xEC\xBE\xC0\x5D\x26\x4B'
),
secret.value
)
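# Note: the key, IV, and derivation data above appear to match Eric Young's
# classic Blowfish CBC test vector; the final ciphertext block differs from
# the published value because PKCS5 padding is applied here instead of the
# original zero padding.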
def test_derive_key_using_nist_800_108c(self):
"""
Test that the ProxyKmipClient can derive a new key using the
NIST SP 800-108 Counter Mode KDF.
"""
base_id = self.client.register(
objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
512,
(
b'\xdd\x5d\xbd\x45\x59\x3e\xe2\xac'
b'\x13\x97\x48\xe7\x64\x5b\x45\x0f'
b'\x22\x3d\x2f\xf2\x97\xb7\x3f\xd7'
b'\x1c\xbc\xeb\xe7\x1d\x41\x65\x3c'
b'\x95\x0b\x88\x50\x0d\xe5\x32\x2d'
b'\x99\xef\x18\xdf\xdd\x30\x42\x82'
b'\x94\xc4\xb3\x09\x4f\x4c\x95\x43'
b'\x34\xe5\x93\xbd\x98\x2e\xc6\x14'
),
masks=[enums.CryptographicUsageMask.DERIVE_KEY]
)
)
key_id = self.client.derive_key(
enums.ObjectType.SYMMETRIC_KEY,
[base_id],
enums.DerivationMethod.NIST800_108_C,
{
'cryptographic_parameters': {
'hashing_algorithm': enums.HashingAlgorithm.SHA_512
},
'derivation_data': (
b'\xb5\x0b\x0c\x96\x3c\x6b\x30\x34'
b'\xb8\xcf\x19\xcd\x3f\x5c\x4e\xbe'
b'\x4f\x49\x85\xaf\x0c\x03\xe5\x75'
b'\xdb\x62\xe6\xfd\xf1\xec\xfe\x4f'
b'\x28\xb9\x5d\x7c\xe1\x6d\xf8\x58'
b'\x43\x24\x6e\x15\x57\xce\x95\xbb'
b'\x26\xcc\x9a\x21\x97\x4b\xbd\x2e'
b'\xb6\x9e\x83\x55'
)
},
cryptographic_length=128,
cryptographic_algorithm=enums.CryptographicAlgorithm.AES
)
key = self.client.get(key_id)
self.assertEqual(
(
b'\xe5\x99\x3b\xf9\xbd\x2a\xa1\xc4'
b'\x57\x46\x04\x2e\x12\x59\x81\x55'
),
key.value
)
attribute_list = self.client.get_attribute_list(key_id)
self.assertIn('Cryptographic Algorithm', attribute_list)
self.assertIn('Cryptographic Length', attribute_list)
result_id, attribute_list = self.client.get_attributes(
uid=key_id,
attribute_names=['Cryptographic Algorithm', 'Cryptographic Length']
)
self.assertEqual(key_id, result_id)
self.assertEqual(2, len(attribute_list))
attribute = attribute_list[0]
self.assertEqual(
'Cryptographic Algorithm',
attribute.attribute_name.value
)
self.assertEqual(
enums.CryptographicAlgorithm.AES,
attribute.attribute_value.value
)
attribute = attribute_list[1]
self.assertEqual(
'Cryptographic Length',
attribute.attribute_name.value
)
self.assertEqual(128, attribute.attribute_value.value)
def test_derive_key_using_hmac(self):
"""
Test that the ProxyKmipClient can derive a new key using HMAC.
"""
base_id = self.client.register(
objects.SecretData(
(
b'\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c'
b'\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c'
b'\x0c\x0c\x0c\x0c\x0c\x0c'
),
enums.SecretDataType.SEED,
masks=[enums.CryptographicUsageMask.DERIVE_KEY]
)
)
secret_id = self.client.derive_key(
enums.ObjectType.SECRET_DATA,
[base_id],
enums.DerivationMethod.HMAC,
{
'cryptographic_parameters': {
'hashing_algorithm': enums.HashingAlgorithm.SHA_1
},
'derivation_data': b'',
'salt': b''
},
cryptographic_length=336
)
secret = self.client.get(secret_id)
self.assertEqual(
(
b'\x2c\x91\x11\x72\x04\xd7\x45\xf3'
b'\x50\x0d\x63\x6a\x62\xf6\x4f\x0a'
b'\xb3\xba\xe5\x48\xaa\x53\xd4\x23'
b'\xb0\xd1\xf2\x7e\xbb\xa6\xf5\xe5'
b'\x67\x3a\x08\x1d\x70\xcc\xe7\xac'
b'\xfc\x48'
),
secret.value
)
def test_encrypt_decrypt(self):
"""
Test that the ProxyKmipClient can create an encryption key, encrypt
plain text with it, and then decrypt the cipher text, retrieving the
original plain text.
"""
# Create an encryption key.
key_id = self.client.create(
enums.CryptographicAlgorithm.AES,
256,
cryptographic_usage_mask=[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
# Activate the encryption key.
self.client.activate(key_id)
# Encrypt some plain text.
plain_text = b'This is a secret message.'
cipher_text, iv = self.client.encrypt(
plain_text,
uid=key_id,
cryptographic_parameters={
'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
'block_cipher_mode': enums.BlockCipherMode.CBC,
'padding_method': enums.PaddingMethod.PKCS5
},
iv_counter_nonce=(
b'\x85\x1e\x87\x64\x77\x6e\x67\x96'
b'\xaa\xb7\x22\xdb\xb6\x44\xac\xe8'
)
)
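# No server-generated IV is expected back since one was supplied explicitly.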
self.assertEqual(None, iv)
# Decrypt the cipher text.
result = self.client.decrypt(
cipher_text,
uid=key_id,
cryptographic_parameters={
'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
'block_cipher_mode': enums.BlockCipherMode.CBC,
'padding_method': enums.PaddingMethod.PKCS5
},
iv_counter_nonce=(
b'\x85\x1e\x87\x64\x77\x6e\x67\x96'
b'\xaa\xb7\x22\xdb\xb6\x44\xac\xe8'
)
)
self.assertEqual(plain_text, result)
# Clean up.
self.client.revoke(
enums.RevocationReasonCode.CESSATION_OF_OPERATION,
key_id
)
self.client.destroy(key_id)
def test_create_key_pair_sign_signature_verify(self):
"""
Test that the ProxyKmipClient can create an asymmetric key pair and
then use that key pair (1) to sign data and (2) verify the signature
on the data.
"""
# Create a public/private key pair.
public_key_id, private_key_id = self.client.create_key_pair(
enums.CryptographicAlgorithm.RSA,
2048,
public_usage_mask=[
enums.CryptographicUsageMask.VERIFY
],
private_usage_mask=[
enums.CryptographicUsageMask.SIGN
]
)
self.assertIsInstance(public_key_id, str)
self.assertIsInstance(private_key_id, str)
# Activate the signing key and the signature verification key.
self.client.activate(private_key_id)
self.client.activate(public_key_id)
# Sign a message.
signature = self.client.sign(
b'This is a signed message.',
uid=private_key_id,
cryptographic_parameters={
'padding_method': enums.PaddingMethod.PSS,
'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
'hashing_algorithm': enums.HashingAlgorithm.SHA_256
}
)
self.assertIsInstance(signature, six.binary_type)
# Verify the message signature.
result = self.client.signature_verify(
b'This is a signed message.',
signature,
uid=public_key_id,
cryptographic_parameters={
'padding_method': enums.PaddingMethod.PSS,
'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
'hashing_algorithm': enums.HashingAlgorithm.SHA_256
}
)
self.assertEqual(result, enums.ValidityIndicator.VALID)
# Clean up.
self.client.revoke(
enums.RevocationReasonCode.CESSATION_OF_OPERATION,
public_key_id
)
self.client.revoke(
enums.RevocationReasonCode.CESSATION_OF_OPERATION,
private_key_id
)
self.client.destroy(public_key_id)
self.client.destroy(private_key_id)
def test_certificate_register_locate_destroy(self):
"""
Test that newly registered certificates can be located based on their
attributes.
"""
label = "Integration Test - Register-Locate-Destroy Certificate"
value = (
b'\x30\x82\x03\x12\x30\x82\x01\xFA\xA0\x03\x02\x01\x02\x02\x01\x01'
b'\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30'
b'\x3B\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0D'
b'\x30\x0B\x06\x03\x55\x04\x0A\x13\x04\x54\x45\x53\x54\x31\x0E\x30'
b'\x0C\x06\x03\x55\x04\x0B\x13\x05\x4F\x41\x53\x49\x53\x31\x0D\x30'
b'\x0B\x06\x03\x55\x04\x03\x13\x04\x4B\x4D\x49\x50\x30\x1E\x17\x0D'
b'\x31\x30\x31\x31\x30\x31\x32\x33\x35\x39\x35\x39\x5A\x17\x0D\x32'
b'\x30\x31\x31\x30\x31\x32\x33\x35\x39\x35\x39\x5A\x30\x3B\x31\x0B'
b'\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0D\x30\x0B\x06'
b'\x03\x55\x04\x0A\x13\x04\x54\x45\x53\x54\x31\x0E\x30\x0C\x06\x03'
b'\x55\x04\x0B\x13\x05\x4F\x41\x53\x49\x53\x31\x0D\x30\x0B\x06\x03'
b'\x55\x04\x03\x13\x04\x4B\x4D\x49\x50\x30\x82\x01\x22\x30\x0D\x06'
b'\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F'
b'\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xAB\x7F\x16\x1C\x00\x42'
b'\x49\x6C\xCD\x6C\x6D\x4D\xAD\xB9\x19\x97\x34\x35\x35\x77\x76\x00'
b'\x3A\xCF\x54\xB7\xAF\x1E\x44\x0A\xFB\x80\xB6\x4A\x87\x55\xF8\x00'
b'\x2C\xFE\xBA\x6B\x18\x45\x40\xA2\xD6\x60\x86\xD7\x46\x48\x34\x6D'
b'\x75\xB8\xD7\x18\x12\xB2\x05\x38\x7C\x0F\x65\x83\xBC\x4D\x7D\xC7'
b'\xEC\x11\x4F\x3B\x17\x6B\x79\x57\xC4\x22\xE7\xD0\x3F\xC6\x26\x7F'
b'\xA2\xA6\xF8\x9B\x9B\xEE\x9E\x60\xA1\xD7\xC2\xD8\x33\xE5\xA5\xF4'
b'\xBB\x0B\x14\x34\xF4\xE7\x95\xA4\x11\x00\xF8\xAA\x21\x49\x00\xDF'
b'\x8B\x65\x08\x9F\x98\x13\x5B\x1C\x67\xB7\x01\x67\x5A\xBD\xBC\x7D'
b'\x57\x21\xAA\xC9\xD1\x4A\x7F\x08\x1F\xCE\xC8\x0B\x64\xE8\xA0\xEC'
b'\xC8\x29\x53\x53\xC7\x95\x32\x8A\xBF\x70\xE1\xB4\x2E\x7B\xB8\xB7'
b'\xF4\xE8\xAC\x8C\x81\x0C\xDB\x66\xE3\xD2\x11\x26\xEB\xA8\xDA\x7D'
b'\x0C\xA3\x41\x42\xCB\x76\xF9\x1F\x01\x3D\xA8\x09\xE9\xC1\xB7\xAE'
b'\x64\xC5\x41\x30\xFB\xC2\x1D\x80\xE9\xC2\xCB\x06\xC5\xC8\xD7\xCC'
b'\xE8\x94\x6A\x9A\xC9\x9B\x1C\x28\x15\xC3\x61\x2A\x29\xA8\x2D\x73'
b'\xA1\xF9\x93\x74\xFE\x30\xE5\x49\x51\x66\x2A\x6E\xDA\x29\xC6\xFC'
b'\x41\x13\x35\xD5\xDC\x74\x26\xB0\xF6\x05\x02\x03\x01\x00\x01\xA3'
b'\x21\x30\x1F\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x04\xE5'
b'\x7B\xD2\xC4\x31\xB2\xE8\x16\xE1\x80\xA1\x98\x23\xFA\xC8\x58\x27'
b'\x3F\x6B\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05'
b'\x00\x03\x82\x01\x01\x00\xA8\x76\xAD\xBC\x6C\x8E\x0F\xF0\x17\x21'
b'\x6E\x19\x5F\xEA\x76\xBF\xF6\x1A\x56\x7C\x9A\x13\xDC\x50\xD1\x3F'
b'\xEC\x12\xA4\x27\x3C\x44\x15\x47\xCF\xAB\xCB\x5D\x61\xD9\x91\xE9'
b'\x66\x31\x9D\xF7\x2C\x0D\x41\xBA\x82\x6A\x45\x11\x2F\xF2\x60\x89'
b'\xA2\x34\x4F\x4D\x71\xCF\x7C\x92\x1B\x4B\xDF\xAE\xF1\x60\x0D\x1B'
b'\xAA\xA1\x53\x36\x05\x7E\x01\x4B\x8B\x49\x6D\x4F\xAE\x9E\x8A\x6C'
b'\x1D\xA9\xAE\xB6\xCB\xC9\x60\xCB\xF2\xFA\xE7\x7F\x58\x7E\xC4\xBB'
b'\x28\x20\x45\x33\x88\x45\xB8\x8D\xD9\xAE\xEA\x53\xE4\x82\xA3\x6E'
b'\x73\x4E\x4F\x5F\x03\xB9\xD0\xDF\xC4\xCA\xFC\x6B\xB3\x4E\xA9\x05'
b'\x3E\x52\xBD\x60\x9E\xE0\x1E\x86\xD9\xB0\x9F\xB5\x11\x20\xC1\x98'
b'\x34\xA9\x97\xB0\x9C\xE0\x8D\x79\xE8\x13\x11\x76\x2F\x97\x4B\xB1'
b'\xC8\xC0\x91\x86\xC4\xD7\x89\x33\xE0\xDB\x38\xE9\x05\x08\x48\x77'
b'\xE1\x47\xC7\x8A\xF5\x2F\xAE\x07\x19\x2F\xF1\x66\xD1\x9F\xA9\x4A'
b'\x11\xCC\x11\xB2\x7E\xD0\x50\xF7\xA2\x7F\xAE\x13\xB2\x05\xA5\x74'
b'\xC4\xEE\x00\xAA\x8B\xD6\x5D\x0D\x70\x57\xC9\x85\xC8\x39\xEF\x33'
b'\x6A\x44\x1E\xD5\x3A\x53\xC6\xB6\xB6\x96\xF1\xBD\xEB\x5F\x7E\xA8'
b'\x11\xEB\xB2\x5A\x7F\x86')
usage_mask = [
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.VERIFY
]
certificate = objects.Certificate(
enums.CertificateType.X_509,
value,
masks=usage_mask,
name=label
)
a_id = self.client.register(certificate)
# Test locating the certificate by its "Certificate Type" value.
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CERTIFICATE_TYPE,
enums.CertificateType.X_509
)
]
)
self.assertEqual(1, len(result))
self.assertEqual(a_id, result[0])
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CERTIFICATE_TYPE,
enums.CertificateType.PGP
)
]
)
self.assertEqual(0, len(result))
# Clean up the certificate
self.client.destroy(a_id)
def test_create_getattributes_locate_destroy(self):
"""
Test that the ProxyKmipClient can create symmetric keys and then
locate those keys using their attributes.
"""
start_time = int(time.time())
time.sleep(2)
# Create some symmetric keys
a_id = self.client.create(enums.CryptographicAlgorithm.AES, 256)
time.sleep(2)
mid_time = int(time.time())
time.sleep(2)
b_id = self.client.create(enums.CryptographicAlgorithm.IDEA, 128)
time.sleep(2)
end_time = int(time.time())
self.assertIsInstance(a_id, str)
self.assertIsInstance(b_id, str)
# Get the "Initial Date" attributes for each key
result_id, result_attributes = self.client.get_attributes(
uid=a_id,
attribute_names=["Initial Date"]
)
self.assertEqual(1, len(result_attributes))
self.assertEqual(
"Initial Date",
result_attributes[0].attribute_name.value
)
initial_date_a = result_attributes[0].attribute_value.value
result_id, result_attributes = self.client.get_attributes(
uid=b_id,
attribute_names=["Initial Date"]
)
self.assertEqual(1, len(result_attributes))
self.assertEqual(
"Initial Date",
result_attributes[0].attribute_name.value
)
initial_date_b = result_attributes[0].attribute_value.value
# Test locating each key by its exact "Initial Date" value
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.INITIAL_DATE,
initial_date_a
)
]
)
self.assertEqual(1, len(result))
self.assertEqual(a_id, result[0])
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.INITIAL_DATE,
initial_date_b
)
]
)
self.assertEqual(1, len(result))
self.assertEqual(b_id, result[0])
# Test locating each key by a range around its "Initial Date" value
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.INITIAL_DATE,
start_time
),
self.attribute_factory.create_attribute(
enums.AttributeType.INITIAL_DATE,
mid_time
)
]
)
self.assertEqual(1, len(result))
self.assertEqual(a_id, result[0])
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.INITIAL_DATE,
mid_time
),
self.attribute_factory.create_attribute(
enums.AttributeType.INITIAL_DATE,
end_time
)
]
)
self.assertEqual(1, len(result))
self.assertEqual(b_id, result[0])
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.INITIAL_DATE,
start_time
),
self.attribute_factory.create_attribute(
enums.AttributeType.INITIAL_DATE,
end_time
)
]
)
self.assertEqual(2, len(result))
self.assertIn(a_id, result)
self.assertIn(b_id, result)
# Test locating each key by its state.
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.STATE,
enums.State.PRE_ACTIVE
)
]
)
self.assertEqual(2, len(result))
self.assertIn(a_id, result)
self.assertIn(b_id, result)
# Test locating each key by its object type.
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.OBJECT_TYPE,
enums.ObjectType.SYMMETRIC_KEY
)
]
)
self.assertEqual(2, len(result))
self.assertIn(a_id, result)
self.assertIn(b_id, result)
# Test locating each key by its cryptographic algorithm.
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.AES
)
]
)
self.assertEqual(1, len(result))
self.assertIn(a_id, result)
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.IDEA
)
]
)
self.assertEqual(1, len(result))
self.assertIn(b_id, result)
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
)
]
)
self.assertEqual(0, len(result))
# Test locating each key by its cryptographic length.
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
128
)
]
)
self.assertEqual(1, len(result))
self.assertIn(b_id, result)
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
256
)
]
)
self.assertEqual(1, len(result))
self.assertIn(a_id, result)
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
)
]
)
self.assertEqual(0, len(result))
# Test locating each key by its unique identifier.
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.UNIQUE_IDENTIFIER,
a_id
)
]
)
self.assertEqual(1, len(result))
self.assertIn(a_id, result)
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.UNIQUE_IDENTIFIER,
b_id
)
]
)
self.assertEqual(1, len(result))
self.assertIn(b_id, result)
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.UNIQUE_IDENTIFIER,
"unknown"
)
]
)
self.assertEqual(0, len(result))
# Test locating each key by its operation policy name.
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
"default"
)
]
)
self.assertEqual(2, len(result))
self.assertIn(a_id, result)
self.assertIn(b_id, result)
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
"unknown"
)
]
)
self.assertEqual(0, len(result))
# Test locating keys using offset and maximum item constraints.
result = self.client.locate(offset_items=1)
self.assertEqual(1, len(result))
self.assertIn(a_id, result)
result = self.client.locate(maximum_items=1)
self.assertEqual(1, len(result))
self.assertIn(b_id, result)
result = self.client.locate(offset_items=1, maximum_items=1)
self.assertEqual(1, len(result))
self.assertIn(a_id, result)
# Test locating keys using their cryptographic usage masks
mask = [enums.CryptographicUsageMask.ENCRYPT]
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask
)
]
)
self.assertEqual(2, len(result))
self.assertIn(a_id, result)
self.assertIn(b_id, result)
mask.append(enums.CryptographicUsageMask.DECRYPT)
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask
)
]
)
self.assertEqual(2, len(result))
self.assertIn(a_id, result)
self.assertIn(b_id, result)
mask.append(enums.CryptographicUsageMask.SIGN)
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask
)
]
)
self.assertEqual(0, len(result))
mask = [enums.CryptographicUsageMask.EXPORT]
result = self.client.locate(
attributes=[
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask
)
]
)
self.assertEqual(0, len(result))
# Clean up the keys
self.client.destroy(a_id)
self.client.destroy(b_id)
def test_split_key_register_get_destroy(self):
"""
Test that the ProxyKmipClient can register, retrieve, and destroy a
split key.
"""
key = objects.SplitKey(
cryptographic_algorithm=enums.CryptographicAlgorithm.AES,
cryptographic_length=128,
key_value=(
b'\x00\x01\x02\x03\x04\x05\x06\x07'
b'\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F'
),
name="Test Split Key",
cryptographic_usage_masks=[enums.CryptographicUsageMask.EXPORT],
key_format_type=enums.KeyFormatType.RAW,
key_wrapping_data=None,
split_key_parts=3,
key_part_identifier=1,
split_key_threshold=2,
split_key_method=enums.SplitKeyMethod.XOR,
prime_field_size=None
)
uid = self.client.register(key)
self.assertIsInstance(uid, six.string_types)
try:
result = self.client.get(uid)
self.assertIsInstance(result, objects.SplitKey)
self.assertEqual(
enums.CryptographicAlgorithm.AES,
result.cryptographic_algorithm
)
self.assertEqual(128, result.cryptographic_length)
self.assertEqual(
(
b'\x00\x01\x02\x03\x04\x05\x06\x07'
b'\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F'
),
result.value
)
self.assertEqual(enums.KeyFormatType.RAW, result.key_format_type)
self.assertEqual(3, result.split_key_parts)
self.assertEqual(1, result.key_part_identifier)
self.assertEqual(2, result.split_key_threshold)
self.assertEqual(enums.SplitKeyMethod.XOR, result.split_key_method)
self.assertIsNone(result.prime_field_size)
finally:
self.client.destroy(uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.get, uid)
self.assertRaises(
exceptions.KmipOperationFailure, self.client.destroy, uid)
def test_modify_delete_attribute(self):
"""
Test that the ProxyKmipClient can modify and delete an attribute.
"""
key_id = self.client.create(
enums.CryptographicAlgorithm.IDEA,
128,
name="Symmetric Key"
)
self.assertIsInstance(key_id, str)
# Get the "Name" attribute for the key.
result_id, result_attributes = self.client.get_attributes(
uid=key_id,
attribute_names=["Name"]
)
self.assertEqual(1, len(result_attributes))
self.assertEqual("Name", result_attributes[0].attribute_name.value)
self.assertEqual(
"Symmetric Key",
result_attributes[0].attribute_value.name_value.value
)
# Modify the "Name" attribute for the key.
response_id, response_attr = self.client.modify_attribute(
unique_identifier=key_id,
attribute=self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
"Modified Name",
index=0
)
)
self.assertEqual(key_id, response_id)
self.assertEqual("Name", response_attr.attribute_name.value)
self.assertEqual(0, response_attr.attribute_index.value)
self.assertEqual(
"Modified Name",
response_attr.attribute_value.name_value.value
)
# Get the "Name" attribute for the key to verify it was modified.
result_id, result_attributes = self.client.get_attributes(
uid=key_id,
attribute_names=["Name"]
)
self.assertEqual(1, len(result_attributes))
self.assertEqual("Name", result_attributes[0].attribute_name.value)
self.assertEqual(
"Modified Name",
result_attributes[0].attribute_value.name_value.value
)
# Delete the "Name" attribute for the key.
response_id, response_attr = self.client.delete_attribute(
unique_identifier=key_id,
attribute_name="Name",
attribute_index=0
)
self.assertEqual(key_id, response_id)
self.assertEqual("Name", response_attr.attribute_name.value)
self.assertEqual(0, response_attr.attribute_index.value)
self.assertEqual(
"Modified Name",
response_attr.attribute_value.name_value.value
)
|
|
#!/usr/bin/env python
"""
Some I/O tools.
"""
import re
import warnings
import cPickle
import numpy
import pyfits
import yaml
def loaddata(filename):
"""
Load data from a yaml (.yaml or .yml) or a pickle file (.pkl).
"""
if filename.endswith('yaml') or filename.endswith('yml'):
return yaml.load(open(filename))
elif filename.endswith('pkl'):
return cPickle.load(open(filename))
else:
raise ValueError('Wrong extension: %s (extension needed: .yaml, .yml or .pkl).' % filename)
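# Example usage (hypothetical file names):
#   params = loaddata('analysis.yaml')   # parsed with yaml.load
#   table = loaddata('results.pkl')      # unpickled with cPickle.load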
# Spectrum class ##############################
class Spectrum(object):
"""Class to read and manage a spectrum from a FITS file (NAXIS=1)."""
def __init__(self, name, varname=None, keepfits=True):
"""
Spectrum initialization.
Class to read and manage a spectrum from a FITS file (NAXIS=1)
including the associated [co]variance from an extension or an
external file.
Note: use the [Spectrum.]read_spectrum helper function for
transparent use.
"""
self.name = name # Generic name
if name is None: # Blank instance
return
self._readFits(name, # Read signal [and variance if any]
mode='update' if keepfits else 'readonly')
if not keepfits:
self.close()
if varname: # Override variance extension if any
if self.varname: # Set by _readFits from var. extension
warnings.warn("%s: VARIANCE extension overridden by %s" %
(name, varname), RuntimeWarning)
self.varname = varname
V = Spectrum(varname, varname=None, keepfits=keepfits)
assert (V.npts, V.start, V.step) == (self.npts, self.start, self.step), \
"Incompatible variance spectrum '%s' w.r.t. spectrum '%s'" % \
(varname, name)
self.v = V.y.copy()
# All other attributes and header keywords should be
# essentially the same as for signal spectrum, no need to
# keep them
if self.hasCov and self.hasVar: # Test variance vs. cov. coherence
assert numpy.allclose(self.v, self.cov.diagonal()), \
"%s: VARIANCE and COVARiance diagonal are incompatible" % self.name
# Channel
self.X = self.readKey('CHANNEL', 'X')[0].upper() # 'B' or 'R' (or 'X')
@property
def hasVar(self):
"""Check if variance exists."""
return hasattr(self, 'v') and self.v is not None
@property
def hasCov(self):
"""Check if covariance exists."""
return hasattr(self, 'cov') and self.cov is not None
def close(self):
"""Close FITS file (if any) and forget about it."""
if self._fits is not None:
self._fits.close()
self._fits = None
def __str__(self):
s = "Spectrum %s%s: %d px [%.2f-%.2f A] @%.2f A/px" % \
(self.name, ' [%c]' % self.X if self.X != 'X' else '',
self.npts, self.start, self.end, self.step)
if self.hasCov:
s += " with covariance"
elif self.hasVar:
s += " with variance"
else:
s += " (no [co]variance)"
if self._fits is None:
s += " (closed)"
if hasattr(self, 'ebmv'): # Dereddened spectrum
s += "\n Dereddened: E(B-V)=%.3f, Rv=%.2f, ExtLaw=%s" % \
(self.ebmv, self.rv, self.law)
if hasattr(self, 'zorig'): # Deredshifted spectrum
s += "\n Deredshifted: z=%.5f, exp=%d" % (self.zorig, self.zexp)
return s
def _readFits(self, name, mode='readonly'):
"""
Initialize a Spectrum from FITS spectrum name.
'name' can be 'name[ext]', in which case only extension 'ext' is considered.
"""
# Decipher name and extension from name[EXT]
self.filename, self.ext = get_extension(name)
self._fits = pyfits.open(self.filename,
mode=mode, ignore_missing_end=True)
extnames = [h.name for h in self._fits] # "PRIMARY", etc.
try:
spec = self._fits[self.ext] # Spectrum extension
except (IndexError, KeyError,):
raise IOError("Cannot read extension %s in %s:%s" %
(self.ext, self.filename, extnames))
self._hdr = spec.header.copy() # Spectrum header
self._hdr['CRPIX1'] = self._hdr.get('CRPIX1', 1) # Make it mandatory
self.npts = self._hdr['NAXIS1']
self.step = self._hdr['CDELT1']
self.start = self._hdr['CRVAL1'] - \
(self._hdr['CRPIX1'] - 1) * self.step
self.end = self.start + (self.npts - 1) * self.step
self.x = numpy.linspace(self.start, self.end, self.npts) # Wavelength
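# Worked example of the linear-WCS convention above (hypothetical values):
# with CRVAL1=3500., CDELT1=2., CRPIX1=1. and NAXIS1=800, the axis runs from
# start = 3500 - (1 - 1)*2 = 3500 A to end = 3500 + 799*2 = 5098 A.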
self.y = spec.data.copy() # Signal
if 'VARIANCE' in extnames: # Read VARIANCE extension
vhdr = self._fits['VARIANCE'].header
vhdr['CRPIX1'] = vhdr.get('CRPIX1', 1) # Make it mandatory
try:
assert vhdr['NAXIS1'] == self.npts
assert vhdr['CDELT1'] == self.step
assert vhdr['CRVAL1'] == self._hdr['CRVAL1']
assert vhdr['CRPIX1'] == self._hdr['CRPIX1']
except AssertionError:
warnings.warn(
"%s[VARIANCE]: header incompatible with primary header" %
self.filename, RuntimeWarning)
self.varname = "%s[VARIANCE]" % (self.filename)
self.v = self._fits['VARIANCE'].data.copy() # Variance
else:
self.varname = None
self.v = None
if 'COVAR' in extnames: # Read COVAR extension
vhdr = self._fits['COVAR'].header
vhdr['CRPIX1'] = vhdr.get('CRPIX1', 1) # Make it mandatory
vhdr['CRPIX2'] = vhdr.get('CRPIX2', 1)
try:
assert vhdr['NAXIS1'] == vhdr['NAXIS2'] == self.npts
assert vhdr['CDELT1'] == vhdr['CDELT2'] == self.step
assert vhdr['CRVAL1'] == vhdr['CRVAL2'] == self._hdr['CRVAL1']
assert vhdr['CRPIX1'] == vhdr['CRPIX2'] == self._hdr['CRPIX1']
except AssertionError:
warnings.warn(
"%s[COVAR]: header incompatible with primary header" %
self.filename, RuntimeWarning)
self.covname = "%s[COVAR]" % (self.filename)
self.cov = self._fits['COVAR'].data.copy() # Lower-tri. covariance
self.cov += numpy.triu(self.cov.T, 1) # Reconstruct full cov.
else:
self.covname = None
self.cov = None
def readKey(self, keyword, default=None):
"""Read a single keyword, defaulting to *default* if any."""
if default is None:
return self._hdr[keyword]
else:
return self._hdr.get(keyword, default)
def setKey(self, keywords=(), **kwargs):
"""
Set keywords.
Set keywords from *keywords*=((key, val[, comment]),) or kwargs
'key=val' or 'key=(val, comment)'.
"""
for key in keywords:
name, val = key[0], key[1:] # name, (value, [comment])
self._hdr[name.upper()] = val
for key in kwargs:
self._hdr[key.upper()] = kwargs[key]
def deredden(self, ebmv, law='OD94', Rv=3.1):
"""
Deredden spectrum using E(B-V) and an extinction law.
:param float ebmv: E(B-V) value.
:param str law: Extinction law. Can be CCM89, OD94, FM98 or G08.
:param float Rv: Value for Rv. Default is 3.1
"""
from extinctions.extinction import extinction_factor
if hasattr(self, 'zorig'): # Spectrum has been deredshifted
raise ValueError(
"Dereddening should be done prior to deredshifting.")
# Extinction factor (<1)
ext = extinction_factor(self.x, ebmv, rv=Rv, law=law)
self.y /= ext
if self.hasVar:
self.v /= ext**2
if self.hasCov:
self.cov /= ext**2
self.ebmv = ebmv # Mark spectrum as unreddened
self.rv = Rv
self.law = law
self.setKey(MWEBMV=(ebmv, "MW E(B-V) correction applied"),
MWRV=(Rv, "R_V used for MW E(B-V) correction"),
MWLAW=(law, "Extinction law used for MW correction"))
def deredshift(self, z, exp=3):
"""
Deredshift spectrum from z to 0, and apply a (1+z)**exp flux-correction.
exp=3 is for erg/s/cm2/A spectra to be later corrected using proper
(comoving) distance but *not* luminosity distance.
"""
zp1 = 1. + z
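# Worked example (hypothetical numbers): for z=0.05 and exp=3, wavelengths
# are divided by 1.05 and fluxes multiplied by 1.05**3 ~ 1.158; variances
# and covariances are scaled by the square of that flux factor.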
self.x /= zp1 # Wavelength correction
self.step /= zp1
self.start, self.end = self.x[0], self.x[-1]
zp1exp = zp1 ** exp
self.y *= zp1exp # Flux correction
if self.hasVar:
self.v *= zp1exp**2
if self.hasCov:
self.cov *= zp1exp**2
self.zorig = z # Mark spectrum as deredshifted
self.zexp = exp
self.setKey(ZORIG=(z, "Redshift correction applied"),
ZEXP=(exp, "Flux correction applied is (1+z)**zexp"))
def get_extension(name, default=0):
"""Return name, EXT from name[ext], using default ext if unspecified."""
# Decipher name and extension from name[EXT]
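# Examples (hypothetical names):
#   get_extension('spec.fits[VARIANCE]') -> ('spec.fits', 'VARIANCE')
#   get_extension('spec.fits[2]')        -> ('spec.fits', 2)
#   get_extension('spec.fits')           -> ('spec.fits', 0)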
search = re.search(r'(.*)\[(.*)\]', name)
if search:
bname, ext = search.groups()
else:
bname, ext = name, default
try:
ext = int(ext) # ext is an integer
except ValueError:
ext = ext.upper() # ext is a string
return bname, ext
|
|
# RedSquare
# unicornhat.device handler
# this one uses the Pimoroni standard library and Pillow
# 8x8 16K RGB LED Matrix
# Copyright (c) 2017 full phat products
#
import threading
import time
import sos
unicorn = None
unicornlib = None
# worker thread currently servicing the queue (created on demand)
maxThread = None
# list of queued requests
queue = None
RotationOffset = 0
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# flash: alternate between the two given images
#
def flash_image(unicorn, statusIcon, imageIcon, repeat=3):
for i in range(repeat):
show_image(unicorn, 'icons/' + statusIcon + '.png')
time.sleep(1.0)
show_image(unicorn, 'icons/' + imageIcon + '.png')
time.sleep(1.0)
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# init: try to load up unicornlib and unicornhat
#
def init():
global unicorn
global Image
global ImageDraw
global ImageFont
global unicornlib
try:
import unicornlib
sos.sos_print("Got unicornlib...")
except:
sos.sos_fail("Couldn't load unicornlib")
return False
try:
import unicornhat as unicorn
sos.sos_print("Got unicornhat...")
except:
sos.sos_fail("Couldn't load unicornhat")
sos.sos_print("To install the support library, see:")
sos.sos_print("https://github.com/pimoroni/unicorn-hat-hd")
return False
global queue
queue = []
# initialise the HAT
sos.sos_print("Configuring device...")
fBright = 0.7
global RotationOffset
import ConfigParser
config = ConfigParser.RawConfigParser(allow_no_value=True)
files = config.read('etc/unicorn.rc')
if len(files) == 1:
# got config...
sos.sos_print("Configuration file found...")
success,f = sos.ConfigTryGetFloat(config, "general", "brightness")
if success:
fBright = f
success,i = sos.ConfigTryGetInt(config, "general", "rotation")
if success:
RotationOffset = i
else:
# no config
sos.sos_info("No configuration file: using defaults")
sos.sos_info("Brightness: " + str(fBright))
sos.sos_info("Rotation: " + str(RotationOffset))
#unicorn.set_layout(unicorn.HAT)
unicorn.brightness(fBright)
#show_image(unicorn, "./icons/save.png")
#time.sleep(0.5)
unicorn.off()
unicornlib.scroll_text(unicorn, RotationOffset, "RSOS 2.07", "info")
return True
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# device handler
# return bool,string (True,"OK") if query was handled, or false otherwise
#
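# Example (hypothetical) queryDict, with list values as produced by e.g.
# urlparse.parse_qs:
#   {'mode': ['text'], 'text': ['Hello world'], 'icon': ['info']}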
def handle(queryDict, apiVersion=0, unit=0):
# queue the request...
global queue
queue.append(queryDict)
global maxThread
if maxThread:
if maxThread.is_alive():
print ' [unicornhat] busy: added to queue...'
return (True, "Request queued")
# start a thread to display the message
#maxThread = threading.Thread(target=device_thread, args=(queryDict,))
maxThread = threading.Thread(target=device_thread)
maxThread.daemon = True
maxThread.start()
return (True, "OK")
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# thread that talks to the unicornhat
#
def device_thread():
global queue
while len(queue) > 0:
# pop from top of list and process it
request = queue.pop(0)
process(request)
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# process a single queued request
#
def process(queryDict):
global unicorn
global unicornlib
global RotationOffset
# set defaults
_device = '0'
mode = 'text'
icon = ''
# read variables...
if 'icon' in queryDict:
icon = queryDict['icon'][0]
if 'mode' in queryDict:
mode = queryDict['mode'][0]
# process based on mode...
if mode == "off":
unicorn.off()
return (True, "Device turned off")
elif mode == "icon":
# get supplied info
priority = 0
# required elements...
if icon == '':
print ' [unicornhat]: no icon supplied!'
return (False, "No icon provided")
# if 'device' in queryDict:
# _device = queryDict['device'][0]
elif mode == "text":
_font = ''
_invert = '0'
text = ''
if 'text' in queryDict:
text = queryDict['text'][0]
if text != "":
# good to go!
global RotationOffset
sos.sos_print("Displaying '" + text + "'")
unicornlib.scroll_text(unicorn, RotationOffset, text, icon)
else:
sos.sos_fail("No text to display")
return (False, "Nothing to do")
return (True, "OK")
# if 'invert' in queryDict:
# _invert = queryDict['invert'][0]
# if 'font' in queryDict:
# _font = queryDict['font'][0]
# if 'priority' in queryDict:
# _priority = queryDict['priority'][0]
# determine status icon to use
# pri = 0
# try:
# pri = int(_priority)
# except:
# print ' [unicornhat]: bad priority: ' + _priority
# if pri > 1:
# _statusIcon = 'alert'
# elif pri == 1:
# _statusIcon = 'warn'
# else:
# _statusIcon = 'info'
# good to go!
# flash_image(unicorn, _statusIcon, _icon)
# if _text == "":
# return (False, "Nothing to display")
if __name__ == '__main__':
init()
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import TestCase
from contextlib import contextmanager
from posix import stat_result, statvfs_result
import os
import swift.common.constraints
from swift.common.swob import Request
from swift.common.middleware import recon
class FakeApp(object):
def __call__(self, env, start_response):
return "FAKE APP"
def start_response(*args):
pass
class FakeFromCache(object):
def __init__(self, out=None):
self.fakeout = out
self.fakeout_calls = []
def fake_from_recon_cache(self, *args, **kwargs):
self.fakeout_calls.append((args, kwargs))
return self.fakeout
class OpenAndReadTester(object):
def __init__(self, output_iter):
self.index = 0
self.out_len = len(output_iter) - 1
self.data = output_iter
self.output_iter = iter(output_iter)
self.read_calls = []
self.open_calls = []
def __iter__(self):
return self
def next(self):
if self.index == self.out_len:
raise StopIteration
else:
line = self.data[self.index]
self.index += 1
return line
def read(self, *args, **kwargs):
self.read_calls.append((args, kwargs))
try:
return self.output_iter.next()
except StopIteration:
return ''
@contextmanager
def open(self, *args, **kwargs):
self.open_calls.append((args, kwargs))
yield self
class MockOS(object):
def __init__(self, ls_out=None, pe_out=None, statvfs_out=None,
lstat_out=(1, 1, 5, 4, 5, 5, 55, 55, 55, 55)):
self.ls_output = ls_out
self.path_exists_output = pe_out
self.statvfs_output = statvfs_out
self.lstat_output_tuple = lstat_out
self.listdir_calls = []
self.statvfs_calls = []
self.path_exists_calls = []
self.lstat_calls = []
def fake_listdir(self, *args, **kwargs):
self.listdir_calls.append((args, kwargs))
return self.ls_output
def fake_path_exists(self, *args, **kwargs):
self.path_exists_calls.append((args, kwargs))
return self.path_exists_output
def fake_statvfs(self, *args, **kwargs):
self.statvfs_calls.append((args, kwargs))
return statvfs_result(self.statvfs_output)
def fake_lstat(self, *args, **kwargs):
self.lstat_calls.append((args, kwargs))
return stat_result(self.lstat_output_tuple)
class FakeRecon(object):
def __init__(self):
self.fake_replication_rtype = None
self.fake_updater_rtype = None
self.fake_auditor_rtype = None
self.fake_expirer_rtype = None
def fake_mem(self):
return {'memtest': "1"}
def fake_load(self):
return {'loadtest': "1"}
def fake_async(self):
return {'asynctest': "1"}
def fake_get_device_info(self):
return {"/srv/1/node": ["sdb1"]}
def fake_replication(self, recon_type):
self.fake_replication_rtype = recon_type
return {'replicationtest': "1"}
def fake_updater(self, recon_type):
self.fake_updater_rtype = recon_type
return {'updatertest': "1"}
def fake_auditor(self, recon_type):
self.fake_auditor_rtype = recon_type
return {'auditortest': "1"}
def fake_expirer(self, recon_type):
self.fake_expirer_rtype = recon_type
return {'expirertest': "1"}
def fake_mounted(self):
return {'mountedtest': "1"}
def fake_unmounted(self):
return {'unmountedtest': "1"}
def fake_no_unmounted(self):
return []
def fake_diskusage(self):
return {'diskusagetest': "1"}
def fake_ringmd5(self):
return {'ringmd5test': "1"}
def fake_quarantined(self):
return {'quarantinedtest': "1"}
def fake_sockstat(self):
return {'sockstattest': "1"}
def nocontent(self):
return None
def raise_IOError(self, *args, **kwargs):
raise IOError
def raise_ValueError(self, *args, **kwargs):
raise ValueError
def raise_Exception(self, *args, **kwargs):
raise Exception
class TestReconSuccess(TestCase):
def setUp(self):
self.app = recon.ReconMiddleware(FakeApp(), {})
self.mockos = MockOS()
self.fakecache = FakeFromCache()
self.real_listdir = os.listdir
self.real_path_exists = os.path.exists
self.real_lstat = os.lstat
self.real_statvfs = os.statvfs
os.listdir = self.mockos.fake_listdir
os.path.exists = self.mockos.fake_path_exists
os.lstat = self.mockos.fake_lstat
os.statvfs = self.mockos.fake_statvfs
self.real_from_cache = self.app._from_recon_cache
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
self.frecon = FakeRecon()
def tearDown(self):
os.listdir = self.real_listdir
os.path.exists = self.real_path_exists
os.lstat = self.real_lstat
os.statvfs = self.real_statvfs
del self.mockos
self.app._from_recon_cache = self.real_from_cache
del self.fakecache
def test_from_recon_cache(self):
oart = OpenAndReadTester(['{"notneeded": 5, "testkey1": "canhazio"}'])
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart.open)
self.assertEquals(oart.read_calls, [((), {})])
self.assertEquals(oart.open_calls, [(('test.cache', 'r'), {})])
self.assertEquals(rv, {'notpresentkey': None, 'testkey1': 'canhazio'})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_ioerror(self):
oart = self.frecon.raise_IOError
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEquals(rv, {'notpresentkey': None, 'testkey1': None})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_valueerror(self):
oart = self.frecon.raise_ValueError
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEquals(rv, {'notpresentkey': None, 'testkey1': None})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_exception(self):
oart = self.frecon.raise_Exception
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEquals(rv, {'notpresentkey': None, 'testkey1': None})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_get_mounted(self):
mounts_content = ['rootfs / rootfs rw 0 0',
'none /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0',
'none /proc proc rw,nosuid,nodev,noexec,relatime 0 0',
'none /dev devtmpfs rw,relatime,size=248404k,nr_inodes=62101,mode=755 0 0',
'none /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0',
'/dev/disk/by-uuid/e5b143bd-9f31-49a7-b018-5e037dc59252 / ext4 rw,relatime,errors=remount-ro,barrier=1,data=ordered 0 0',
'none /sys/fs/fuse/connections fusectl rw,relatime 0 0',
'none /sys/kernel/debug debugfs rw,relatime 0 0',
'none /sys/kernel/security securityfs rw,relatime 0 0',
'none /dev/shm tmpfs rw,nosuid,nodev,relatime 0 0',
'none /var/run tmpfs rw,nosuid,relatime,mode=755 0 0',
'none /var/lock tmpfs rw,nosuid,nodev,noexec,relatime 0 0',
'none /lib/init/rw tmpfs rw,nosuid,relatime,mode=755 0 0',
'/dev/loop0 /mnt/sdb1 xfs rw,noatime,nodiratime,attr2,nobarrier,logbufs=8,noquota 0 0',
'rpc_pipefs /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0',
'nfsd /proc/fs/nfsd nfsd rw,relatime 0 0',
'none /proc/fs/vmblock/mountPoint vmblock rw,relatime 0 0',
'']
mounted_resp = [{'device': 'rootfs', 'path': '/'},
{'device': 'none', 'path': '/sys'},
{'device': 'none', 'path': '/proc'},
{'device': 'none', 'path': '/dev'},
{'device': 'none', 'path': '/dev/pts'},
{'device': '/dev/disk/by-uuid/e5b143bd-9f31-49a7-b018-5e037dc59252', 'path': '/'},
{'device': 'none', 'path': '/sys/fs/fuse/connections'},
{'device': 'none', 'path': '/sys/kernel/debug'},
{'device': 'none', 'path': '/sys/kernel/security'},
{'device': 'none', 'path': '/dev/shm'},
{'device': 'none', 'path': '/var/run'},
{'device': 'none', 'path': '/var/lock'},
{'device': 'none', 'path': '/lib/init/rw'},
{'device': '/dev/loop0', 'path': '/mnt/sdb1'},
{'device': 'rpc_pipefs', 'path': '/var/lib/nfs/rpc_pipefs'},
{'device': 'nfsd', 'path': '/proc/fs/nfsd'},
{'device': 'none', 'path': '/proc/fs/vmblock/mountPoint'}]
oart = OpenAndReadTester(mounts_content)
rv = self.app.get_mounted(openr=oart.open)
self.assertEquals(oart.open_calls, [(('/proc/mounts', 'r'), {})])
self.assertEquals(rv, mounted_resp)
def test_get_load(self):
oart = OpenAndReadTester(['0.03 0.03 0.00 1/220 16306'])
rv = self.app.get_load(openr=oart.open)
self.assertEquals(oart.read_calls, [((), {})])
self.assertEquals(oart.open_calls, [(('/proc/loadavg', 'r'), {})])
self.assertEquals(rv, {'5m': 0.029999999999999999, '15m': 0.0,
'processes': 16306, 'tasks': '1/220',
'1m': 0.029999999999999999})
def test_get_mem(self):
meminfo_content = ['MemTotal: 505840 kB',
'MemFree: 26588 kB',
'Buffers: 44948 kB',
'Cached: 146376 kB',
'SwapCached: 14736 kB',
'Active: 194900 kB',
'Inactive: 193412 kB',
'Active(anon): 94208 kB',
'Inactive(anon): 102848 kB',
'Active(file): 100692 kB',
'Inactive(file): 90564 kB',
'Unevictable: 0 kB',
'Mlocked: 0 kB',
'SwapTotal: 407544 kB',
'SwapFree: 313436 kB',
'Dirty: 104 kB',
'Writeback: 0 kB',
'AnonPages: 185268 kB',
'Mapped: 9592 kB',
'Shmem: 68 kB',
'Slab: 61716 kB',
'SReclaimable: 46620 kB',
'SUnreclaim: 15096 kB',
'KernelStack: 1760 kB',
'PageTables: 8832 kB',
'NFS_Unstable: 0 kB',
'Bounce: 0 kB',
'WritebackTmp: 0 kB',
'CommitLimit: 660464 kB',
'Committed_AS: 565608 kB',
'VmallocTotal: 34359738367 kB',
'VmallocUsed: 266724 kB',
'VmallocChunk: 34359467156 kB',
'HardwareCorrupted: 0 kB',
'HugePages_Total: 0',
'HugePages_Free: 0',
'HugePages_Rsvd: 0',
'HugePages_Surp: 0',
'Hugepagesize: 2048 kB',
'DirectMap4k: 10240 kB',
'DirectMap2M: 514048 kB',
'']
meminfo_resp = {'WritebackTmp': '0 kB',
'SwapTotal': '407544 kB',
'Active(anon)': '94208 kB',
'SwapFree': '313436 kB',
'DirectMap4k': '10240 kB',
'KernelStack': '1760 kB',
'MemFree': '26588 kB',
'HugePages_Rsvd': '0',
'Committed_AS': '565608 kB',
'Active(file)': '100692 kB',
'NFS_Unstable': '0 kB',
'VmallocChunk': '34359467156 kB',
'Writeback': '0 kB',
'Inactive(file)': '90564 kB',
'MemTotal': '505840 kB',
'VmallocUsed': '266724 kB',
'HugePages_Free': '0',
'AnonPages': '185268 kB',
'Active': '194900 kB',
'Inactive(anon)': '102848 kB',
'CommitLimit': '660464 kB',
'Hugepagesize': '2048 kB',
'Cached': '146376 kB',
'SwapCached': '14736 kB',
'VmallocTotal': '34359738367 kB',
'Shmem': '68 kB',
'Mapped': '9592 kB',
'SUnreclaim': '15096 kB',
'Unevictable': '0 kB',
'SReclaimable': '46620 kB',
'Mlocked': '0 kB',
'DirectMap2M': '514048 kB',
'HugePages_Surp': '0',
'Bounce': '0 kB',
'Inactive': '193412 kB',
'PageTables': '8832 kB',
'HardwareCorrupted': '0 kB',
'HugePages_Total': '0',
'Slab': '61716 kB',
'Buffers': '44948 kB',
'Dirty': '104 kB'}
oart = OpenAndReadTester(meminfo_content)
rv = self.app.get_mem(openr=oart.open)
self.assertEquals(oart.open_calls, [(('/proc/meminfo', 'r'), {})])
self.assertEquals(rv, meminfo_resp)
def test_get_async_info(self):
from_cache_response = {'async_pending': 5}
self.fakecache.fakeout = from_cache_response
rv = self.app.get_async_info()
self.assertEquals(rv, {'async_pending': 5})
def test_get_replication_info_account(self):
from_cache_response = {"replication_stats": {
"attempted": 1, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"no_change": 2, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 1333044050.855202,
"success": 2, "ts_repl": 0 },
"replication_time": 0.2615511417388916,
"replication_last": 1357969645.25}
self.fakecache.fakeout = from_cache_response
rv = self.app.get_replication_info('account')
self.assertEquals(self.fakecache.fakeout_calls,
[((['replication_time', 'replication_stats',
'replication_last'],
'/var/cache/swift/account.recon'), {})])
self.assertEquals(rv, {"replication_stats": {
"attempted": 1, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"no_change": 2, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 1333044050.855202,
"success": 2, "ts_repl": 0 },
"replication_time": 0.2615511417388916,
"replication_last": 1357969645.25})
def test_get_replication_info_container(self):
from_cache_response = {"replication_time": 200.0,
"replication_stats": {
"attempted": 179, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"no_change": 358, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 5.5, "success": 358,
"ts_repl": 0},
"replication_last": 1357969645.25}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_replication_info('container')
self.assertEquals(self.fakecache.fakeout_calls,
[((['replication_time', 'replication_stats',
'replication_last'],
'/var/cache/swift/container.recon'), {})])
self.assertEquals(rv, {"replication_time": 200.0,
"replication_stats": {
"attempted": 179, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"no_change": 358, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 5.5, "success": 358,
"ts_repl": 0},
"replication_last": 1357969645.25})
def test_get_replication_object(self):
from_cache_response = {"object_replication_time": 200.0,
"object_replication_last": 1357962809.15}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_replication_info('object')
self.assertEquals(self.fakecache.fakeout_calls,
[((['object_replication_time',
'object_replication_last'],
'/var/cache/swift/object.recon'), {})])
self.assertEquals(rv, {'object_replication_time': 200.0,
'object_replication_last': 1357962809.15})
def test_get_updater_info_container(self):
from_cache_response = {"container_updater_sweep": 18.476239919662476}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_updater_info('container')
self.assertEquals(self.fakecache.fakeout_calls,
[((['container_updater_sweep'],
'/var/cache/swift/container.recon'), {})])
self.assertEquals(rv, {"container_updater_sweep": 18.476239919662476})
def test_get_updater_info_object(self):
from_cache_response = {"object_updater_sweep": 0.79848217964172363}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_updater_info('object')
self.assertEquals(self.fakecache.fakeout_calls,
[((['object_updater_sweep'],
'/var/cache/swift/object.recon'), {})])
self.assertEquals(rv, {"object_updater_sweep": 0.79848217964172363})
def test_get_auditor_info_account(self):
from_cache_response = {"account_auditor_pass_completed": 0.24,
"account_audits_failed": 0,
"account_audits_passed": 6,
"account_audits_since": "1333145374.1373529"}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_auditor_info('account')
self.assertEquals(self.fakecache.fakeout_calls,
[((['account_audits_passed',
'account_auditor_pass_completed',
'account_audits_since',
'account_audits_failed'],
'/var/cache/swift/account.recon'), {})])
self.assertEquals(rv, {"account_auditor_pass_completed": 0.24,
"account_audits_failed": 0,
"account_audits_passed": 6,
"account_audits_since": "1333145374.1373529"})
def test_get_auditor_info_container(self):
from_cache_response = {"container_auditor_pass_completed": 0.24,
"container_audits_failed": 0,
"container_audits_passed": 6,
"container_audits_since": "1333145374.1373529"}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_auditor_info('container')
self.assertEquals(self.fakecache.fakeout_calls,
[((['container_audits_passed',
'container_auditor_pass_completed',
'container_audits_since',
'container_audits_failed'],
'/var/cache/swift/container.recon'), {})])
self.assertEquals(rv, {"container_auditor_pass_completed": 0.24,
"container_audits_failed": 0,
"container_audits_passed": 6,
"container_audits_since": "1333145374.1373529"})
def test_get_auditor_info_object(self):
from_cache_response = {"object_auditor_stats_ALL": {
"audit_time": 115.14418768882751,
"bytes_processed": 234660,
"completed": 115.4512460231781,
"errors": 0,
"files_processed": 2310,
"quarantined": 0 },
"object_auditor_stats_ZBF": {
"audit_time": 45.877294063568115,
"bytes_processed": 0,
"completed": 46.181446075439453,
"errors": 0,
"files_processed": 2310,
"quarantined": 0 }}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_auditor_info('object')
self.assertEquals(self.fakecache.fakeout_calls,
[((['object_auditor_stats_ALL',
'object_auditor_stats_ZBF'],
'/var/cache/swift/object.recon'), {})])
self.assertEquals(rv, {"object_auditor_stats_ALL": {
"audit_time": 115.14418768882751,
"bytes_processed": 234660,
"completed": 115.4512460231781,
"errors": 0,
"files_processed": 2310,
"quarantined": 0 },
"object_auditor_stats_ZBF": {
"audit_time": 45.877294063568115,
"bytes_processed": 0,
"completed": 46.181446075439453,
"errors": 0,
"files_processed": 2310,
"quarantined": 0 }})
def test_get_unmounted(self):
def fake_checkmount_true(*args):
return True
unmounted_resp = [{'device': 'fakeone', 'mounted': False},
{'device': 'faketwo', 'mounted': False}]
self.mockos.ls_output=['fakeone', 'faketwo']
self.mockos.path_exists_output=False
real_checkmount = swift.common.constraints.check_mount
swift.common.constraints.check_mount = fake_checkmount_true
rv = self.app.get_unmounted()
swift.common.constraints.check_mount = real_checkmount
self.assertEquals(self.mockos.listdir_calls, [(('/srv/node/',), {})])
self.assertEquals(rv, unmounted_resp)
def test_no_get_unmounted(self):
def fake_checkmount_true(*args):
return True
unmounted_resp = []
self.mockos.ls_output=[]
self.mockos.path_exists_output=False
real_checkmount = swift.common.constraints.check_mount
swift.common.constraints.check_mount = fake_checkmount_true
rv = self.app.get_unmounted()
swift.common.constraints.check_mount = real_checkmount
self.assertEquals(self.mockos.listdir_calls, [(('/srv/node/',), {})])
self.assertEquals(rv, unmounted_resp)
def test_get_diskusage(self):
#posix.statvfs_result(f_bsize=4096, f_frsize=4096, f_blocks=1963185,
# f_bfree=1113075, f_bavail=1013351, f_files=498736,
# f_ffree=397839, f_favail=397839, f_flag=0,
# f_namemax=255)
statvfs_content=(4096, 4096, 1963185, 1113075, 1013351, 498736, 397839,
397839, 0, 255)
du_resp = [{'device': 'canhazdrive1', 'avail': 4150685696,
'mounted': True, 'used': 3890520064, 'size': 8041205760}]
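# The expected du_resp values are consistent with (f_frsize = 4096 bytes):
#   size  = f_blocks * f_frsize = 1963185 * 4096 = 8041205760
#   avail = f_bavail * f_frsize = 1013351 * 4096 = 4150685696
#   used  = size - avail        =  949834 * 4096 = 3890520064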
self.mockos.ls_output=['canhazdrive1']
self.mockos.statvfs_output=statvfs_content
self.mockos.path_exists_output=True
rv = self.app.get_diskusage()
self.assertEquals(self.mockos.statvfs_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEquals(rv, du_resp)
def test_get_diskusage_checkmount_fail(self):
du_resp = [{'device': 'canhazdrive1', 'avail': '',
'mounted': False, 'used': '', 'size': ''}]
self.mockos.ls_output=['canhazdrive1']
self.mockos.path_exists_output=False
rv = self.app.get_diskusage()
self.assertEquals(self.mockos.listdir_calls,[(('/srv/node/',), {})])
self.assertEquals(self.mockos.path_exists_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEquals(rv, du_resp)
def test_get_quarantine_count(self):
#posix.lstat_result(st_mode=1, st_ino=2, st_dev=3, st_nlink=4,
# st_uid=5, st_gid=6, st_size=7, st_atime=8,
# st_mtime=9, st_ctime=10)
lstat_content = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
self.mockos.ls_output=['sda']
self.mockos.path_exists_output=True
self.mockos.lstat_output_tuple = lstat_content
rv = self.app.get_quarantine_count()
self.assertEquals(rv, {'objects': 2, 'accounts': 2, 'containers': 2})
def test_get_socket_info(self):
sockstat_content = ['sockets: used 271',
'TCP: inuse 30 orphan 0 tw 0 alloc 31 mem 0',
'UDP: inuse 16 mem 4', 'UDPLITE: inuse 0',
'RAW: inuse 0', 'FRAG: inuse 0 memory 0',
'']
sockstat6_content = ['TCP6: inuse 1',
'UDP6: inuse 3',
'UDPLITE6: inuse 0',
'RAW6: inuse 0',
'FRAG6: inuse 0 memory 0',
'']
oart = OpenAndReadTester(sockstat_content)
rv = self.app.get_socket_info(openr=oart.open)
self.assertEquals(oart.open_calls, [(('/proc/net/sockstat', 'r'), {}),
(('/proc/net/sockstat6', 'r'), {})])
class TestReconMiddleware(unittest.TestCase):
def setUp(self):
self.frecon = FakeRecon()
self.app = recon.ReconMiddleware(FakeApp(), {'object_recon': "true"})
#self.app.object_recon = True
self.app.get_mem = self.frecon.fake_mem
self.app.get_load = self.frecon.fake_load
self.app.get_async_info = self.frecon.fake_async
self.app.get_device_info = self.frecon.fake_get_device_info
self.app.get_replication_info = self.frecon.fake_replication
self.app.get_auditor_info = self.frecon.fake_auditor
self.app.get_updater_info = self.frecon.fake_updater
self.app.get_expirer_info = self.frecon.fake_expirer
self.app.get_mounted = self.frecon.fake_mounted
self.app.get_unmounted = self.frecon.fake_unmounted
self.app.get_diskusage = self.frecon.fake_diskusage
self.app.get_ring_md5 = self.frecon.fake_ringmd5
self.app.get_quarantine_count = self.frecon.fake_quarantined
self.app.get_socket_info = self.frecon.fake_sockstat
def test_recon_get_mem(self):
get_mem_resp = ['{"memtest": "1"}']
req = Request.blank('/recon/mem', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_mem_resp)
def test_recon_get_load(self):
get_load_resp = ['{"loadtest": "1"}']
req = Request.blank('/recon/load', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_load_resp)
def test_recon_get_async(self):
get_async_resp = ['{"asynctest": "1"}']
req = Request.blank('/recon/async', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_async_resp)
def test_get_device_info(self):
get_device_resp = ['{"/srv/1/node": ["sdb1"]}']
req = Request.blank('/recon/devices',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_device_resp)
def test_recon_get_replication_notype(self):
get_replication_resp = ['{"replicationtest": "1"}']
req = Request.blank('/recon/replication',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_replication_resp)
self.assertEquals(self.frecon.fake_replication_rtype, 'object')
self.frecon.fake_replication_rtype = None
def test_recon_get_replication_all(self):
get_replication_resp = ['{"replicationtest": "1"}']
#test account
req = Request.blank('/recon/replication/account',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_replication_resp)
self.assertEquals(self.frecon.fake_replication_rtype, 'account')
self.frecon.fake_replication_rtype = None
#test container
req = Request.blank('/recon/replication/container',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_replication_resp)
self.assertEquals(self.frecon.fake_replication_rtype, 'container')
self.frecon.fake_replication_rtype = None
#test object
req = Request.blank('/recon/replication/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_replication_resp)
self.assertEquals(self.frecon.fake_replication_rtype, 'object')
self.frecon.fake_replication_rtype = None
def test_recon_get_auditor_invalid(self):
get_auditor_resp = ['Invalid path: /recon/auditor/invalid']
req = Request.blank('/recon/auditor/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_auditor_resp)
def test_recon_get_auditor_notype(self):
get_auditor_resp = ['Invalid path: /recon/auditor']
req = Request.blank('/recon/auditor',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_auditor_resp)
def test_recon_get_auditor_all(self):
get_auditor_resp = ['{"auditortest": "1"}']
req = Request.blank('/recon/auditor/account',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_auditor_resp)
self.assertEquals(self.frecon.fake_auditor_rtype, 'account')
self.frecon.fake_auditor_rtype = None
req = Request.blank('/recon/auditor/container',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_auditor_resp)
self.assertEquals(self.frecon.fake_auditor_rtype, 'container')
self.frecon.fake_auditor_rtype = None
req = Request.blank('/recon/auditor/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_auditor_resp)
self.assertEquals(self.frecon.fake_auditor_rtype, 'object')
self.frecon.fake_auditor_rtype = None
def test_recon_get_updater_invalid(self):
get_updater_resp = ['Invalid path: /recon/updater/invalid']
req = Request.blank('/recon/updater/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_updater_resp)
def test_recon_get_updater_notype(self):
get_updater_resp = ['Invalid path: /recon/updater']
req = Request.blank('/recon/updater',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_updater_resp)
def test_recon_get_updater(self):
get_updater_resp = ['{"updatertest": "1"}']
req = Request.blank('/recon/updater/container',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(self.frecon.fake_updater_rtype, 'container')
self.frecon.fake_updater_rtype = None
self.assertEquals(resp, get_updater_resp)
req = Request.blank('/recon/updater/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_updater_resp)
self.assertEquals(self.frecon.fake_updater_rtype, 'object')
self.frecon.fake_updater_rtype = None
def test_recon_get_expirer_invalid(self):
get_updater_resp = ['Invalid path: /recon/expirer/invalid']
req = Request.blank('/recon/expirer/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_updater_resp)
def test_recon_get_expirer_notype(self):
get_updater_resp = ['Invalid path: /recon/expirer']
req = Request.blank('/recon/expirer',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_updater_resp)
def test_recon_get_expirer_object(self):
get_expirer_resp = ['{"expirertest": "1"}']
req = Request.blank('/recon/expirer/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_expirer_resp)
self.assertEquals(self.frecon.fake_expirer_rtype, 'object')
self.frecon.fake_expirer_rtype = None
def test_recon_get_mounted(self):
get_mounted_resp = ['{"mountedtest": "1"}']
req = Request.blank('/recon/mounted',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_mounted_resp)
def test_recon_get_unmounted(self):
get_unmounted_resp = ['{"unmountedtest": "1"}']
self.app.get_unmounted = self.frecon.fake_unmounted
req = Request.blank('/recon/unmounted',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_unmounted_resp)
def test_recon_no_get_unmounted(self):
get_unmounted_resp = '[]'
self.app.get_unmounted = self.frecon.fake_no_unmounted
req = Request.blank('/recon/unmounted',
environ={'REQUEST_METHOD': 'GET'})
resp = ''.join(self.app(req.environ, start_response))
self.assertEquals(resp, get_unmounted_resp)
def test_recon_get_diskusage(self):
get_diskusage_resp = ['{"diskusagetest": "1"}']
req = Request.blank('/recon/diskusage',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_diskusage_resp)
def test_recon_get_ringmd5(self):
get_ringmd5_resp = ['{"ringmd5test": "1"}']
req = Request.blank('/recon/ringmd5',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_ringmd5_resp)
def test_recon_get_quarantined(self):
get_quarantined_resp = ['{"quarantinedtest": "1"}']
req = Request.blank('/recon/quarantined',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_quarantined_resp)
def test_recon_get_sockstat(self):
get_sockstat_resp = ['{"sockstattest": "1"}']
req = Request.blank('/recon/sockstat',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_sockstat_resp)
def test_recon_invalid_path(self):
req = Request.blank('/recon/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, ['Invalid path: /recon/invalid'])
def test_no_content(self):
self.app.get_load = self.frecon.nocontent
req = Request.blank('/recon/load', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, ['Internal server error.'])
def test_recon_pass(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, 'FAKE APP')
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Pelix remote services: XML-RPC implementation
Based on standard package xmlrpclib
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.7
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 5, 7)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# iPOPO decorators
from pelix.ipopo.decorators import ComponentFactory, Requires, Validate, \
Invalidate, Property, Provides
# Pelix constants
from pelix.utilities import to_str
import pelix.http
import pelix.remote
import pelix.remote.transport.commons as commons
# Standard library
import logging
# XML RPC modules
try:
# Python 3
# pylint: disable=F0401
from xmlrpc.server import SimpleXMLRPCDispatcher
import xmlrpc.client as xmlrpclib
except ImportError:
# Python 2
# pylint: disable=F0401
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher
import xmlrpclib
# ------------------------------------------------------------------------------
XMLRPC_CONFIGURATION = 'xmlrpc'
""" Remote Service configuration constant """
PROP_XMLRPC_URL = '{0}.url'.format(XMLRPC_CONFIGURATION)
""" XML-RPC servlet URL """
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class _XmlRpcServlet(SimpleXMLRPCDispatcher):
"""
An XML-RPC servlet that can be registered in the Pelix HTTP service
Calls the dispatch method given in the constructor
"""
def __init__(self, dispatch_method, encoding=None):
"""
Sets up the servlet
"""
SimpleXMLRPCDispatcher.__init__(self, allow_none=True,
encoding=encoding)
# Register the system.* functions
self.register_introspection_functions()
# Make a link to the dispatch method
self._dispatch_method = dispatch_method
def _simple_dispatch(self, name, params):
"""
Dispatch method
"""
try:
# Internal method
return self.funcs[name](*params)
except KeyError:
# Other method
pass
# Call the other method outside the except block, to avoid messy logs
# in case of error
return self._dispatch_method(name, params)
def do_POST(self, request, response):
"""
Handles an HTTP POST request
:param request: The HTTP request bean
:param response: The HTTP response handler
"""
# Get the request content
data = to_str(request.read_data())
# Dispatch
result = self._marshaled_dispatch(data, self._simple_dispatch)
# Send the result
response.send_content(200, result, 'text/xml')
# ------------------------------------------------------------------------------
@ComponentFactory(pelix.remote.FACTORY_TRANSPORT_XMLRPC_EXPORTER)
@Provides(pelix.remote.SERVICE_EXPORT_PROVIDER)
@Requires('_http', pelix.http.HTTP_SERVICE)
@Property('_path', pelix.http.HTTP_SERVLET_PATH, '/XML-RPC')
@Property('_kinds', pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED,
(XMLRPC_CONFIGURATION,))
class XmlRpcServiceExporter(commons.AbstractRpcServiceExporter):
"""
XML-RPC Remote Services exporter
"""
def __init__(self):
"""
Sets up the exporter
"""
# Call parent
super(XmlRpcServiceExporter, self).__init__()
# Handled configurations
self._kinds = None
# HTTP Service
self._http = None
self._path = None
# XML-RPC servlet
self._servlet = None
def get_access(self):
"""
Retrieves the URL to access this component
"""
port = self._http.get_access()[1]
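# The doubled braces keep a literal '{server}' placeholder in the URL, so
# the exported access URL stays a template until the importer substitutes
# the remote host name (see make_service_proxy in the importer below).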
return "http://{{server}}:{0}{1}".format(port, self._path)
def make_endpoint_properties(self, svc_ref, name, fw_uid):
"""
Prepare properties for the ExportEndpoint to be created
:param svc_ref: Service reference
:param name: Endpoint name
:param fw_uid: Framework UID
:return: A dictionary of extra endpoint properties
"""
return {PROP_XMLRPC_URL: self.get_access()}
@Validate
def validate(self, context):
"""
Component validated
"""
# Call parent
super(XmlRpcServiceExporter, self).validate(context)
# Create/register the servlet
self._servlet = _XmlRpcServlet(self.dispatch)
self._http.register_servlet(self._path, self._servlet)
@Invalidate
def invalidate(self, context):
"""
Component invalidated
"""
# Unregister the servlet
self._http.unregister(None, self._servlet)
# Call parent
super(XmlRpcServiceExporter, self).invalidate(context)
# Clean up members
self._servlet = None
# ------------------------------------------------------------------------------
class _ServiceCallProxy(object):
"""
Service call proxy
"""
def __init__(self, name, url):
"""
Sets up the call proxy
:param name: Endpoint name
:param url: Endpoint URL
"""
self.__name = name
self.__url = url
def __getattr__(self, name):
"""
Prefixes the requested attribute name by the endpoint name
"""
# Make a new proxy for each call: the underlying ServerProxy re-uses the
# same connection when possible, so sharing a single instance between
# threads could interleave a new request before the previous response has
# been read.
proxy = xmlrpclib.ServerProxy(self.__url, allow_none=True)
return getattr(proxy, "{0}.{1}".format(self.__name, name))
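# Minimal usage sketch (the endpoint name and URL below are hypothetical,
# not part of this module): each attribute access builds a fresh proxy.
#
#   proxy = _ServiceCallProxy("sample.service", "http://localhost:8080/XML-RPC")
#   proxy.echo("hello")   # sent as "sample.service.echo" on the remote side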
@ComponentFactory(pelix.remote.FACTORY_TRANSPORT_XMLRPC_IMPORTER)
@Provides(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER)
@Property('_kinds', pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED,
(XMLRPC_CONFIGURATION,))
class XmlRpcServiceImporter(commons.AbstractRpcServiceImporter):
"""
XML-RPC Remote Services importer
"""
def __init__(self):
"""
Sets up the importer
"""
# Call parent
super(XmlRpcServiceImporter, self).__init__()
# Component properties
self._kinds = None
def make_service_proxy(self, endpoint):
"""
Creates the proxy for the given ImportEndpoint
:param endpoint: An ImportEndpoint bean
:return: A service proxy
"""
# Get the access URL
access_url = endpoint.properties.get(PROP_XMLRPC_URL)
if not access_url:
# No URL information
_logger.warning("No access URL given: %s", endpoint)
return
if endpoint.server is not None:
# Server information given
access_url = access_url.format(server=endpoint.server)
else:
# Use the local IP as the source server, just in case
local_server = "localhost"
access_url = access_url.format(server=local_server)
# Return the proxy
return _ServiceCallProxy(endpoint.name, access_url)
def clear_service_proxy(self, endpoint):
"""
Destroys the proxy made for the given ImportEndpoint
:param endpoint: An ImportEndpoint bean
"""
# Nothing to do
return
|
|
from typing import Any, List
import logging
from PyQt5 import QtCore, QtGui
from .devicestatuslogger import DeviceStatusLogger
from ....devices.device.variable import VariableType
from ..component import Component
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class DeviceLogManager(QtCore.QAbstractItemModel, Component):
_loggers: List[DeviceStatusLogger]
def __init__(self, **kwargs):
self._loggers = []
super().__init__(**kwargs)
def rowCount(self, parent: QtCore.QModelIndex = ...) -> int:
return len(self._loggers)
def columnCount(self, parent: QtCore.QModelIndex = ...) -> int:
return 5
def parent(self, child: QtCore.QModelIndex) -> QtCore.QModelIndex:
return QtCore.QModelIndex()
def index(self, row: int, column: int, parent: QtCore.QModelIndex = ...) -> QtCore.QModelIndex:
return self.createIndex(row, column, None)
def data(self, index: QtCore.QModelIndex, role: int = ...) -> Any:
lgr = self._loggers[index.row()]
if (index.column() == 0) and (role == QtCore.Qt.DisplayRole):
return lgr.name()
elif (index.column() == 1) and (role == QtCore.Qt.DisplayRole):
return lgr.fileName()
elif (index.column() == 2) and (role == QtCore.Qt.DisplayRole):
return f'{len(lgr)}'
elif (index.column() == 3) and (role == QtCore.Qt.DisplayRole):
return f'{lgr.period()}'
elif (index.column() == 4) and (role == QtCore.Qt.DisplayRole):
return 'Running' if lgr.isRecording() else 'Stopped'
elif (index.column() == 4) and (role == QtCore.Qt.DecorationRole):
return QtGui.QIcon(QtGui.QPixmap(':/icons/start.svg' if lgr.isRecording() else ':/icons/stop.svg'))
elif (index.column() == 0) and (role == QtCore.Qt.EditRole):
return lgr.name()
elif (index.column() == 1) and (role == QtCore.Qt.EditRole):
return lgr.fileName()
elif (index.column() == 2) and (role == QtCore.Qt.EditRole):
return None
elif (index.column() == 3) and (role == QtCore.Qt.EditRole):
return lgr.period()
elif (index.column() == 4) and (role == QtCore.Qt.EditRole):
return lgr.isRecording()
return None
def setData(self, index: QtCore.QModelIndex, value: Any, role: int = ...) -> bool:
lgr = self._loggers[index.row()]
if (index.column() == 0) and (role == QtCore.Qt.EditRole):
lgr.setName(value)
self.dataChanged.emit(index, index)
self.saveToConfig()
return True
if (index.column() == 1) and (role == QtCore.Qt.EditRole):
lgr.setFileName(value)
self.dataChanged.emit(index, index)
self.saveToConfig()
return True
elif (index.column() == 3) and (role == QtCore.Qt.EditRole):
try:
lgr.setPeriod(float(value))
except ValueError:
return False
self.dataChanged.emit(index, index)
self.saveToConfig()
return True
elif (index.column() == 4) and (role == QtCore.Qt.EditRole):
if value:
lgr.startRecording()
else:
lgr.stopRecording()
self.dataChanged.emit(index, index)
return True
return False
def flags(self, index: QtCore.QModelIndex) -> QtCore.Qt.ItemFlag:
if (index.column() in [0, 1, 3]) and not self._loggers[index.row()].isRecording():
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsEditable
elif index.column() == 4:
return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsEnabled
else:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemNeverHasChildren
def removeRows(self, row: int, count: int, parent: QtCore.QModelIndex = ...) -> bool:
self.beginRemoveRows(parent, row, row + count - 1)
for lgr in self._loggers[row:row+count]:
try:
lgr.deleteLater()
except RuntimeError:
pass
del self._loggers[row:row + count]
self.endRemoveRows()
self.saveToConfig()
return True
def removeRow(self, row: int, parent: QtCore.QModelIndex = ...) -> bool:
return self.removeRows(row, 1, parent)
def insertRows(self, row: int, count: int, parent: QtCore.QModelIndex = ...) -> bool:
self.beginInsertRows(parent, row, row+count-1)
for i in range(count):
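# Pick the first unused "Untitled_<n>" name for the new logger.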
nameindex = 1
while (name := f'Untitled_{nameindex}') in [lgr.name() for lgr in self._loggers]:
nameindex += 1
lgr = DeviceStatusLogger(self.instrument.devicemanager, name=name)
lgr.rowsInserted.connect(self.saveToConfig)
lgr.rowsRemoved.connect(self.saveToConfig)
lgr.modelReset.connect(self.saveToConfig)
lgr.destroyed.connect(self.onLoggerDestroyed)
self._loggers.insert(row+i, lgr)
self.endInsertRows()
self.saveToConfig()
return True
def insertRow(self, row: int, parent: QtCore.QModelIndex = ...) -> bool:
return self.insertRows(row, 1, parent)
def headerData(self, section: int, orientation: QtCore.Qt.Orientation, role: int = ...) -> Any:
if (role == QtCore.Qt.DisplayRole) and (orientation == QtCore.Qt.Horizontal):
return ['Name', 'File name', 'Variable count', 'Period', 'Running?'][section]
def loadFromConfig(self):
if 'deviceloggers' not in self.config:
self.config['deviceloggers'] = {}
self.beginResetModel()
try:
self._loggers = []
for loggerkey in sorted(self.config['deviceloggers']):
lgr = DeviceStatusLogger(
self.instrument.devicemanager,
self.config['deviceloggers'][loggerkey]['filename'],
float(self.config['deviceloggers'][loggerkey]['period']), loggerkey)
for devname, varname, vartype, scaling in self.config['deviceloggers'][loggerkey]['variables']:
lgr.addRecordedVariable(devname, varname, scaling, vartype=VariableType[vartype])
lgr.rowsInserted.connect(self.saveToConfig)
lgr.rowsRemoved.connect(self.saveToConfig)
lgr.modelReset.connect(self.saveToConfig)
lgr.destroyed.connect(self.onLoggerDestroyed)
self._loggers.append(lgr)
finally:
self.endResetModel()
def saveToConfig(self):
self.config['deviceloggers'] = {}
for key in list(self.config['deviceloggers'].keys()):
# Config is somewhat counterintuitive here, assigning a {} does not make it empty, only updates it
del self.config['deviceloggers'][key]
for i, lgr in enumerate(self._loggers):
logger.debug(f'Saving logger {lgr.name()}')
self.config['deviceloggers'][lgr.name()] = {
'filename': lgr.fileName(),
'period': lgr.period(),
'variables': lgr.variables(),
}
logger.debug(f'Loggers saved to config: {self.config["deviceloggers"].keys()}')
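# The resulting config section maps each logger name to its settings, e.g.
# (illustrative values only):
#   {'Untitled_1': {'filename': 'log1.h5', 'period': 5.0, 'variables': [...]}}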
def startAll(self):
for lgr in self._loggers:
if not lgr.isRecording():
lgr.startRecording()
def stopAll(self):
for lgr in self._loggers:
if lgr.isRecording():
lgr.stopRecording()
def startComponent(self):
self.startAll()
super().startComponent()
def stopComponent(self):
self.stopAll()
return super().stopComponent()
def __getitem__(self, item: int) -> DeviceStatusLogger:
return self._loggers[item]
def __len__(self) -> int:
return len(self._loggers)
def onLoggerDestroyed(self):
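# A destroyed logger can no longer be queried, so rescan from the start
# after every removal: removeRow() shifts the remaining indices.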
while True:
for i, lgr in enumerate(self._loggers[:]):
try:
lgr.objectName()
except RuntimeError:
self.removeRow(i, QtCore.QModelIndex())
break
else:
break
|
|
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import close_all_sessions
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import declared_attr
from sqlalchemy.orm import deferred
from sqlalchemy.orm import relationship
from sqlalchemy.orm.decl_api import registry
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_true
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
Base = None
class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults):
def setup_test(self):
global Base
self.mapper_registry = registry()
Base = self.mapper_registry.generate_base()
def teardown_test(self):
close_all_sessions()
self.mapper_registry.dispose()
Base.metadata.drop_all(testing.db)
class DeclarativeInheritanceTest(DeclarativeTestBase):
def test_we_must_copy_mapper_args(self):
class Person(Base):
__tablename__ = "people"
id = Column(Integer, primary_key=True)
discriminator = Column("type", String(50))
__mapper_args__ = {
"polymorphic_on": discriminator,
"polymorphic_identity": "person",
}
class Engineer(Person):
primary_language = Column(String(50))
assert "inherits" not in Person.__mapper_args__
assert class_mapper(Engineer).polymorphic_identity is None
assert class_mapper(Engineer).polymorphic_on is Person.__table__.c.type
def test_we_must_only_copy_column_mapper_args(self):
class Person(Base):
__tablename__ = "people"
id = Column(Integer, primary_key=True)
a = Column(Integer)
b = Column(Integer)
c = Column(Integer)
d = Column(Integer)
discriminator = Column("type", String(50))
__mapper_args__ = {
"polymorphic_on": discriminator,
"polymorphic_identity": "person",
"version_id_col": "a",
"column_prefix": "bar",
"include_properties": ["id", "a", "b"],
}
assert class_mapper(Person).version_id_col == "a"
assert class_mapper(Person).include_properties == set(["id", "a", "b"])
def test_custom_join_condition(self):
class Foo(Base):
__tablename__ = "foo"
id = Column("id", Integer, primary_key=True)
class Bar(Foo):
__tablename__ = "bar"
bar_id = Column("id", Integer, primary_key=True)
foo_id = Column("foo_id", Integer)
__mapper_args__ = {"inherit_condition": foo_id == Foo.id}
# compile succeeds because inherit_condition is honored
configure_mappers()
def test_joined(self):
class Company(Base, fixtures.ComparableEntity):
__tablename__ = "companies"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
employees = relationship("Person")
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
company_id = Column(
"company_id", Integer, ForeignKey("companies.id")
)
name = Column("name", String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Engineer(Person):
__tablename__ = "engineers"
__mapper_args__ = {"polymorphic_identity": "engineer"}
id = Column(
"id", Integer, ForeignKey("people.id"), primary_key=True
)
primary_language = Column("primary_language", String(50))
class Manager(Person):
__tablename__ = "managers"
__mapper_args__ = {"polymorphic_identity": "manager"}
id = Column(
"id", Integer, ForeignKey("people.id"), primary_key=True
)
golf_swing = Column("golf_swing", String(50))
Base.metadata.create_all(testing.db)
sess = fixture_session()
c1 = Company(
name="MegaCorp, Inc.",
employees=[
Engineer(name="dilbert", primary_language="java"),
Engineer(name="wally", primary_language="c++"),
Manager(name="dogbert", golf_swing="fore!"),
],
)
c2 = Company(
name="Elbonia, Inc.",
employees=[Engineer(name="vlad", primary_language="cobol")],
)
sess.add(c1)
sess.add(c2)
sess.flush()
sess.expunge_all()
eq_(
sess.query(Company)
.filter(
Company.employees.of_type(Engineer).any(
Engineer.primary_language == "cobol"
)
)
.first(),
c2,
)
# ensure that the Manager mapper was compiled with the Manager id
# column as higher priority. this ensures that "Manager.id"
# is appropriately treated as the "id" column in the "manager"
# table (reversed from 0.6's behavior.)
eq_(
Manager.id.property.columns,
[Manager.__table__.c.id, Person.__table__.c.id],
)
# assert that the "id" column is available without a second
# load. as of 0.7, the ColumnProperty tests all columns
# in its list to see which is present in the row.
sess.expunge_all()
def go():
assert (
sess.query(Manager).filter(Manager.name == "dogbert").one().id
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
assert (
sess.query(Person).filter(Manager.name == "dogbert").one().id
)
self.assert_sql_count(testing.db, go, 1)
def test_add_subcol_after_the_fact(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Engineer(Person):
__tablename__ = "engineers"
__mapper_args__ = {"polymorphic_identity": "engineer"}
id = Column(
"id", Integer, ForeignKey("people.id"), primary_key=True
)
Engineer.primary_language = Column("primary_language", String(50))
Base.metadata.create_all(testing.db)
sess = fixture_session()
e1 = Engineer(primary_language="java", name="dilbert")
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(Person).first(),
Engineer(primary_language="java", name="dilbert"),
)
def test_add_parentcol_after_the_fact(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Engineer(Person):
__tablename__ = "engineers"
__mapper_args__ = {"polymorphic_identity": "engineer"}
primary_language = Column(String(50))
id = Column(
"id", Integer, ForeignKey("people.id"), primary_key=True
)
Person.name = Column("name", String(50))
Base.metadata.create_all(testing.db)
sess = fixture_session()
e1 = Engineer(primary_language="java", name="dilbert")
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(Person).first(),
Engineer(primary_language="java", name="dilbert"),
)
def test_add_sub_parentcol_after_the_fact(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Engineer(Person):
__tablename__ = "engineers"
__mapper_args__ = {"polymorphic_identity": "engineer"}
primary_language = Column(String(50))
id = Column(
"id", Integer, ForeignKey("people.id"), primary_key=True
)
class Admin(Engineer):
__tablename__ = "admins"
__mapper_args__ = {"polymorphic_identity": "admin"}
workstation = Column(String(50))
id = Column(
"id", Integer, ForeignKey("engineers.id"), primary_key=True
)
Person.name = Column("name", String(50))
Base.metadata.create_all(testing.db)
sess = fixture_session()
e1 = Admin(primary_language="java", name="dilbert", workstation="foo")
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(Person).first(),
Admin(primary_language="java", name="dilbert", workstation="foo"),
)
def test_subclass_mixin(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column("id", Integer, primary_key=True)
name = Column("name", String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class MyMixin(object):
pass
class Engineer(MyMixin, Person):
__tablename__ = "engineers"
__mapper_args__ = {"polymorphic_identity": "engineer"}
id = Column(
"id", Integer, ForeignKey("people.id"), primary_key=True
)
primary_language = Column("primary_language", String(50))
assert class_mapper(Engineer).inherits is class_mapper(Person)
def test_intermediate_abstract_class_on_classical(self):
class Person(object):
pass
person_table = Table(
"people",
Base.metadata,
Column("id", Integer, primary_key=True),
Column("kind", String(50)),
)
self.mapper_registry.map_imperatively(
Person,
person_table,
polymorphic_on="kind",
polymorphic_identity="person",
)
class SpecialPerson(Person):
__abstract__ = True
class Manager(SpecialPerson, Base):
__tablename__ = "managers"
id = Column(Integer, ForeignKey(Person.id), primary_key=True)
__mapper_args__ = {"polymorphic_identity": "manager"}
from sqlalchemy import inspect
assert inspect(Manager).inherits is inspect(Person)
eq_(set(class_mapper(Person).class_manager), {"id", "kind"})
eq_(set(class_mapper(Manager).class_manager), {"id", "kind"})
def test_intermediate_unmapped_class_on_classical(self):
class Person(object):
pass
person_table = Table(
"people",
Base.metadata,
Column("id", Integer, primary_key=True),
Column("kind", String(50)),
)
self.mapper_registry.map_imperatively(
Person,
person_table,
polymorphic_on="kind",
polymorphic_identity="person",
)
class SpecialPerson(Person):
pass
class Manager(SpecialPerson, Base):
__tablename__ = "managers"
id = Column(Integer, ForeignKey(Person.id), primary_key=True)
__mapper_args__ = {"polymorphic_identity": "manager"}
from sqlalchemy import inspect
assert inspect(Manager).inherits is inspect(Person)
eq_(set(class_mapper(Person).class_manager), {"id", "kind"})
eq_(set(class_mapper(Manager).class_manager), {"id", "kind"})
def test_class_w_invalid_multiple_bases(self):
class Person(object):
pass
person_table = Table(
"people",
Base.metadata,
Column("id", Integer, primary_key=True),
Column("kind", String(50)),
)
self.mapper_registry.map_imperatively(
Person,
person_table,
polymorphic_on="kind",
polymorphic_identity="person",
)
class DeclPerson(Base):
__tablename__ = "decl_people"
id = Column(Integer, primary_key=True)
kind = Column(String(50))
class SpecialPerson(Person):
pass
def go():
class Manager(SpecialPerson, DeclPerson):
__tablename__ = "managers"
id = Column(
Integer, ForeignKey(DeclPerson.id), primary_key=True
)
__mapper_args__ = {"polymorphic_identity": "manager"}
assert_raises_message(
sa.exc.InvalidRequestError,
r"Class .*Manager.* has multiple mapped "
r"bases: \[.*Person.*DeclPerson.*\]",
go,
)
def test_with_undefined_foreignkey(self):
class Parent(Base):
__tablename__ = "parent"
id = Column("id", Integer, primary_key=True)
tp = Column("type", String(50))
__mapper_args__ = dict(polymorphic_on=tp)
class Child1(Parent):
__tablename__ = "child1"
id = Column(
"id", Integer, ForeignKey("parent.id"), primary_key=True
)
related_child2 = Column("c2", Integer, ForeignKey("child2.id"))
__mapper_args__ = dict(polymorphic_identity="child1")
# no exception is raised by the ForeignKey to "child2" even
# though child2 doesn't exist yet
class Child2(Parent):
__tablename__ = "child2"
id = Column(
"id", Integer, ForeignKey("parent.id"), primary_key=True
)
related_child1 = Column("c1", Integer)
__mapper_args__ = dict(polymorphic_identity="child2")
sa.orm.configure_mappers() # no exceptions here
def test_foreign_keys_with_col(self):
"""Test that foreign keys that reference a literal 'id' subclass
'id' attribute behave intuitively.
See [ticket:1892].
"""
class Booking(Base):
__tablename__ = "booking"
id = Column(Integer, primary_key=True)
class PlanBooking(Booking):
__tablename__ = "plan_booking"
id = Column(Integer, ForeignKey(Booking.id), primary_key=True)
# referencing PlanBooking.id gives us the column
# on plan_booking, not booking
class FeatureBooking(Booking):
__tablename__ = "feature_booking"
id = Column(Integer, ForeignKey(Booking.id), primary_key=True)
plan_booking_id = Column(Integer, ForeignKey(PlanBooking.id))
plan_booking = relationship(
PlanBooking, backref="feature_bookings"
)
assert FeatureBooking.__table__.c.plan_booking_id.references(
PlanBooking.__table__.c.id
)
assert FeatureBooking.__table__.c.id.references(Booking.__table__.c.id)
def test_single_colsonbase(self):
"""test single inheritance where all the columns are on the base
class."""
class Company(Base, fixtures.ComparableEntity):
__tablename__ = "companies"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
employees = relationship("Person")
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
company_id = Column(
"company_id", Integer, ForeignKey("companies.id")
)
name = Column("name", String(50))
discriminator = Column("type", String(50))
primary_language = Column("primary_language", String(50))
golf_swing = Column("golf_swing", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Engineer(Person):
__mapper_args__ = {"polymorphic_identity": "engineer"}
class Manager(Person):
__mapper_args__ = {"polymorphic_identity": "manager"}
Base.metadata.create_all(testing.db)
sess = fixture_session()
c1 = Company(
name="MegaCorp, Inc.",
employees=[
Engineer(name="dilbert", primary_language="java"),
Engineer(name="wally", primary_language="c++"),
Manager(name="dogbert", golf_swing="fore!"),
],
)
c2 = Company(
name="Elbonia, Inc.",
employees=[Engineer(name="vlad", primary_language="cobol")],
)
sess.add(c1)
sess.add(c2)
sess.flush()
sess.expunge_all()
eq_(
sess.query(Person)
.filter(Engineer.primary_language == "cobol")
.first(),
Engineer(name="vlad"),
)
eq_(
sess.query(Company)
.filter(
Company.employees.of_type(Engineer).any(
Engineer.primary_language == "cobol"
)
)
.first(),
c2,
)
def test_single_colsonsub(self):
"""test single inheritance where the columns are local to their
class.
this is a newer usage.
"""
class Company(Base, fixtures.ComparableEntity):
__tablename__ = "companies"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
employees = relationship("Person")
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
company_id = Column(Integer, ForeignKey("companies.id"))
name = Column(String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Engineer(Person):
__mapper_args__ = {"polymorphic_identity": "engineer"}
primary_language = Column(String(50))
class Manager(Person):
__mapper_args__ = {"polymorphic_identity": "manager"}
golf_swing = Column(String(50))
# we have here a situation that is somewhat unique. the Person
# class is mapped to the "people" table, but it was mapped when
# the table did not include the "primary_language" or
# "golf_swing" columns. declarative will also manipulate the
# exclude_properties collection so that sibling classes don't
# cross-pollinate.
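# Concretely (asserted below): Engineer exposes "primary_language" but not
# "golf_swing", and Manager exposes "golf_swing" but not "primary_language",
# even though both columns end up on the shared "people" table.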
assert Person.__table__.c.company_id is not None
assert Person.__table__.c.golf_swing is not None
assert Person.__table__.c.primary_language is not None
assert Engineer.primary_language is not None
assert Manager.golf_swing is not None
assert not hasattr(Person, "primary_language")
assert not hasattr(Person, "golf_swing")
assert not hasattr(Engineer, "golf_swing")
assert not hasattr(Manager, "primary_language")
Base.metadata.create_all(testing.db)
sess = fixture_session()
e1 = Engineer(name="dilbert", primary_language="java")
e2 = Engineer(name="wally", primary_language="c++")
m1 = Manager(name="dogbert", golf_swing="fore!")
c1 = Company(name="MegaCorp, Inc.", employees=[e1, e2, m1])
e3 = Engineer(name="vlad", primary_language="cobol")
c2 = Company(name="Elbonia, Inc.", employees=[e3])
sess.add(c1)
sess.add(c2)
sess.flush()
sess.expunge_all()
eq_(
sess.query(Person)
.filter(Engineer.primary_language == "cobol")
.first(),
Engineer(name="vlad"),
)
eq_(
sess.query(Company)
.filter(
Company.employees.of_type(Engineer).any(
Engineer.primary_language == "cobol"
)
)
.first(),
c2,
)
eq_(
sess.query(Engineer).filter_by(primary_language="cobol").one(),
Engineer(name="vlad", primary_language="cobol"),
)
def test_single_cols_on_sub_base_of_joined(self):
"""test [ticket:3895]"""
class Person(Base):
__tablename__ = "person"
id = Column(Integer, primary_key=True)
type = Column(String)
__mapper_args__ = {"polymorphic_on": type}
class Contractor(Person):
contractor_field = Column(String)
__mapper_args__ = {"polymorphic_identity": "contractor"}
class Employee(Person):
__tablename__ = "employee"
id = Column(Integer, ForeignKey(Person.id), primary_key=True)
class Engineer(Employee):
__mapper_args__ = {"polymorphic_identity": "engineer"}
configure_mappers()
is_false(hasattr(Person, "contractor_field"))
is_true(hasattr(Contractor, "contractor_field"))
is_false(hasattr(Employee, "contractor_field"))
is_false(hasattr(Engineer, "contractor_field"))
def test_single_cols_on_sub_to_joined(self):
"""test [ticket:3797]"""
class BaseUser(Base):
__tablename__ = "root"
id = Column(Integer, primary_key=True)
row_type = Column(String)
__mapper_args__ = {
"polymorphic_on": row_type,
"polymorphic_identity": "baseuser",
}
class User(BaseUser):
__tablename__ = "user"
__mapper_args__ = {"polymorphic_identity": "user"}
baseuser_id = Column(
Integer, ForeignKey("root.id"), primary_key=True
)
class Bat(Base):
__tablename__ = "bat"
id = Column(Integer, primary_key=True)
class Thing(Base):
__tablename__ = "thing"
id = Column(Integer, primary_key=True)
owner_id = Column(Integer, ForeignKey("user.baseuser_id"))
owner = relationship("User")
class SubUser(User):
__mapper_args__ = {"polymorphic_identity": "subuser"}
sub_user_custom_thing = Column(Integer, ForeignKey("bat.id"))
eq_(
User.__table__.foreign_keys,
User.baseuser_id.foreign_keys.union(
SubUser.sub_user_custom_thing.foreign_keys
),
)
is_true(
Thing.owner.property.primaryjoin.compare(
Thing.owner_id == User.baseuser_id
)
)
def test_single_constraint_on_sub(self):
"""test the somewhat unusual case of [ticket:3341]"""
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Engineer(Person):
__mapper_args__ = {"polymorphic_identity": "engineer"}
primary_language = Column(String(50))
__hack_args_one__ = sa.UniqueConstraint(
Person.name, primary_language
)
__hack_args_two__ = sa.CheckConstraint(
Person.name != primary_language
)
uq = [
c
for c in Person.__table__.constraints
if isinstance(c, sa.UniqueConstraint)
][0]
ck = [
c
for c in Person.__table__.constraints
if isinstance(c, sa.CheckConstraint)
][0]
eq_(
list(uq.columns),
[Person.__table__.c.name, Person.__table__.c.primary_language],
)
eq_(
list(ck.columns),
[Person.__table__.c.name, Person.__table__.c.primary_language],
)
@testing.skip_if(
lambda: testing.against("oracle"),
"Test has an empty insert in it at the moment",
)
def test_columns_single_inheritance_conflict_resolution(self):
"""Test that a declared_attr can return the existing column and it will
be ignored. this allows conditional columns to be added.
See [ticket:2472].
"""
class Person(Base):
__tablename__ = "person"
id = Column(Integer, primary_key=True)
class Engineer(Person):
"""single table inheritance"""
@declared_attr
def target_id(cls):
return cls.__table__.c.get(
"target_id", Column(Integer, ForeignKey("other.id"))
)
@declared_attr
def target(cls):
return relationship("Other")
class Manager(Person):
"""single table inheritance"""
@declared_attr
def target_id(cls):
return cls.__table__.c.get(
"target_id", Column(Integer, ForeignKey("other.id"))
)
@declared_attr
def target(cls):
return relationship("Other")
class Other(Base):
__tablename__ = "other"
id = Column(Integer, primary_key=True)
is_(
Engineer.target_id.property.columns[0],
Person.__table__.c.target_id,
)
is_(
Manager.target_id.property.columns[0], Person.__table__.c.target_id
)
# do a brief round trip on this
Base.metadata.create_all(testing.db)
session = fixture_session()
o1, o2 = Other(), Other()
session.add_all(
[Engineer(target=o1), Manager(target=o2), Manager(target=o1)]
)
session.commit()
eq_(session.query(Engineer).first().target, o1)
def test_columns_single_inheritance_conflict_resolution_pk(self):
"""Test #2472 in terms of a primary key column. This is
#4352.
"""
class Person(Base):
__tablename__ = "person"
id = Column(Integer, primary_key=True)
target_id = Column(Integer, primary_key=True)
class Engineer(Person):
"""single table inheritance"""
@declared_attr
def target_id(cls):
return cls.__table__.c.get(
"target_id", Column(Integer, primary_key=True)
)
class Manager(Person):
"""single table inheritance"""
@declared_attr
def target_id(cls):
return cls.__table__.c.get(
"target_id", Column(Integer, primary_key=True)
)
is_(
Engineer.target_id.property.columns[0],
Person.__table__.c.target_id,
)
is_(
Manager.target_id.property.columns[0], Person.__table__.c.target_id
)
def test_columns_single_inheritance_cascading_resolution_pk(self):
"""An additional test for #4352 in terms of the requested use case."""
class TestBase(Base):
__abstract__ = True
@declared_attr.cascading
def id(cls):
col_val = None
if TestBase not in cls.__bases__:
col_val = cls.__table__.c.get("id")
if col_val is None:
col_val = Column(Integer, primary_key=True)
return col_val
class Person(TestBase):
"""single table base class"""
__tablename__ = "person"
class Engineer(Person):
"""single table inheritance, no extra cols"""
class Manager(Person):
"""single table inheritance, no extra cols"""
is_(Engineer.id.property.columns[0], Person.__table__.c.id)
is_(Manager.id.property.columns[0], Person.__table__.c.id)
def test_joined_from_single(self):
class Company(Base, fixtures.ComparableEntity):
__tablename__ = "companies"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
employees = relationship("Person")
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
company_id = Column(Integer, ForeignKey("companies.id"))
name = Column(String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Manager(Person):
__mapper_args__ = {"polymorphic_identity": "manager"}
golf_swing = Column(String(50))
class Engineer(Person):
__tablename__ = "engineers"
__mapper_args__ = {"polymorphic_identity": "engineer"}
id = Column(Integer, ForeignKey("people.id"), primary_key=True)
primary_language = Column(String(50))
assert Person.__table__.c.golf_swing is not None
assert "primary_language" not in Person.__table__.c
assert Engineer.__table__.c.primary_language is not None
assert Engineer.primary_language is not None
assert Manager.golf_swing is not None
assert not hasattr(Person, "primary_language")
assert not hasattr(Person, "golf_swing")
assert not hasattr(Engineer, "golf_swing")
assert not hasattr(Manager, "primary_language")
Base.metadata.create_all(testing.db)
sess = fixture_session()
e1 = Engineer(name="dilbert", primary_language="java")
e2 = Engineer(name="wally", primary_language="c++")
m1 = Manager(name="dogbert", golf_swing="fore!")
c1 = Company(name="MegaCorp, Inc.", employees=[e1, e2, m1])
e3 = Engineer(name="vlad", primary_language="cobol")
c2 = Company(name="Elbonia, Inc.", employees=[e3])
sess.add(c1)
sess.add(c2)
sess.flush()
sess.expunge_all()
eq_(
sess.query(Person)
.with_polymorphic(Engineer)
.filter(Engineer.primary_language == "cobol")
.first(),
Engineer(name="vlad"),
)
eq_(
sess.query(Company)
.filter(
Company.employees.of_type(Engineer).any(
Engineer.primary_language == "cobol"
)
)
.first(),
c2,
)
eq_(
sess.query(Engineer).filter_by(primary_language="cobol").one(),
Engineer(name="vlad", primary_language="cobol"),
)
def test_single_from_joined_colsonsub(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Manager(Person):
__tablename__ = "manager"
__mapper_args__ = {"polymorphic_identity": "manager"}
id = Column(Integer, ForeignKey("people.id"), primary_key=True)
golf_swing = Column(String(50))
class Boss(Manager):
boss_name = Column(String(50))
is_(
Boss.__mapper__.column_attrs["boss_name"].columns[0],
Manager.__table__.c.boss_name,
)
def test_polymorphic_on_converted_from_inst(self):
class A(Base):
__tablename__ = "A"
id = Column(Integer, primary_key=True)
discriminator = Column(String)
@declared_attr
def __mapper_args__(cls):
return {
"polymorphic_identity": cls.__name__,
"polymorphic_on": cls.discriminator,
}
class B(A):
pass
is_(B.__mapper__.polymorphic_on, A.__table__.c.discriminator)
def test_add_deferred(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
Person.name = deferred(Column(String(10)))
Base.metadata.create_all(testing.db)
sess = fixture_session()
p = Person(name="ratbert")
sess.add(p)
sess.flush()
sess.expunge_all()
eq_(sess.query(Person).all(), [Person(name="ratbert")])
sess.expunge_all()
person = sess.query(Person).filter(Person.name == "ratbert").one()
assert "name" not in person.__dict__
def test_single_fksonsub(self):
"""test single inheritance with a foreign key-holding column on
a subclass.
"""
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Engineer(Person):
__mapper_args__ = {"polymorphic_identity": "engineer"}
primary_language_id = Column(Integer, ForeignKey("languages.id"))
primary_language = relationship("Language")
class Language(Base, fixtures.ComparableEntity):
__tablename__ = "languages"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
assert not hasattr(Person, "primary_language_id")
Base.metadata.create_all(testing.db)
sess = fixture_session()
java, cpp, cobol = (
Language(name="java"),
Language(name="cpp"),
Language(name="cobol"),
)
e1 = Engineer(name="dilbert", primary_language=java)
e2 = Engineer(name="wally", primary_language=cpp)
e3 = Engineer(name="vlad", primary_language=cobol)
sess.add_all([e1, e2, e3])
sess.flush()
sess.expunge_all()
eq_(
sess.query(Person)
.filter(Engineer.primary_language.has(Language.name == "cobol"))
.first(),
Engineer(name="vlad", primary_language=Language(name="cobol")),
)
eq_(
sess.query(Engineer)
.filter(Engineer.primary_language.has(Language.name == "cobol"))
.one(),
Engineer(name="vlad", primary_language=Language(name="cobol")),
)
eq_(
sess.query(Person)
.join(Engineer.primary_language)
.order_by(Language.name)
.all(),
[
Engineer(name="vlad", primary_language=Language(name="cobol")),
Engineer(name="wally", primary_language=Language(name="cpp")),
Engineer(
name="dilbert", primary_language=Language(name="java")
),
],
)
def test_single_three_levels(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column(Integer, primary_key=True)
name = Column(String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Engineer(Person):
__mapper_args__ = {"polymorphic_identity": "engineer"}
primary_language = Column(String(50))
class JuniorEngineer(Engineer):
__mapper_args__ = {"polymorphic_identity": "junior_engineer"}
nerf_gun = Column(String(50))
class Manager(Person):
__mapper_args__ = {"polymorphic_identity": "manager"}
golf_swing = Column(String(50))
assert JuniorEngineer.nerf_gun
assert JuniorEngineer.primary_language
assert JuniorEngineer.name
assert Manager.golf_swing
assert Engineer.primary_language
assert not hasattr(Engineer, "golf_swing")
assert not hasattr(Engineer, "nerf_gun")
assert not hasattr(Manager, "nerf_gun")
assert not hasattr(Manager, "primary_language")
def test_single_detects_conflict(self):
class Person(Base):
__tablename__ = "people"
id = Column(Integer, primary_key=True)
name = Column(String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
class Engineer(Person):
__mapper_args__ = {"polymorphic_identity": "engineer"}
primary_language = Column(String(50))
# test sibling col conflict
def go():
class Manager(Person):
__mapper_args__ = {"polymorphic_identity": "manager"}
golf_swing = Column(String(50))
primary_language = Column(String(50))
assert_raises(sa.exc.ArgumentError, go)
# test parent col conflict
def go():
class Salesman(Person):
__mapper_args__ = {"polymorphic_identity": "manager"}
name = Column(String(50))
assert_raises(sa.exc.ArgumentError, go)
def test_single_no_special_cols(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column("id", Integer, primary_key=True)
name = Column("name", String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
def go():
class Engineer(Person):
__mapper_args__ = {"polymorphic_identity": "engineer"}
primary_language = Column("primary_language", String(50))
foo_bar = Column(Integer, primary_key=True)
assert_raises_message(sa.exc.ArgumentError, "place primary key", go)
def test_single_no_table_args(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = "people"
id = Column("id", Integer, primary_key=True)
name = Column("name", String(50))
discriminator = Column("type", String(50))
__mapper_args__ = {"polymorphic_on": discriminator}
def go():
class Engineer(Person):
__mapper_args__ = {"polymorphic_identity": "engineer"}
primary_language = Column("primary_language", String(50))
# this should be on the Person class, as this is single
# table inheritance, which is why we test that this
# throws an exception!
__table_args__ = {"mysql_engine": "InnoDB"}
assert_raises_message(sa.exc.ArgumentError, "place __table_args__", go)
@testing.emits_warning("This declarative")
def test_dupe_name_in_hierarchy(self):
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
a_1 = A
class A(a_1):
__tablename__ = "b"
id = Column(Integer(), ForeignKey(a_1.id), primary_key=True)
assert A.__mapper__.inherits is a_1.__mapper__
class OverlapColPrecedenceTest(DeclarativeTestBase):
"""test #1892 cases when declarative does column precedence."""
def _run_test(self, Engineer, e_id, p_id):
p_table = Base.metadata.tables["person"]
e_table = Base.metadata.tables["engineer"]
assert Engineer.id.property.columns[0] is e_table.c[e_id]
assert Engineer.id.property.columns[1] is p_table.c[p_id]
def test_basic(self):
class Person(Base):
__tablename__ = "person"
id = Column(Integer, primary_key=True)
class Engineer(Person):
__tablename__ = "engineer"
id = Column(Integer, ForeignKey("person.id"), primary_key=True)
self._run_test(Engineer, "id", "id")
def test_alt_name_base(self):
class Person(Base):
__tablename__ = "person"
id = Column("pid", Integer, primary_key=True)
class Engineer(Person):
__tablename__ = "engineer"
id = Column(Integer, ForeignKey("person.pid"), primary_key=True)
self._run_test(Engineer, "id", "pid")
def test_alt_name_sub(self):
class Person(Base):
__tablename__ = "person"
id = Column(Integer, primary_key=True)
class Engineer(Person):
__tablename__ = "engineer"
id = Column(
"eid", Integer, ForeignKey("person.id"), primary_key=True
)
self._run_test(Engineer, "eid", "id")
def test_alt_name_both(self):
class Person(Base):
__tablename__ = "person"
id = Column("pid", Integer, primary_key=True)
class Engineer(Person):
__tablename__ = "engineer"
id = Column(
"eid", Integer, ForeignKey("person.pid"), primary_key=True
)
self._run_test(Engineer, "eid", "pid")
|
|
import pytest
from django.urls import reverse
from cfp.choices import TALK_DURATIONS
from cfp.logic import accept_application
from cfp.models import CallForPaper, PaperApplication, AudienceSkillLevel
from talks.models import Talk
from tests.factories import PaperApplicationFactory
from workshops.models import Workshop
@pytest.mark.django_db
def test_cfp_announcement_view_active(client, active_cfp):
url = reverse('cfp_announcement')
response = client.get(url)
content = response.content.decode(response.charset)
assert response.status_code == 200
assert "Submit a talk" in content
assert active_cfp.announcement in content
@pytest.mark.django_db
def test_cfp_announcement_view_inactive(client, past_cfp):
url = reverse('cfp_announcement')
response = client.get(url)
content = response.content.decode(response.charset)
assert response.status_code == 200
assert "The CFP is now closed" in content
assert past_cfp.announcement in content
@pytest.mark.django_db
def test_cfp_announcement_view_when_no_cfp_exists(client):
url = reverse('cfp_announcement')
response = client.get(url)
content = response.content.decode(response.charset)
assert response.status_code == 200
assert CallForPaper.objects.count() == 0
assert "There are no active calls for paper" in content
@pytest.mark.django_db
def test_GET_create_application(user, applicant, client, active_cfp):
url = reverse('application_create')
response = client.get(url)
content = response.content.decode(response.charset)
# User needs to log in for this page
assert response.status_code == 302
assert response.url.startswith(reverse('account_login'))
client.login(username=user.email, password='webcamp')
response = client.get(url)
content = response.content.decode(response.charset)
assert response.status_code == 200
assert " Submit a talk or workshop" in content
assert active_cfp.description in content
@pytest.mark.django_db
def test_GET_create_application_when_cfp_is_inactive(user, applicant, client, past_cfp):
url = reverse('application_create')
login_url = reverse('account_login')
# When not logged in redirects to login form
response = client.get(url)
content = response.content.decode(response.charset)
assert response.status_code == 302
assert response.url == f"{login_url}?next={url}"
# Forbidden when logged in
client.login(username=user.email, password='webcamp')
response = client.get(url)
content = response.content.decode(response.charset)
assert response.status_code == 403
assert "Call for proposals is not active" in content
@pytest.mark.django_db
def test_POST_create_application(user, applicant, client, active_cfp):
url = reverse('application_create')
client.login(username=user.email, password='webcamp')
assert PaperApplication.objects.count() == 0
data = {
"title": "Hello dolly",
"about": "Hello dolly",
"abstract": "Hello dolly",
"skill_level": "10",
"type": "talk_short",
"extra_info": "Hello dolly",
"about_applicant": applicant.about + "mod", # Changed to test the change
"biography": applicant.biography + "mod",
"speaker_experience": applicant.speaker_experience + "mod",
"image": "",
}
# Permissions not granted
response = client.post(url, data)
assert response.status_code == 200
data.update({
'grant_email_contact': True,
'grant_process_data': True,
'grant_publish_data': True,
'grant_publish_video': True,
})
# Permissions granted
response = client.post(url, data)
assert response.status_code == 302
assert response.url == reverse('user_profile')
assert PaperApplication.objects.count() == 1
pa = PaperApplication.objects.first()
assert pa.applicant == applicant
assert pa.title == data['title']
assert pa.about == data['about']
assert pa.abstract == data['abstract']
assert pa.skill_level == AudienceSkillLevel.objects.get(pk=data['skill_level'])
assert pa.duration is None
assert pa.type == data['type']
assert pa.extra_info == data['extra_info']
applicant.refresh_from_db()
assert applicant.about == data['about_applicant']
assert applicant.biography == data['biography']
assert applicant.speaker_experience == data['speaker_experience']
@pytest.mark.django_db
def test_update_application_anon(user, applicant, client, active_cfp):
"""
Regression test for a bug where accessing application update page when
not logged in would cause an error.
"""
pa = PaperApplicationFactory()
url = reverse('application_update', args=[pa.pk])
response = client.get(url)
assert response.status_code == 404
@pytest.mark.django_db
def test_accept_application(user, applicant, client, active_cfp):
pa1 = PaperApplicationFactory(type=PaperApplication.TYPE_KEYNOTE)
pa2 = PaperApplicationFactory(type=PaperApplication.TYPE_TALK_LONG)
pa3 = PaperApplicationFactory(type=PaperApplication.TYPE_TALK_SHORT)
pa4 = PaperApplicationFactory(type=PaperApplication.TYPE_WORKSHOP_HALF)
pa5 = PaperApplicationFactory(type=PaperApplication.TYPE_WORKSHOP_FULL)
instance1 = accept_application(pa1)
assert isinstance(instance1, Talk)
assert instance1.applicants.get() == pa1.applicant
assert instance1.duration == TALK_DURATIONS.MIN_60
assert instance1.keynote
instance2 = accept_application(pa2)
assert isinstance(instance2, Talk)
assert instance2.applicants.get() == pa2.applicant
assert instance2.duration == TALK_DURATIONS.MIN_45
assert not instance2.keynote
instance3 = accept_application(pa3)
assert isinstance(instance3, Talk)
assert instance3.applicants.get() == pa3.applicant
assert instance3.duration == TALK_DURATIONS.MIN_25
assert not instance3.keynote
instance4 = accept_application(pa4)
assert isinstance(instance4, Workshop)
assert instance4.applicants.get() == pa4.applicant
assert instance4.duration_hours == 4
assert instance4.published
instance5 = accept_application(pa5)
assert isinstance(instance5, Workshop)
assert instance5.applicants.get() == pa5.applicant
assert instance5.duration_hours == 8
assert instance5.published
    # Accepting an already accepted application should raise an AssertionError
    for pa in (pa1, pa2, pa3, pa4, pa5):
        with pytest.raises(AssertionError):
            accept_application(pa)
|
|
import logging, math
import ptypes
from ptypes import *
### Primitive types
class RGB(pstruct.type):
_fields_ = [
(pint.uint8_t, 'R'),
(pint.uint8_t, 'G'),
(pint.uint8_t, 'B'),
]
def summary(self):
r, g, b = (self[k] for k in 'RGB')
return "R={:d} G={:d} B={:d}".format(r.int(), g.int(), b.int())
class TRANSMATRIX(parray.type):
length, _object_ = 3 * 4, pfloat.single
def matrix(self):
iterable = iter(n.float() for n in self)
identity = iter([0, 0, 0, 1])
# produce a matrix
rows = []
for r in range(4):
row = []
for c in range(3):
row.append(next(iterable))
row.append(next(identity))
rows.append(row)
return rows
def summary(self):
rows = self.matrix()
return ' / '.join(''.join('[{:d}]'.format(math.trunc(r)) if math.trunc(r) == r else '[{:+f}]'.format(r) for r in row) for row in rows)
def details(self):
rows = self.matrix()
return '\n'.join(''.join('[{:+f}]'.format(r) for r in row) for row in rows)
def repr(self):
return self.details()
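# Illustration (derived from matrix() above, not part of the file format
# definition): the twelve floats are read row-major as a 4x3 matrix and padded
# with the fixed column [0, 0, 0, 1], so a stream of
# [a, b, c, d, e, f, g, h, i, j, k, l] is presented as
#   [a, b, c, 0]
#   [d, e, f, 0]
#   [g, h, i, 0]
#   [j, k, l, 1]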
### Chunk base types
class ChunkType(ptype.definition): cache = {}
class ID(pint.enum, pint.uint16_t):
Type, _values_ = ChunkType, []
@classmethod
def define(cls, t):
res = cls.Type.define(t)
cls._values_.append((t.__name__, t.type))
return res
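# Note: ID.define is used as a decorator on the chunk classes below. It
# registers the decorated class in the category's ptype.definition cache
# (keyed by its `type` code) and also appends the class name to the enum's
# _values_, so decoded chunks display a symbolic name next to the numeric ID.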
class Chunk(pstruct.type):
Identifier = ID
def __Data(self):
type, length = self['ID'].li, self['Length'].li
cb = type.blocksize() + length.blocksize()
if cb > length.int():
raise AssertionError(cb, length)
try:
res = self.Identifier.Type.lookup(type.int())
res = dyn.clone(res, blocksize=lambda s, res=length.int() - cb: res)
except KeyError:
res = dyn.block(length.int() - cb, type=type.int())
return res
_fields_ = [
(lambda self: self.Identifier, 'ID'),
(pint.uint32_t, 'Length'),
(__Data, 'Data'),
]
class ChunkContainer(parray.block):
_object_ = Chunk
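# Worked example of the chunk layout (derived from Chunk.__Data above): every
# chunk begins with a 2-byte ID and a 4-byte Length, and Length counts the
# whole chunk including that 6-byte header. A chunk whose Length field is
# 0x0010 therefore carries 0x0010 - 6 == 10 bytes of payload; if the ID is not
# registered for the current category, the payload is kept as an opaque block.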
### Chunk type definitions
## Main chunks base types
class MainChunkType(ptype.definition): cache = {}
class Main(ID): Type, _values_ = MainChunkType, []
class MainChunk(Chunk): Identifier = Main
class MainChunkContainer(parray.block): _object_ = MainChunk
## Edit chunks base types
class EditChunkType(ptype.definition): cache = {}
class Edit(ID): Type, _values_ = EditChunkType, []
class EditChunk(Chunk): Identifier = Edit
class EditChunkContainer(parray.block): _object_ = EditChunk
## Object chunks base types
class ObjectChunkType(ptype.definition): cache = {}
class Object(ID): Type, _values_ = ObjectChunkType, []
class ObjectChunk(Chunk): Identifier = Object
class ObjectChunkContainer(parray.block): _object_ = ObjectChunk
## Camera chunks base types
class CameraChunkType(ptype.definition): cache = {}
class Camera(ID): Type, _values_ = CameraChunkType, []
class CameraChunk(Chunk): Identifier = Camera
class CameraChunkContainer(parray.block): _object_ = CameraChunk
## Light chunks base types
class LightChunkType(ptype.definition): cache = {}
class Light(ID): Type, _values_ = LightChunkType, []
class LightChunk(Chunk): Identifier = Light
class LightChunkContainer(parray.block): _object_ = LightChunk
## KeyF chunks base types
class KeyFChunkType(ptype.definition): cache = {}
class KeyF(ID): Type, _values_ = KeyFChunkType, []
class KeyFChunk(Chunk): Identifier = KeyF
class KeyFChunkContainer(parray.block): _object_ = KeyFChunk
## Color chunks base types
class ColorChunkType(ptype.definition): cache = {}
class Color(ID): Type, _values_ = ColorChunkType, []
class ColorChunk(Chunk): Identifier = Color
class ColorChunkContainer(parray.block): _object_ = ColorChunk
## Viewport chunks base types
class ViewportChunkType(ptype.definition): cache = {}
class Viewport(ID): Type, _values_ = ViewportChunkType, []
class ViewportChunk(Chunk): Identifier = Viewport
class ViewportChunkContainer(parray.block): _object_ = ViewportChunk
## Material chunks base types
class MaterialChunkType(ptype.definition): cache = {}
class Material(ID): Type, _values_ = MaterialChunkType, []
class MaterialChunk(Chunk): Identifier = Material
class MaterialChunkContainer(parray.block): _object_ = MaterialChunk
## MaterialSub chunks base types
class MaterialSubChunkType(ptype.definition): cache = {}
class MaterialSub(ID): Type, _values_ = MaterialSubChunkType, []
class MaterialSubChunk(Chunk): Identifier = MaterialSub
class MaterialSubChunkContainer(parray.block): _object_ = MaterialSubChunk
## Tri chunks base types
class TriChunkType(ptype.definition): cache = {}
class Tri(ID): Type, _values_ = TriChunkType, []
class TriChunk(Chunk): Identifier = Tri
class TriChunkContainer(parray.block): _object_ = TriChunk
### Chunk definitions
## main chunk
@ID.define
class MAIN3DS(MainChunkContainer):
type = 0x4d4d
@Main.define
class EDIT3DS(EditChunkContainer):
type = 0x3d3d
@Main.define
class KEYF3DS(KeyFChunkContainer):
type = 0xb000
## Edit chunks
@Edit.define
class EDIT_MATERIAL(MaterialChunkContainer):
type = 0xafff
@Edit.define
class EDIT_CONFIG1(ptype.block):
type = 0x0100
@Edit.define
class EDIT_CONFIG2(ptype.block):
type = 0x3e3d
@Edit.define
class EDIT_VIEW_P1(ViewportChunkContainer):
type = 0x7012
@Edit.define
class EDIT_VIEW_P2(ViewportChunkContainer):
type = 0x7011
@Edit.define
class EDIT_VIEW_P3(ptype.block):
type = 0x7020
@Edit.define
class EDIT_VIEW1(ptype.block):
type = 0x7001
@Edit.define
class EDIT_BACKGR(ptype.block):
type = 0x1200
@Edit.define
class EDIT_AMBIENT(ColorChunkContainer):
type = 0x2100
@Edit.define
class EDIT_OBJECT(pstruct.type): # FIXME: ObjectChunkContainer?
type = 0x4000
_fields_ = [
(pstr.szstring, 'name'),
(ObjectChunk, 'chunk'),
]
@Edit.define
class EDIT_UNKNWN01(ptype.block):
type = 0x1100
@Edit.define
class EDIT_UNKNWN02(ptype.block):
type = 0x1201
@Edit.define
class EDIT_UNKNWN03(ptype.block):
type = 0x1300
@Edit.define
class EDIT_UNKNWN04(ptype.block):
type = 0x1400
@Edit.define
class EDIT_UNKNWN05(ptype.block):
type = 0x1420
@Edit.define
class EDIT_UNKNWN06(ptype.block):
type = 0x1450
@Edit.define
class EDIT_UNKNWN07(ptype.block):
type = 0x1500
@Edit.define
class EDIT_UNKNWN08(ptype.block):
type = 0x2200
@Edit.define
class EDIT_UNKNWN09(ptype.block):
type = 0x2201
@Edit.define
class EDIT_UNKNWN10(ptype.block):
type = 0x2210
@Edit.define
class EDIT_UNKNWN11(ptype.block):
type = 0x2300
@Edit.define
class EDIT_UNKNWN12(ptype.block):
type = 0x2302
@Edit.define
class EDIT_UNKNWN13(ptype.block):
type = 0x3000
## Material chunks
@Material.define
class MAT_NAME(pstr.szstring): type = 0xa000
@Material.define
class MAT_AMBIENT(MaterialSubChunk): type = 0xa010
@Material.define
class MAT_DIFFUSE(MaterialSubChunk): type = 0xa020
@Material.define
class MAT_SPECULAR(MaterialSubChunk): type = 0xa030
@Material.define
class MAT_SHININESS(MaterialSubChunk): type = 0xa040
@Material.define
class MAT_SHININESS_STRENGTH(MaterialSubChunk): type = 0xa041
@Material.define
class MAT_TRANSPARENCY(MaterialSubChunk): type = 0xa050
@Material.define
class MAT_TRANSPARENCY_FALLOFF(MaterialSubChunk): type = 0xa052
@Material.define
class MAT_REFLECT_BLUR(MaterialSubChunk): type = 0xa053
@Material.define
class MAT_TYPE(pint.enum, pint.uint16_t):
type = 0xa100
_values_ = [
('flat', 1),
('gouraud', 2),
('phong', 3),
('metal', 4),
]
@Material.define
class MAT_SELF_ILLUM(MaterialSubChunk): type = 0xa084
@Material.define
class MAT_UNKNOWN(ptype.undefined): type = 0xa087
@Material.define
class MAT_SOME_TRANSPARENCY_FALLOFF_AMOUNT(ptype.undefined): type = 0xa240
@Material.define
class MAT_SOME_REFLECT_BLUR(ptype.undefined): type = 0xa250
@Material.define
class MAT_TWO_SIDED(ptype.undefined): type = 0xa081
@Material.define
class MAT_TRANSPARENCY_ADD(ptype.undefined): type = 0xa083
@Material.define
class MAT_WIRE_ON(ptype.undefined): type = 0xa085
@Material.define
class MAT_FACE_MAP(ptype.undefined): type = 0xa088
@Material.define
class MAT_TRANSPARENCY_FALLOFF_IN(ptype.undefined): type = 0xa08a
@Material.define
class MAT_SOFTEN(ptype.undefined): type = 0xa08c
@Material.define
class MAT_3D_WIRE_THICKNESS_IN_PIX(ptype.block): type = 0xa08e
@Material.define
class MAT_WIRE_THICKNESS(pfloat.single): type = 0xa087
@Material.define
class texture1_map(MaterialSubChunkContainer): type = 0xa200
@Material.define
class texture1_mask(MaterialSubChunkContainer): type = 0xa33e
@Material.define
class texture2_map(MaterialSubChunkContainer): type = 0xa33a
@Material.define
class texture2_mask(MaterialSubChunkContainer): type = 0xa340
@Material.define
class opacity_map(MaterialSubChunkContainer): type = 0xa210
@Material.define
class opacity_mask(MaterialSubChunkContainer): type = 0xa342
@Material.define
class bump_map(MaterialSubChunkContainer): type = 0xa230
@Material.define
class bump_mask(MaterialSubChunkContainer): type = 0xa344
@Material.define
class specular_map(MaterialSubChunkContainer): type = 0xa204
@Material.define
class specular_mask(MaterialSubChunkContainer): type = 0xa348
@Material.define
class shininess_map(MaterialSubChunkContainer): type = 0xa33c
@Material.define
class shininess_mask(MaterialSubChunkContainer): type = 0xa346
@Material.define
class self_illum_map(MaterialSubChunkContainer): type = 0xa33d
@Material.define
class self_illum_mask(MaterialSubChunkContainer): type = 0xa34a
@Material.define
class reflection_map(MaterialSubChunkContainer): type = 0xa220
@Material.define
class reflection_mask(MaterialSubChunkContainer): type = 0xa34c
## MaterialSub chunks
@MaterialSub.define
class RGB1(RGB): type = 0x0011
@MaterialSub.define
class RGB2(RGB): type = 0x0012
@MaterialSub.define
class intsh(pint.uint16_t): type = 0x0030
@MaterialSub.define
class asciiz(pstr.szstring): type = 0xa300
@MaterialSub.define
class map_options(pint.uint16_t): type = 0xa351 # FIXME: this is a pbinary.flags
@MaterialSub.define
class map_filtering_blur(pfloat.single): type = 0xa353
@MaterialSub.define
class u_scale(pfloat.single): type = 0xa354
@MaterialSub.define
class v_scale(pfloat.single): type = 0xa356
@MaterialSub.define
class u_offset(pfloat.single): type = 0xa358
@MaterialSub.define
class v_offset(pfloat.single): type = 0xa35a
@MaterialSub.define
class map_rotation_angle(pfloat.single): type = 0xa35c
@MaterialSub.define
class tint_first_color(RGB): type = 0xa360
@MaterialSub.define
class tint_secnd_color(RGB): type = 0xa362
@MaterialSub.define
class tint_Rchan_color(RGB): type = 0xa364
@MaterialSub.define
class tint_Gchan_color(RGB): type = 0xa366
@MaterialSub.define
class tint_Bchan_color(RGB): type = 0xa368
## KeyF chunks
@KeyF.define
class KEYF_UNKNWN01(ptype.block): type = 0xb009
@KeyF.define
class KEYF_UNKNWN02(ptype.block): type = 0xb00a
@KeyF.define
class KEYF_FRAMES(pstruct.type):
type = 0xb008
_fields_ = [
(pint.uint32_t, 'start'),
(pint.uint32_t, 'end'),
]
@KeyF.define
class KEYF_OBJDES(KeyFChunkContainer): type = 0xb002
@KeyF.define
class KEYF_OBJINDEX(pint.uint16_t): type = 0xb030
@KeyF.define
class KEYF_OBJHIERARCH(pstruct.type):
type = 0xb010
_fields_ = [
(pstr.szstring, 'name'),
(pint.uint16_t, 'flags1'),
(pint.uint16_t, 'flags2'),
(pint.uint16_t, 'hierarchy'),
]
@KeyF.define
class KEYF_OBJDUMMYNAME(pstr.szstring): type = 0xb011
@KeyF.define
class KEYF_OBJPIVOT(ptype.block): type = 0xb013
@KeyF.define
class KEYF_OBJBOUNDBOX(parray.type):
type = 0xb014
length, _object_ = 6, pfloat.single
def summary(self):
iterable = (n.float() for n in self)
return "[{:s}]".format(', '.join(map("{:f}".format, iterable)))
@KeyF.define
class KEYF_OBJUNKNWN03(ptype.block): type = 0xb015
@KeyF.define
class KEYF_OBJPOSITION(pstruct.type):
type = 0xb020
class key(pstruct.type):
_fields_ = [
(pint.uint16_t, 'framenum'),
(pint.uint32_t, 'unknown'),
(pfloat.single, 'x'),
(pfloat.single, 'y'),
(pfloat.single, 'z'),
]
def summary(self):
x, y, z = (self[k] for k in 'xyz')
return "framenum({:d}) unknown({:#x}) ({:f}, {:f}, {:f})".format(self['framenum'].int(), self['unknown'].int(), x.float(), y.float(), z.float())
_fields_ = [
(pint.uint16_t, 'flags'),
(dyn.array(pint.uint16_t, 4), 'unknown_0'),
(pint.uint16_t, 'keys'),
(pint.uint16_t, 'unknown_1'),
(lambda self: dyn.array(self.key, self['keys'].li.int()), 'pos'),
]
@KeyF.define
class KEYF_OBJROTATION(pstruct.type):
type = 0xb021
class key(pstruct.type):
_fields_ = [
(pint.uint16_t, 'framenum'),
(pint.uint32_t, 'unknown'),
(pfloat.single, 'angle'),
(pfloat.single, 'x'),
(pfloat.single, 'y'),
(pfloat.single, 'z'),
]
def summary(self):
x, y, z = (self[k] for k in 'xyz')
return "framenum({:d}) unknown({:#x}) ({:f}, {:f}, {:f})".format(self['framenum'].int(), self['unknown'].int(), x.float(), y.float(), z.float())
_fields_ = [
(pint.uint16_t, 'flags'),
(dyn.array(pint.uint16_t, 4), 'unknown_0'),
(pint.uint16_t, 'keys'),
(pint.uint16_t, 'unknown_1'),
(lambda self: dyn.array(self.key, self['keys'].li.int()), 'rotate'),
]
@KeyF.define
class KEYF_OBJSCALING(pstruct.type):
type = 0xb022
class key(pstruct.type):
_fields_ = [
(pint.uint16_t, 'framenum'),
(pint.uint32_t, 'unknown'),
(pfloat.single, 'x'),
(pfloat.single, 'y'),
(pfloat.single, 'z'),
]
def summary(self):
x, y, z = (self[k] for k in 'xyz')
return "framenum({:d}) unknown({:#x}) ({:f}, {:f}, {:f})".format(self['framenum'].int(), self['unknown'].int(), x.float(), y.float(), z.float())
_fields_ = [
(pint.uint16_t, 'flags'),
(dyn.array(pint.uint16_t, 4), 'unknown_0'),
(pint.uint16_t, 'keys'),
(pint.uint16_t, 'unknown_1'),
(lambda self: dyn.array(self.key, self['keys'].li.int()), 'scale'),
]
## object chunks
@Object.define
class OBJ_TRIMESH(TriChunkContainer): type = 0x4100
@Object.define
class OBJ_LIGHT(LightChunkContainer): type = 0x4600
@Object.define
class OBJ_CAMERA(CameraChunkContainer): type = 0x4700
@Object.define
class OBJ_UNKNWN01(ptype.block): type = 0x4710
@Object.define
class OBJ_UNKNWN02(ptype.block): type = 0x4720
## tri chunks
@Tri.define
class TRI_VERTEXL(pstruct.type):
type = 0x4110
class vertex(pstruct.type):
_fields_ = [(pfloat.single, 'x'), (pfloat.single, 'y'), (pfloat.single, 'z')]
def summary(self):
x, y, z = (self[k] for k in 'xyz')
return "({:f}, {:f}, {:f})".format(x.float(), y.float(), z.float())
_fields_ = [
(pint.uint16_t, 'count'),
(lambda self: dyn.array(self.vertex, self['count'].li.int()), 'vertex'),
]
@Tri.define
class TRI_VERTEXOPTIONS(ptype.block): type = 0x4111
@Tri.define
class TRI_MAPPINGCOORS(pstruct.type):
type = 0x4140
class vertex(pstruct.type):
_fields_ = [(pfloat.single, 'x'), (pfloat.single, 'y')]
def summary(self):
x, y = (self[k] for k in 'xy')
return "({:f}, {:f})".format(x.float(), y.float())
_fields_ = [
(pint.uint16_t, 'count'),
(lambda self: dyn.array(self.vertex, self['count'].li.int()), 'vertex'),
]
@Tri.define
class TRI_MAPPINGSTANDARD(ptype.block): type = 0x4170
@Tri.define
class TRI_FACEL1(pstruct.type):
type = 0x4120
class face(pstruct.type):
class faceinfo(pint.uint16_t): pass # XXX: this is a pbinary.flags
_fields_ = [
(pint.uint16_t, 'vertexA'),
(pint.uint16_t, 'vertexB'),
(pint.uint16_t, 'vertexC'),
(faceinfo, 'faceinfo'),
]
def summary(self):
A, B, C = (self['vertex'+k] for k in 'ABC')
return "vertices=({:d},{:d},{:d}) faceinfo={:#x}".format(A.int(), B.int(), C.int(), self['faceinfo'].int())
_fields_ = [
(pint.uint16_t, 'count'),
(lambda self: dyn.array(self.face, self['count'].li.int()), 'face'),
(lambda self: dyn.clone(TriChunkContainer, blocksize=lambda s, cb=self.blocksize()-(self['count'].li.size()+self['face'].li.size()): cb), 'facedata'),
]
@Tri.define
class TRI_SMOOTH(parray.block):
type = 0x4150
_object_ = pint.uint32_t
@Tri.define
class TRI_MATERIAL(pstruct.type):
type = 0x4130
_fields_ = [
(pstr.szstring, 'material'),
(pint.uint16_t, 'count'),
(lambda self: dyn.array(pint.uint16_t, self['count'].li.int()), 'face'),
]
@Tri.define
class TRI_LOCAL(TRANSMATRIX): type = 0x4160
@Tri.define
class TRI_VISIBLE(ptype.block): type = 0x4165
## lit chunks
@Light.define
class LIT_OFF(ptype.block): type = 0x4620
@Light.define
class LIT_SPOT(ptype.block): type = 0x4610
@Light.define
class LIT_UNKNWN01(ptype.block): type = 0x465a
## camera chunks
@Camera.define
class CAM_UNKNWN01(ptype.block): type = 0x4710
@Camera.define
class CAM_UNKNWN02(ptype.block): type = 0x4720
## color chunks
@Color.define
class COL_RGB(RGB): type = 0x0010
@Color.define
class COL_TRU(RGB): type = 0x0011
@Color.define
class COL_UNK(ptype.block): type = 0x0013
# viewport chunks
@Viewport.define
class TOP(ptype.block): type = 0x0001
@Viewport.define
class BOTTOM(ptype.block): type = 0x0002
@Viewport.define
class LEFT(ptype.block): type = 0x0003
@Viewport.define
class RIGHT(ptype.block): type = 0x0004
@Viewport.define
class FRONT(ptype.block): type = 0x0005
@Viewport.define
class BACK(ptype.block): type = 0x0006
@Viewport.define
class USER(ptype.block): type = 0x0007
@Viewport.define
class CAMERA(ptype.block): type = 0x0008
@Viewport.define
class LIGHT(ptype.block): type = 0x0009
@Viewport.define
class DISABLED(ptype.block): type = 0x0010
@Viewport.define
class BOGUS(ptype.block): type = 0x0011
## File chunk
class File(Chunk): pass
if __name__ == '__main__':
import ptypes, vector.max as max
ptypes.setsource(ptypes.prov.file('./samples/3ds/boletus.3ds', mode='rb'))
z = max.File()
z=z.l
print(z['data'])
print(z['data'][1]['data'][0]['data'])
print(z['data'][1]['data'][1])
print(z['data'][1]['data'][2]['data'][0]['data'])
print(z['data'][1]['data'][3]['data']['chunk']['data'][0]['data']) # TRI_VERTEXL
print(z['data'][1]['data'][3]['data']['chunk']['data'][1]['data']) # TRI_LOCAL
print(z['data'][1]['data'][3]['data']['chunk']['data'][2]['data']) # TRI_MAPPINGCOORS
print(z['data'][1]['data'][3]['data']['chunk']['data'][3])
print(z['data'][1]['data'][3]['data']['chunk']['data'][3]['data'])
print(z['data'][1]['data'][3]['data']['chunk']['data'][3]['data']['face'])
print(z['data'][1]['data'][3]['data']['chunk']['data'][3]['data']['facedata'][0]['data']['face'])
print(z['data'][1]['data'][3]['data']['chunk']['data'][3]['data']['facedata'][1]['data'])
print(max.TriChunk(offset=0x228a).l)
print(max.TriChunk(offset=0x25d6).l)
print(z['data'][2]['data'][0])
print(z['data'][2]['data'][1])
print(z['data'][2]['data'][2])
print(z['data'][2]['data'][3])
print(z['data'][2]['data'][3]['data'][0]) # KEYF_OBJINDEX
print(z['data'][2]['data'][3]['data'][1]) # KEYF_OBJHIERARCH
print(z['data'][2]['data'][3]['data'][1]['data'])
print(z['data'][2]['data'][3]['data'][2]) # KEYF_OBJBOUNDBOX
print(z['data'][2]['data'][3]['data'][2]['data'])
print(z['data'][2]['data'][3]['data'][3]) # KEYF_OBJPIVOT
print(z['data'][2]['data'][3]['data'][3]['data']['pos'][0])
print(z['data'][2]['data'][3]['data'][4]) # KEYF_OBJSCALING
print(z['data'][2]['data'][3]['data'][4]['data']['scale'][0])
print(z['data'][2]['data'][3]['data'][5]) # KEYF_OBJROTATION
print(z['data'][2]['data'][3]['data'][5]['data']['rotate'][0])
if __name__ == '__main__':
import ptypes, vector.max as max
ptypes.setsource(ptypes.prov.file('./results/3ds/crashes/id_000071_00', mode='rb'))
ptypes.setsource(ptypes.prov.file('./samples/3ds/boletus.3ds', mode='rb'))
z = max.File()
z=z.l
print(z.at(0x12f).getparent(max.Chunk))
print(z.at(0x11d).getparent(max.Chunk))
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from mock import patch
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.ovh import OvhNodeDriver
from libcloud.test.common.test_ovh import BaseOvhMockHttp
from libcloud.test.secrets import OVH_PARAMS
from libcloud.test.file_fixtures import ComputeFileFixtures
class OvhMockHttp(BaseOvhMockHttp):
"""Fixtures needed for tests related to rating model"""
fixtures = ComputeFileFixtures('ovh')
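    # The handler names below appear to follow libcloud's mock-HTTP dispatch
    # convention: the request path is flattened into a method name (slashes
    # and dots become underscores) and the lowercased HTTP verb is appended,
    # so a GET on /1.0/cloud/project/project_id/region is routed to
    # _json_1_0_cloud_project_project_id_region_get.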
def _json_1_0_auth_time_get(self, method, url, body, headers):
body = self.fixtures.load('auth_time_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_region_get(self, method, url, body, headers):
body = self.fixtures.load('region_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_flavor_get(self, method, url, body, headers):
body = self.fixtures.load('flavor_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_flavor_region_SBG1_get(self, method, url, body, headers):
body = self.fixtures.load('flavor_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_flavor_foo_id_get(self, method, url, body, headers):
body = self.fixtures.load('flavor_get_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_image_get(self, method, url, body, headers):
body = self.fixtures.load('image_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_image_foo_id_get(self, method, url, body, headers):
body = self.fixtures.load('image_get_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_sshkey_region_SBG1_get(self, method, url, body, headers):
body = self.fixtures.load('ssh_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_sshkey_post(self, method, url, body, headers):
body = self.fixtures.load('ssh_get_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_ssh_mykey_get(self, method, url, body, headers):
body = self.fixtures.load('ssh_get_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_instance_get(self, method, url, body, headers):
body = self.fixtures.load('instance_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_instance_foo_get(self, method, url, body, headers):
body = self.fixtures.load('instance_get_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_instance_foo_delete(self, method, url, body, headers):
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_instance_post(self, method, url, body, headers):
body = self.fixtures.load('instance_get_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_volume_get(self, method, url, body, headers):
body = self.fixtures.load('volume_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_volume_post(self, method, url, body, headers):
body = self.fixtures.load('volume_get_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_volume_foo_get(self, method, url, body, headers):
body = self.fixtures.load('volume_get_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_volume_foo_delete(self, method, url, body, headers):
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_volume_foo_attach_post(self, method, url, body, headers):
body = self.fixtures.load('volume_get_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_volume_foo_detach_post(self, method, url, body, headers):
body = self.fixtures.load('volume_get_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_volume_snapshot_region_SBG_1_get(self, method, url, body, headers):
body = self.fixtures.load('volume_snapshot_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_volume_snapshot_get(self, method, url, body, headers):
body = self.fixtures.load('volume_snapshot_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_volume_snapshot_foo_get(self, method, url, body, headers):
body = self.fixtures.load('volume_snapshot_get_details.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_volume_snapshot_foo_snap_delete(self, method, url, body, headers):
return (httplib.OK, None, {}, httplib.responses[httplib.OK])
def _json_1_0_cloud_project_project_id_volume_foo_snapshot__post(self, method, url, body, headers):
body = self.fixtures.load('volume_snapshot_get_details.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
@patch('libcloud.common.ovh.OvhConnection._timedelta', 42)
class OvhTests(unittest.TestCase):
def setUp(self):
OvhNodeDriver.connectionCls.conn_classes = (
OvhMockHttp, OvhMockHttp)
OvhMockHttp.type = None
self.driver = OvhNodeDriver(*OVH_PARAMS)
    def test_list_locations(self):
        locations = self.driver.list_locations()
        self.assertTrue(len(locations) > 0)
def test_list_images(self):
images = self.driver.list_images()
self.assertTrue(len(images) > 0)
def test_get_image(self):
image = self.driver.get_image('foo-id')
self.assertEqual(image.id, 'foo-id')
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertTrue(len(sizes) > 0)
def test_get_size(self):
size = self.driver.ex_get_size('foo-id')
self.assertEqual(size.id, 'foo-id')
def test_list_key_pairs(self):
keys = self.driver.list_sizes()
self.assertTrue(len(keys) > 0)
def test_get_key_pair(self):
location = self.driver.list_locations()[0]
key = self.driver.get_key_pair('mykey', location)
self.assertEqual(key.name, 'mykey')
def test_import_key_pair_from_string(self):
location = self.driver.list_locations()[0]
key = self.driver.import_key_pair_from_string('mykey', 'material',
location)
self.assertEqual(key.name, 'mykey')
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertTrue(len(nodes) > 0)
def test_get_node(self):
node = self.driver.ex_get_node('foo')
self.assertEqual(node.name, 'test_vm')
def test_create_node(self):
location = self.driver.list_locations()[0]
        image = self.driver.list_images(location)[0]
size = self.driver.list_sizes(location)[0]
node = self.driver.create_node(name='test_vm', image=image, size=size,
location=location)
self.assertEqual(node.name, 'test_vm')
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
self.driver.destroy_node(node)
def test_list_volumes(self):
volumes = self.driver.list_volumes()
self.assertTrue(len(volumes) > 0)
def test_get_volume(self):
volume = self.driver.ex_get_volume('foo')
self.assertEqual(volume.name, 'testvol')
def test_create_volume(self):
location = self.driver.list_locations()[0]
volume = self.driver.create_volume(size=10, name='testvol',
location=location)
self.assertEqual(volume.name, 'testvol')
def test_destroy_volume(self):
volume = self.driver.list_volumes()[0]
self.driver.destroy_volume(volume)
def test_attach_volume(self):
node = self.driver.list_nodes()[0]
volume = self.driver.ex_get_volume('foo')
response = self.driver.attach_volume(node=node, volume=volume)
self.assertTrue(response)
def test_detach_volume(self):
node = self.driver.list_nodes()[0]
volume = self.driver.ex_get_volume('foo')
response = self.driver.detach_volume(ex_node=node, volume=volume)
self.assertTrue(response)
def test_ex_list_snapshots(self):
self.driver.ex_list_snapshots()
def test_ex_get_volume_snapshot(self):
self.driver.ex_get_volume_snapshot('foo')
def test_list_volume_snapshots(self):
volume = self.driver.ex_get_volume('foo')
self.driver.list_volume_snapshots(volume)
def test_create_volume_snapshot(self):
volume = self.driver.ex_get_volume('foo')
self.driver.create_volume_snapshot(volume)
def test_destroy_volume_snapshot(self):
snapshot = self.driver.ex_get_volume_snapshot('foo')
result = self.driver.destroy_volume_snapshot(snapshot)
self.assertTrue(result)
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
import threading
from ctypes import POINTER, Structure, byref, c_char, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
pass
class WKTWriter_st(Structure):
pass
class WKBReader_st(Structure):
pass
class WKBWriter_st(Structure):
pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
WKB_WRITE_PTR = POINTER(WKBWriter_st)
# WKTReader routines
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])
wkt_reader_read = GEOSFuncFactory(
'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)
# WKTWriter routines
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])
wkt_writer_write = GEOSFuncFactory(
'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
wkt_writer_get_outdim = GEOSFuncFactory(
'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = GEOSFuncFactory(
'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)
wkt_writer_set_trim = GEOSFuncFactory('GEOSWKTWriter_setTrim', argtypes=[WKT_WRITE_PTR, c_char])
wkt_writer_set_precision = GEOSFuncFactory('GEOSWKTWriter_setRoundingPrecision', argtypes=[WKT_WRITE_PTR, c_int])
# WKBReader routines
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])
class WKBReadFunc(GEOSFuncFactory):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
# take Python strings directly as parameters. Inside Python there
# is not a difference between signed and unsigned characters, so
# it is not a problem.
argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])
# WKB Writing prototypes.
class WKBWriteFunc(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
restype = c_uchar_p
errcheck = staticmethod(check_sized_string)
wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')
# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR]
restype = c_int
class WKBWriterSet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, c_int]
wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_char)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_char])
# ### Base I/O Class ###
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
# Loading the real destructor function at this point as doing it in
# __del__ is too late (import error).
self.destructor.func = self.destructor.get_func(
*self.destructor.args, **self.destructor.kwargs
)
# ### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
_constructor = wkt_reader_create
ptr_type = WKT_READ_PTR
destructor = wkt_reader_destroy
def read(self, wkt):
if not isinstance(wkt, (bytes, str)):
raise TypeError
return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
_constructor = wkb_reader_create
ptr_type = WKB_READ_PTR
destructor = wkb_reader_destroy
def read(self, wkb):
"Returns a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, memoryview):
wkb_s = bytes(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, (bytes, str)):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
_constructor = wkt_writer_create
ptr_type = WKT_WRITE_PTR
destructor = wkt_writer_destroy
_trim = False
_precision = None
def __init__(self, dim=2, trim=False, precision=None):
super().__init__()
if bool(trim) != self._trim:
self.trim = trim
if precision is not None:
self.precision = precision
self.outdim = dim
def write(self, geom):
"Returns the WKT representation of the given geometry."
return wkt_writer_write(self.ptr, geom.ptr)
@property
def outdim(self):
return wkt_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKT output dimension must be 2 or 3')
wkt_writer_set_outdim(self.ptr, new_dim)
@property
def trim(self):
return self._trim
@trim.setter
def trim(self, flag):
if bool(flag) != self._trim:
self._trim = bool(flag)
wkt_writer_set_trim(self.ptr, b'\x01' if flag else b'\x00')
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, precision):
if (not isinstance(precision, int) or precision < 0) and precision is not None:
raise AttributeError('WKT output rounding precision must be non-negative integer or None.')
if precision != self._precision:
self._precision = precision
wkt_writer_set_precision(self.ptr, -1 if precision is None else precision)
class WKBWriter(IOBase):
_constructor = wkb_writer_create
ptr_type = WKB_WRITE_PTR
destructor = wkb_writer_destroy
def __init__(self, dim=2):
super().__init__()
self.outdim = dim
def _handle_empty_point(self, geom):
from django.contrib.gis.geos import Point
if isinstance(geom, Point) and geom.empty:
if self.srid:
# PostGIS uses POINT(NaN NaN) for WKB representation of empty
# points. Use it for EWKB as it's a PostGIS specific format.
# https://trac.osgeo.org/postgis/ticket/3181
geom = Point(float('NaN'), float('NaN'), srid=geom.srid)
else:
raise ValueError('Empty point is not representable in WKB.')
return geom
def write(self, geom):
"Returns the WKB representation of the given geometry."
from django.contrib.gis.geos import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
# Fix GEOS output for empty polygon.
# See https://trac.osgeo.org/geos/ticket/680.
wkb = wkb[:-8] + b'\0' * 4
return memoryview(wkb)
def write_hex(self, geom):
"Returns the HEXEWKB representation of the given geometry."
from django.contrib.gis.geos.polygon import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
wkb = wkb[:-16] + b'0' * 8
return wkb
# ### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if order not in (0, 1):
raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
@property
def outdim(self):
return wkb_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKB output dimension must be 2 or 3')
wkb_writer_set_outdim(self.ptr, new_dim)
# Property for getting/setting the include srid flag.
@property
def srid(self):
return bool(ord(wkb_writer_get_include_srid(self.ptr)))
@srid.setter
def srid(self, include):
if include:
flag = b'\x01'
else:
flag = b'\x00'
wkb_writer_set_include_srid(self.ptr, flag)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
wkt_r = None
wkt_w = None
wkb_r = None
wkb_w = None
ewkb_w = None
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
if not thread_context.wkt_r:
thread_context.wkt_r = _WKTReader()
return thread_context.wkt_r
def wkt_w(dim=2, trim=False, precision=None):
if not thread_context.wkt_w:
thread_context.wkt_w = WKTWriter(dim=dim, trim=trim, precision=precision)
else:
thread_context.wkt_w.outdim = dim
thread_context.wkt_w.trim = trim
thread_context.wkt_w.precision = precision
return thread_context.wkt_w
def wkb_r():
if not thread_context.wkb_r:
thread_context.wkb_r = _WKBReader()
return thread_context.wkb_r
def wkb_w(dim=2):
if not thread_context.wkb_w:
thread_context.wkb_w = WKBWriter(dim=dim)
else:
thread_context.wkb_w.outdim = dim
return thread_context.wkb_w
def ewkb_w(dim=2):
if not thread_context.ewkb_w:
thread_context.ewkb_w = WKBWriter(dim=dim)
thread_context.ewkb_w.srid = True
else:
thread_context.ewkb_w.outdim = dim
return thread_context.ewkb_w
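# A minimal usage sketch (illustration only, not part of the module API):
# callers such as GEOSGeometry go through the accessors above rather than
# instantiating readers/writers directly, so each thread lazily gets its own
# GEOS handles:
#
#   from django.contrib.gis.geos import GEOSGeometry
#   pt = GEOSGeometry('POINT (1 2)')
#   wkt_w(dim=2, trim=True).write(pt)   # WKT bytes via the thread-local writer
#   wkb_w(dim=2).write(pt)              # WKB memoryview via the thread-local writer
#   wkb_r().read(pt.wkb)                # raw GEOS pointer, not a GEOSGeometry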
|
|
"""
pypi.py
=======
Desc: Library for getting information about Python packages by querying
The CheeseShop (PYPI a.k.a. Python Package Index).
Author: Rob Cakebread <cakebread at gmail>
License : BSD (See COPYING)
"""
__docformat__ = 'restructuredtext'
import re
import platform
if platform.python_version().startswith('2'):
    import xmlrpclib
    import cPickle
    import urllib2
    import httplib
else:
    import xmlrpc.client as xmlrpclib
    import pickle as cPickle
    import urllib.request as urllib2
    import http.client as httplib
import os
import time
import logging
import urllib
from yolk.utils import get_yolk_dir
XML_RPC_SERVER = 'http://pypi.python.org/pypi'
class addinfourl(urllib2.addinfourl):
"""
Replacement addinfourl class compatible with python-2.7's xmlrpclib
In python-2.7, xmlrpclib expects that the response object that it receives
has a getheader method. httplib.HTTPResponse provides this but
urllib2.addinfourl does not. Add the necessary functions here, ported to
use the internal data structures of addinfourl.
"""
def getheader(self, name, default=None):
if self.headers is None:
raise httplib.ResponseNotReady()
return self.headers.getheader(name, default)
def getheaders(self):
if self.headers is None:
raise httplib.ResponseNotReady()
return self.headers.items()
urllib2.addinfourl = addinfourl
class ProxyTransport(xmlrpclib.Transport):
"""
    Provides an XML-RPC transport that routes requests via an HTTP proxy.
    This is done by using urllib2, which in turn uses the environment
    variable http_proxy and whatever else it is built to use (e.g. the
    Windows registry).
NOTE: the environment variable http_proxy should be set correctly.
See check_proxy_setting() below.
Written from scratch but inspired by xmlrpc_urllib_transport.py
file from http://starship.python.net/crew/jjkunce/ by jjk.
A. Ellerton 2006-07-06
"""
def request(self, host, handler, request_body, verbose):
'''Send xml-rpc request using proxy'''
#We get a traceback if we don't have this attribute:
self.verbose = verbose
url = 'http://' + host + handler
request = urllib2.Request(url)
request.add_data(request_body)
# Note: 'Host' and 'Content-Length' are added automatically
request.add_header('User-Agent', self.user_agent)
request.add_header('Content-Type', 'text/xml')
proxy_handler = urllib2.ProxyHandler()
opener = urllib2.build_opener(proxy_handler)
fhandle = opener.open(request)
return(self.parse_response(fhandle))
def check_proxy_setting():
"""
    If the environment variable 'HTTP_PROXY' is set, it will most likely be
    in one of these forms:
        proxyhost:8080
        http://proxyhost:8080
    urllib2 requires the proxy URL to start with 'http://', so this routine
    rewrites HTTP_PROXY in place to ensure it has that prefix.
"""
try:
http_proxy = os.environ['HTTP_PROXY']
except KeyError:
return
if not http_proxy.startswith('http://'):
match = re.match('(http://)?([-_\.A-Za-z]+):(\d+)', http_proxy)
#if not match:
# raise Exception('Proxy format not recognised: [%s]' % http_proxy)
os.environ['HTTP_PROXY'] = 'http://%s:%s' % (match.group(2),
match.group(3))
return
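#Example of the normalization performed above (illustration only):
#  HTTP_PROXY='proxyhost:8080'        becomes 'http://proxyhost:8080'
#  HTTP_PROXY='http://proxyhost:8080' is left unchanged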
class CheeseShop(object):
"""Interface to Python Package Index"""
def __init__(self, debug=False, no_cache=False, yolk_dir=None):
self.debug = debug
self.no_cache = no_cache
if yolk_dir:
self.yolk_dir = yolk_dir
else:
self.yolk_dir = get_yolk_dir()
self.xmlrpc = self.get_xmlrpc_server()
self.pkg_cache_file = self.get_pkg_cache_file()
self.last_sync_file = self.get_last_sync_file()
self.pkg_list = None
self.logger = logging.getLogger("yolk")
self.get_cache()
def get_cache(self):
"""
Get a package name list from disk cache or PyPI
"""
        #This is used by external programs that import `CheeseShop` and don't
        #want a cache file written to ~/.pypi; they query PyPI on every call.
if self.no_cache:
self.pkg_list = self.list_packages()
return
if not os.path.exists(self.yolk_dir):
os.mkdir(self.yolk_dir)
if os.path.exists(self.pkg_cache_file):
self.pkg_list = self.query_cached_package_list()
else:
self.logger.debug("DEBUG: Fetching package list cache from PyPi...")
self.fetch_pkg_list()
def get_last_sync_file(self):
"""
        Return the path of the file that records when (in seconds since the
        epoch) the package list was last synced
"""
return os.path.abspath(self.yolk_dir + "/last_sync")
def get_xmlrpc_server(self):
"""
Returns PyPI's XML-RPC server instance
"""
check_proxy_setting()
        if 'XMLRPC_DEBUG' in os.environ:
            debug = 1
        else:
            debug = 0
try:
return xmlrpclib.Server(XML_RPC_SERVER, transport=ProxyTransport(), verbose=debug)
except IOError:
self.logger("ERROR: Can't connect to XML-RPC server: %s" \
% XML_RPC_SERVER)
def get_pkg_cache_file(self):
"""
Returns filename of pkg cache
"""
return os.path.abspath('%s/pkg_list.pkl' % self.yolk_dir)
def query_versions_pypi(self, package_name):
"""Fetch list of available versions for a package from The CheeseShop"""
        if package_name not in self.pkg_list:
self.logger.debug("Package %s not in cache, querying PyPI..." \
% package_name)
self.fetch_pkg_list()
        #I have to set versions=[] for edge cases like "Magic file extensions"
        #but I'm not sure why this happens. Is it because it's included with
        #Python, or because it has a space in its name?
versions = []
for pypi_pkg in self.pkg_list:
if pypi_pkg.lower() == package_name.lower():
if self.debug:
self.logger.debug("DEBUG: %s" % package_name)
versions = self.package_releases(pypi_pkg)
package_name = pypi_pkg
break
return (package_name, versions)
def query_cached_package_list(self):
"""Return list of pickled package names from PYPI"""
if self.debug:
self.logger.debug("DEBUG: reading pickled cache file")
        return cPickle.load(open(self.pkg_cache_file, "rb"))
def fetch_pkg_list(self):
"""Fetch and cache master list of package names from PYPI"""
self.logger.debug("DEBUG: Fetching package name list from PyPI")
package_list = self.list_packages()
        cPickle.dump(package_list, open(self.pkg_cache_file, "wb"))
self.pkg_list = package_list
def search(self, spec, operator):
'''Query PYPI via XMLRPC interface using search spec'''
return self.xmlrpc.search(spec, operator.lower())
def changelog(self, hours):
'''Query PYPI via XMLRPC interface using search spec'''
return self.xmlrpc.changelog(get_seconds(hours))
def updated_releases(self, hours):
'''Query PYPI via XMLRPC interface using search spec'''
return self.xmlrpc.updated_releases(get_seconds(hours))
def list_packages(self):
"""Query PYPI via XMLRPC interface for a a list of all package names"""
return self.xmlrpc.list_packages()
def release_urls(self, package_name, version):
"""Query PYPI via XMLRPC interface for a pkg's available versions"""
return self.xmlrpc.release_urls(package_name, version)
def release_data(self, package_name, version):
"""Query PYPI via XMLRPC interface for a pkg's metadata"""
try:
return self.xmlrpc.release_data(package_name, version)
except xmlrpclib.Fault:
            #XXX Raises xmlrpclib.Fault if you give a non-existent version.
            #Could this be a server bug?
return
def package_releases(self, package_name):
"""Query PYPI via XMLRPC interface for a pkg's available versions"""
if self.debug:
self.logger.debug("DEBUG: querying PyPI for versions of " \
+ package_name)
return self.xmlrpc.package_releases(package_name)
def get_download_urls(self, package_name, version="", pkg_type="all"):
"""Query PyPI for pkg download URI for a packge"""
if version:
versions = [version]
else:
#If they don't specify version, show em all.
(package_name, versions) = self.query_versions_pypi(package_name)
all_urls = []
for ver in versions:
metadata = self.release_data(package_name, ver)
for urls in self.release_urls(package_name, ver):
if pkg_type == "source" and urls['packagetype'] == "sdist":
all_urls.append(urls['url'])
elif pkg_type == "egg" and \
urls['packagetype'].startswith("bdist"):
all_urls.append(urls['url'])
elif pkg_type == "all":
#All
all_urls.append(urls['url'])
#Try the package's metadata directly in case there's nothing
#returned by XML-RPC's release_urls()
            if metadata and 'download_url' in metadata and \
                    metadata['download_url'] != "UNKNOWN" and \
                    metadata['download_url'] is not None:
if metadata['download_url'] not in all_urls:
if pkg_type != "all":
url = filter_url(pkg_type, metadata['download_url'])
if url:
all_urls.append(url)
return all_urls
def filter_url(pkg_type, url):
"""
Returns URL of specified file type
'source', 'egg', or 'all'
"""
bad_stuff = ["?modtime", "#md5="]
for junk in bad_stuff:
if junk in url:
url = url.split(junk)[0]
break
#pkg_spec==dev (svn)
if url.endswith("-dev"):
url = url.split("#egg=")[0]
if pkg_type == "all":
return url
elif pkg_type == "source":
valid_source_types = [".tgz", ".tar.gz", ".zip", ".tbz2", ".tar.bz2"]
for extension in valid_source_types:
if url.lower().endswith(extension):
return url
elif pkg_type == "egg":
if url.lower().endswith(".egg"):
return url
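#Example of filter_url behaviour (illustration only, hypothetical URL):
#  filter_url("source", "http://example.com/pkg-1.0.tar.gz#md5=abc123")
#  returns "http://example.com/pkg-1.0.tar.gz", while the same URL with
#  pkg_type "egg" returns None because it does not end in ".egg".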
def get_seconds(hours):
"""
Get number of seconds since epoch from now minus `hours`
@param hours: Number of `hours` back in time we are checking
@type hours: int
Return integer for number of seconds for now minus hours
"""
return int(time.time() - (60 * 60) * hours)
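#For example, get_seconds(2) is the epoch timestamp of two hours ago,
#i.e. roughly int(time.time()) - 7200.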
|
|
"""Pylons Decorators
Common decorators intended for use in controllers. Additional
decorators for use with controllers are in the
:mod:`~pylons.decorators.cache`, :mod:`~pylons.decorators.rest` and
:mod:`~pylons.decorators.secure` modules.
"""
import logging
import warnings
import formencode
import simplejson
from decorator import decorator
from formencode import api, htmlfill, variabledecode
from pylons.decorators.util import get_pylons
from pylons.i18n import _ as pylons_gettext
__all__ = ['jsonify', 'validate']
log = logging.getLogger(__name__)
class JSONEncoder(simplejson.JSONEncoder):
def default(self, obj):
encoder = getattr(obj, '__json__', None)
if encoder is not None:
return encoder()
return super(JSONEncoder, self).default(obj)
@decorator
def jsonify(func, *args, **kwargs):
"""Action decorator that formats output for JSON
Given a function that will return content, this decorator will turn
the result into JSON, with a content-type of 'application/json' and
output it.
"""
pylons = get_pylons(args)
pylons.response.headers['Content-Type'] = 'application/json; charset=utf-8'
data = func(*args, **kwargs)
if isinstance(data, (list, tuple)):
msg = "JSON responses with Array envelopes are susceptible to " \
"cross-site data leak attacks, see " \
"http://wiki.pylonshq.com/display/pylonsfaq/Warnings"
warnings.warn(msg, Warning, 2)
log.warning(msg)
log.debug("Returning JSON wrapped action output")
return simplejson.dumps(data, cls=JSONEncoder, encoding='utf-8')
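# A minimal usage sketch (hypothetical controller, illustration only):
#
#     class HelloController(BaseController):
#         @jsonify
#         def index(self):
#             # Serialized to JSON and served as application/json
#             return {'message': 'hello'}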
def validate(schema=None, validators=None, form=None, variable_decode=False,
dict_char='.', list_char='-', post_only=True, state=None,
on_get=False, **htmlfill_kwargs):
"""Validate input either for a FormEncode schema, or individual
validators
Given a form schema or dict of validators, validate will attempt to
validate the schema or validator list.
If validation was successful, the valid result dict will be saved
as ``self.form_result``. Otherwise, the action will be re-run as if
it was a GET, and the output will be filled by FormEncode's
htmlfill to fill in the form field errors.
``schema``
Refers to a FormEncode Schema object to use during validation.
``form``
Method used to display the form, which will be used to get the
HTML representation of the form for error filling.
``variable_decode``
Boolean to indicate whether FormEncode's variable decode
function should be run on the form input before validation.
``dict_char``
Passed through to FormEncode. Toggles the form field naming
scheme used to determine what is used to represent a dict. This
option is only applicable when used with variable_decode=True.
``list_char``
Passed through to FormEncode. Toggles the form field naming
scheme used to determine what is used to represent a list. This
option is only applicable when used with variable_decode=True.
``post_only``
Boolean that indicates whether or not GET (query) variables
should be included during validation.
.. warning::
``post_only`` applies to *where* the arguments to be
validated come from. It does *not* restrict the form to
only working with post, merely only checking POST vars.
``state``
Passed through to FormEncode for use in validators that utilize
a state object.
``on_get``
Whether to validate on GET requests. By default only POST
requests are validated.
Example::
class SomeController(BaseController):
def create(self, id):
return render('/myform.mako')
            @validate(schema=model.forms.myschema(), form='create')
def update(self, id):
# Do something with self.form_result
pass
"""
if state is None:
state = PylonsFormEncodeState
def wrapper(func, self, *args, **kwargs):
"""Decorator Wrapper function"""
request = self._py_object.request
errors = {}
        # Skip the validation if on_get is False and it's a GET request
if not on_get and request.environ['REQUEST_METHOD'] == 'GET':
return func(self, *args, **kwargs)
# If they want post args only, use just the post args
if post_only:
params = request.POST
else:
params = request.params
params = params.mixed()
if variable_decode:
log.debug("Running variable_decode on params")
decoded = variabledecode.variable_decode(params, dict_char,
list_char)
else:
decoded = params
if schema:
log.debug("Validating against a schema")
try:
self.form_result = schema.to_python(decoded, state)
except formencode.Invalid, e:
errors = e.unpack_errors(variable_decode, dict_char, list_char)
if validators:
log.debug("Validating against provided validators")
if isinstance(validators, dict):
if not hasattr(self, 'form_result'):
self.form_result = {}
for field, validator in validators.iteritems():
try:
self.form_result[field] = \
validator.to_python(decoded.get(field), state)
except formencode.Invalid, error:
errors[field] = error
if errors:
log.debug("Errors found in validation, parsing form with htmlfill "
"for errors")
request.environ['REQUEST_METHOD'] = 'GET'
self._py_object.tmpl_context.form_errors = errors
# If there's no form supplied, just continue with the current
# function call.
if not form:
return func(self, *args, **kwargs)
request.environ['pylons.routes_dict']['action'] = form
response = self._dispatch_call()
# If the form_content is an exception response, return it
if hasattr(response, '_exception'):
return response
htmlfill_kwargs2 = htmlfill_kwargs.copy()
htmlfill_kwargs2.setdefault('encoding', request.charset)
return htmlfill.render(response, defaults=params, errors=errors,
**htmlfill_kwargs2)
return func(self, *args, **kwargs)
return decorator(wrapper)
def pylons_formencode_gettext(value):
"""Translates a string ``value`` using pylons gettext first and if
that fails, formencode gettext.
    This makes it possible to "merge" localized error messages from
    FormEncode's built-in validators with application-specific validators.
"""
trans = pylons_gettext(value)
if trans == value:
# translation failed, try formencode
trans = api._stdtrans(value)
return trans
class PylonsFormEncodeState(object):
"""A ``state`` for FormEncode validate API that includes smart
``_`` hook.
The FormEncode library used by validate() decorator has some
provision for localizing error messages. In particular, it looks
for attribute ``_`` in the application-specific state object that
gets passed to every ``.to_python()`` call. If it is found, the
``_`` is assumed to be a gettext-like function and is called to
localize error messages.
One complication is that FormEncode ships with localized error
messages for standard validators so the user may want to re-use
them instead of gathering and translating everything from scratch.
To allow this, we pass as ``_`` a function which looks up
translation both in application and formencode message catalogs.
"""
_ = staticmethod(pylons_formencode_gettext)
|
|
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask.ext.restful import fields
from flask_restful_swagger import swagger
@swagger.model
class BlueprintState(object):
resource_fields = {
'id': fields.String,
'plan': fields.Raw,
'created_at': fields.String,
'updated_at': fields.String
}
def __init__(self, **kwargs):
self.plan = kwargs['plan']
self.id = kwargs['id']
self.created_at = kwargs['created_at']
self.updated_at = kwargs['updated_at']
@swagger.model
class BlueprintValidationStatus(object):
resource_fields = {
'blueprintId': fields.String(attribute='blueprint_id'),
'status': fields.String
}
def __init__(self, **kwargs):
self.blueprint_id = kwargs['blueprint_id']
self.status = kwargs['status']
@swagger.model
class Workflow(object):
resource_fields = {
'name': fields.String,
'created_at': fields.String,
'parameters': fields.Raw
}
def __init__(self, **kwargs):
self.name = kwargs['name']
self.created_at = kwargs['created_at']
self.parameters = kwargs['parameters']
@swagger.model
@swagger.nested(workflows=Workflow.__name__)
class Deployment(object):
resource_fields = {
'id': fields.String,
'created_at': fields.String,
'updated_at': fields.String,
'blueprint_id': fields.String,
'workflows': fields.List(fields.Nested(Workflow.resource_fields)),
'inputs': fields.Raw,
'policy_types': fields.Raw,
'policy_triggers': fields.Raw,
'groups': fields.Raw,
'outputs': fields.Raw
}
def __init__(self, **kwargs):
self.id = kwargs['id']
self.permalink = kwargs['permalink']
self.created_at = kwargs['created_at']
self.updated_at = kwargs['updated_at']
self.blueprint_id = kwargs['blueprint_id']
self.workflows = self._responsify_workflows_field(kwargs['workflows'])
self.inputs = kwargs['inputs']
self.policy_types = kwargs['policy_types']
self.policy_triggers = kwargs['policy_triggers']
self.groups = kwargs['groups']
self.outputs = kwargs['outputs']
@staticmethod
def _responsify_workflows_field(deployment_workflows):
if deployment_workflows is None:
return None
return [Workflow(name=wf_name,
created_at=None,
parameters=wf.get('parameters', dict()))
for wf_name, wf in deployment_workflows.iteritems()]
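    # Illustrative example (not in the original source): an input mapping such
    # as {'install': {'parameters': {'node_id': {'default': None}}}} becomes
    # [Workflow(name='install', created_at=None,
    #           parameters={'node_id': {'default': None}})].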
@swagger.model
class DeploymentOutputs(object):
resource_fields = {
'deployment_id': fields.String,
'outputs': fields.Raw
}
def __init__(self, **kwargs):
self.deployment_id = kwargs['deployment_id']
self.outputs = kwargs['outputs']
@swagger.model
class DeploymentModification(object):
resource_fields = {
'id': fields.String,
'status': fields.String,
'deployment_id': fields.String,
'node_instances': fields.Raw,
'created_at': fields.String,
'ended_at': fields.String,
'modified_nodes': fields.Raw,
'context': fields.Raw
}
def __init__(self, **kwargs):
self.id = kwargs['id']
self.status = kwargs['status']
self.deployment_id = kwargs['deployment_id']
self.node_instances = kwargs['node_instances']
self.created_at = kwargs['created_at']
self.ended_at = kwargs['ended_at']
self.modified_nodes = kwargs['modified_nodes']
self.context = kwargs['context']
@swagger.model
class Execution(object):
resource_fields = {
'id': fields.String,
'workflow_id': fields.String,
'blueprint_id': fields.String,
'deployment_id': fields.String,
'status': fields.String,
'error': fields.String,
'created_at': fields.String,
'parameters': fields.Raw,
'is_system_workflow': fields.Boolean
}
def __init__(self, **kwargs):
self.id = kwargs['id']
self.status = kwargs['status']
self.deployment_id = kwargs['deployment_id']
self.workflow_id = kwargs['workflow_id']
self.blueprint_id = kwargs['blueprint_id']
self.created_at = kwargs['created_at']
self.error = kwargs['error']
self.parameters = kwargs['parameters']
self.is_system_workflow = kwargs['is_system_workflow']
@swagger.model
class Node(object):
resource_fields = {
'id': fields.String,
'deployment_id': fields.String,
'blueprint_id': fields.String,
'type': fields.String,
'type_hierarchy': fields.Raw,
'number_of_instances': fields.String,
'planned_number_of_instances': fields.String,
'deploy_number_of_instances': fields.String,
'host_id': fields.String,
'properties': fields.Raw,
'operations': fields.Raw,
'plugins': fields.Raw,
'plugins_to_install': fields.Raw,
'relationships': fields.Raw
}
def __init__(self, **kwargs):
self.id = kwargs['id']
self.deployment_id = kwargs['deployment_id']
self.blueprint_id = kwargs['blueprint_id']
self.type = kwargs['type']
self.type_hierarchy = kwargs['type_hierarchy']
self.number_of_instances = kwargs['number_of_instances']
self.planned_number_of_instances = kwargs[
'planned_number_of_instances']
self.deploy_number_of_instances = kwargs['deploy_number_of_instances']
self.host_id = kwargs['host_id']
self.properties = kwargs['properties']
self.operations = kwargs['operations']
self.plugins = kwargs['plugins']
self.plugins_to_install = kwargs['plugins_to_install']
self.relationships = kwargs['relationships']
@swagger.model
class NodeInstance(object):
resource_fields = {
'id': fields.String,
'node_id': fields.String,
'host_id': fields.String,
'relationships': fields.Raw,
'deployment_id': fields.String,
'runtime_properties': fields.Raw,
'version': fields.Raw,
'state': fields.String
}
def __init__(self, **kwargs):
self.id = kwargs['id']
self.deployment_id = kwargs['deployment_id']
self.runtime_properties = kwargs['runtime_properties']
self.version = kwargs['version']
self.state = kwargs['state']
self.node_id = kwargs['node_id']
self.relationships = kwargs['relationships']
self.host_id = kwargs['host_id']
@swagger.model
class Status(object):
resource_fields = {
'status': fields.String,
'services': fields.Raw
}
def __init__(self, **kwargs):
self.status = kwargs['status']
self.services = kwargs['services']
@swagger.model
class ProviderContextPostStatus(object):
resource_fields = {
'status': fields.String
}
def __init__(self, **kwargs):
self.status = kwargs['status']
@swagger.model
class ProviderContext(object):
resource_fields = {
'name': fields.String,
'context': fields.Raw
}
def __init__(self, **kwargs):
self.context = kwargs['context']
self.name = kwargs['name']
@swagger.model
class Version(object):
resource_fields = {
'version': fields.String,
'build': fields.String,
'date': fields.String,
'commit': fields.String,
}
def __init__(self, **kwargs):
self.version = kwargs['version']
self.build = kwargs['build']
self.date = kwargs['date']
self.commit = kwargs['commit']
@swagger.model
class EvaluatedFunctions(object):
resource_fields = {
'deployment_id': fields.String,
'payload': fields.Raw
}
def __init__(self, **kwargs):
self.deployment_id = kwargs['deployment_id']
self.payload = kwargs['payload']
@swagger.model
class Tokens(object):
resource_fields = {
'value': fields.String
}
def __init__(self, **kwargs):
self.value = kwargs['value']
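# Illustrative sketch (not part of the original module): the classes above are
# plain value holders whose ``resource_fields`` drive flask-restful
# serialization. A hypothetical endpoint would marshal them roughly like this
# (the import path may differ across flask-restful versions); this helper is
# never called.
def _example_marshal_status():
    from flask_restful import marshal
    return marshal(Status(status='running', services=[]),
                   Status.resource_fields)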
|
|
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for cinder.volume.rpcapi
"""
import copy
import mock
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from cinder import context
from cinder import db
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_backup
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils
CONF = cfg.CONF
class VolumeRpcAPITestCase(test.TestCase):
def setUp(self):
super(VolumeRpcAPITestCase, self).setUp()
self.context = context.get_admin_context()
vol = {}
vol['host'] = 'fake_host'
vol['availability_zone'] = CONF.storage_availability_zone
vol['status'] = "available"
vol['attach_status'] = "detached"
vol['metadata'] = {"test_key": "test_val"}
vol['size'] = 1
volume = db.volume_create(self.context, vol)
kwargs = {
'status': "creating",
'progress': '0%',
'display_name': 'fake_name',
'display_description': 'fake_description'}
snapshot = tests_utils.create_snapshot(self.context, vol['id'],
**kwargs)
source_group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
host='fakehost@fakedrv#fakepool')
cgsnapshot = tests_utils.create_cgsnapshot(
self.context,
consistencygroup_id=source_group.id)
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
host='fakehost@fakedrv#fakepool',
cgsnapshot_id=cgsnapshot.id)
group2 = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
host='fakehost@fakedrv#fakepool',
source_cgid=source_group.id)
group = objects.ConsistencyGroup.get_by_id(self.context, group.id)
group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id)
self.fake_volume = jsonutils.to_primitive(volume)
self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol)
self.fake_volume_metadata = volume["volume_metadata"]
self.fake_snapshot = snapshot
self.fake_reservations = ["RESERVATION"]
self.fake_cg = group
self.fake_cg2 = group2
self.fake_src_cg = jsonutils.to_primitive(source_group)
self.fake_cgsnap = cgsnapshot
self.fake_backup_obj = fake_backup.fake_backup_obj(self.context)
def test_serialized_volume_has_id(self):
self.assertIn('id', self.fake_volume)
def _test_volume_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
if 'rpcapi_class' in kwargs:
rpcapi_class = kwargs['rpcapi_class']
del kwargs['rpcapi_class']
else:
rpcapi_class = volume_rpcapi.VolumeAPI
rpcapi = rpcapi_class()
expected_retval = 'foo' if method == 'call' else None
target = {
"version": kwargs.pop('version', rpcapi.RPC_API_VERSION)
}
if 'request_spec' in kwargs:
spec = jsonutils.to_primitive(kwargs['request_spec'])
kwargs['request_spec'] = spec
expected_msg = copy.deepcopy(kwargs)
if 'volume' in expected_msg:
volume = expected_msg['volume']
# NOTE(thangp): copy.deepcopy() is making oslo_versionedobjects
# think that 'metadata' was changed.
if isinstance(volume, objects.Volume):
volume.obj_reset_changes()
del expected_msg['volume']
expected_msg['volume_id'] = volume['id']
expected_msg['volume'] = volume
if 'snapshot' in expected_msg:
snapshot = expected_msg['snapshot']
del expected_msg['snapshot']
expected_msg['snapshot_id'] = snapshot.id
expected_msg['snapshot'] = snapshot
        if 'cgsnapshot' in expected_msg:
            cgsnapshot = expected_msg['cgsnapshot']
            if cgsnapshot:
                # Touch the lazy-loadable 'consistencygroup' field on both the
                # expected and the passed-in object so that they are compared
                # in the same (loaded) state later on.
                cgsnapshot.consistencygroup
                kwargs['cgsnapshot'].consistencygroup
if 'backup' in expected_msg:
backup = expected_msg['backup']
del expected_msg['backup']
expected_msg['backup_id'] = backup.id
expected_msg['backup'] = backup
if 'host' in expected_msg:
del expected_msg['host']
if 'dest_host' in expected_msg:
dest_host = expected_msg['dest_host']
dest_host_dict = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
del expected_msg['dest_host']
expected_msg['host'] = dest_host_dict
if 'new_volume' in expected_msg:
volume = expected_msg['new_volume']
expected_msg['new_volume_id'] = volume['id']
if 'host' in kwargs:
host = kwargs['host']
elif 'group' in kwargs:
host = kwargs['group']['host']
elif 'volume' in kwargs:
host = kwargs['volume']['host']
elif 'snapshot' in kwargs:
host = 'fake_host'
elif 'cgsnapshot' in kwargs:
host = kwargs['cgsnapshot'].consistencygroup.host
target['server'] = utils.extract_host(host)
target['topic'] = '%s.%s' % (CONF.volume_topic, host)
self.fake_args = None
self.fake_kwargs = None
def _fake_prepare_method(*args, **kwds):
for kwd in kwds:
self.assertEqual(kwds[kwd], target[kwd])
return rpcapi.client
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
self.stubs.Set(rpcapi.client, "prepare", _fake_prepare_method)
self.stubs.Set(rpcapi.client, rpc_method, _fake_rpc_method)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(expected_retval, retval)
expected_args = [ctxt, method]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(expected_arg, arg)
for kwarg, value in self.fake_kwargs.items():
if isinstance(value, objects.Snapshot):
expected_snapshot = expected_msg[kwarg].obj_to_primitive()
snapshot = value.obj_to_primitive()
self.assertEqual(expected_snapshot, snapshot)
elif isinstance(value, objects.ConsistencyGroup):
expected_cg = expected_msg[kwarg].obj_to_primitive()
cg = value.obj_to_primitive()
self.assertEqual(expected_cg, cg)
elif isinstance(value, objects.CGSnapshot):
expected_cgsnapshot = expected_msg[kwarg].obj_to_primitive()
cgsnapshot = value.obj_to_primitive()
self.assertEqual(expected_cgsnapshot, cgsnapshot)
elif isinstance(value, objects.Volume):
expected_volume = expected_msg[kwarg].obj_to_primitive()
volume = value.obj_to_primitive()
self.assertEqual(expected_volume, volume)
elif isinstance(value, objects.Backup):
expected_backup = expected_msg[kwarg].obj_to_primitive()
backup = value.obj_to_primitive()
self.assertEqual(expected_backup, backup)
else:
self.assertEqual(expected_msg[kwarg], value)
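    # Illustrative note (not in the original tests): each test below drives
    # _test_volume_api(), which stubs rpcapi.client.prepare() and the RPC
    # method ('call'/'cast') to capture the outgoing message and compare it
    # against expected_msg. A new RPC method would typically be covered with:
    #
    #     def test_new_rpc_method(self):
    #         self._test_volume_api('new_rpc_method', rpc_method='cast',
    #                               volume=self.fake_volume_obj,
    #                               version='1.40')
    #
    # ('new_rpc_method' and '1.40' are hypothetical.)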
def test_create_consistencygroup(self):
self._test_volume_api('create_consistencygroup', rpc_method='cast',
group=self.fake_cg, host='fake_host1',
version='1.26')
def test_delete_consistencygroup(self):
self._test_volume_api('delete_consistencygroup', rpc_method='cast',
group=self.fake_cg, version='1.26')
def test_update_consistencygroup(self):
self._test_volume_api('update_consistencygroup', rpc_method='cast',
group=self.fake_cg, add_volumes=['vol1'],
remove_volumes=['vol2'], version='1.26')
def test_create_cgsnapshot(self):
self._test_volume_api('create_cgsnapshot', rpc_method='cast',
cgsnapshot=self.fake_cgsnap, version='1.31')
def test_delete_cgsnapshot(self):
self._test_volume_api('delete_cgsnapshot', rpc_method='cast',
cgsnapshot=self.fake_cgsnap, version='1.31')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_create_volume(self, can_send_version):
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
host='fake_host1',
request_spec='fake_request_spec',
filter_properties='fake_properties',
allow_reschedule=True,
version='1.32')
can_send_version.assert_called_once_with('1.32')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_create_volume_old(self, can_send_version):
# Tests backwards compatibility with older clients
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
host='fake_host1',
request_spec='fake_request_spec',
filter_properties='fake_properties',
allow_reschedule=True,
version='1.24')
can_send_version.assert_called_once_with('1.32')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_create_volume_serialization(self, can_send_version):
request_spec = {"metadata": self.fake_volume_metadata}
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
host='fake_host1',
request_spec=request_spec,
filter_properties='fake_properties',
allow_reschedule=True,
version='1.32')
can_send_version.assert_called_once_with('1.32')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_delete_volume(self, can_send_version):
self._test_volume_api('delete_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
unmanage_only=False,
version='1.33')
can_send_version.assert_called_once_with('1.33')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_delete_volume_old(self, can_send_version):
self._test_volume_api('delete_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
unmanage_only=False,
version='1.15')
can_send_version.assert_called_once_with('1.33')
def test_create_snapshot(self):
self._test_volume_api('create_snapshot',
rpc_method='cast',
volume=self.fake_volume,
snapshot=self.fake_snapshot,
version='1.20')
def test_delete_snapshot(self):
self._test_volume_api('delete_snapshot',
rpc_method='cast',
snapshot=self.fake_snapshot,
host='fake_host',
unmanage_only=False,
version='1.20')
def test_delete_snapshot_with_unmanage_only(self):
self._test_volume_api('delete_snapshot',
rpc_method='cast',
snapshot=self.fake_snapshot,
host='fake_host',
unmanage_only=True,
version='1.20')
def test_attach_volume_to_instance(self):
self._test_volume_api('attach_volume',
rpc_method='call',
volume=self.fake_volume,
instance_uuid='fake_uuid',
host_name=None,
mountpoint='fake_mountpoint',
mode='ro',
version='1.11')
def test_attach_volume_to_host(self):
self._test_volume_api('attach_volume',
rpc_method='call',
volume=self.fake_volume,
instance_uuid=None,
host_name='fake_host',
mountpoint='fake_mountpoint',
mode='rw',
version='1.11')
def test_detach_volume(self):
self._test_volume_api('detach_volume',
rpc_method='call',
volume=self.fake_volume,
attachment_id='fake_uuid',
version="1.20")
def test_copy_volume_to_image(self):
self._test_volume_api('copy_volume_to_image',
rpc_method='cast',
volume=self.fake_volume,
image_meta={'id': 'fake_image_id',
'container_format': 'fake_type',
'disk_format': 'fake_type'},
version='1.3')
def test_initialize_connection(self):
self._test_volume_api('initialize_connection',
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector',
version='1.0')
def test_terminate_connection(self):
self._test_volume_api('terminate_connection',
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector',
force=False,
version='1.0')
def test_accept_transfer(self):
self._test_volume_api('accept_transfer',
rpc_method='call',
volume=self.fake_volume,
new_user='e5565fd0-06c8-11e3-'
'8ffd-0800200c9b77',
new_project='e4465fd0-06c8-11e3'
'-8ffd-0800200c9a66',
version='1.9')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_extend_volume(self, can_send_version):
self._test_volume_api('extend_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
new_size=1,
reservations=self.fake_reservations,
version='1.35')
can_send_version.assert_called_once_with('1.35')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_extend_volume_old(self, can_send_version):
self._test_volume_api('extend_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
new_size=1,
reservations=self.fake_reservations,
version='1.14')
can_send_version.assert_called_once_with('1.35')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_migrate_volume(self, can_send_version):
class FakeHost(object):
def __init__(self):
self.host = 'host'
self.capabilities = {}
dest_host = FakeHost()
self._test_volume_api('migrate_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
dest_host=dest_host,
force_host_copy=True,
version='1.36')
can_send_version.assert_called_once_with('1.36')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_migrate_volume_old(self, can_send_version):
class FakeHost(object):
def __init__(self):
self.host = 'host'
self.capabilities = {}
dest_host = FakeHost()
self._test_volume_api('migrate_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
dest_host=dest_host,
force_host_copy=True,
version='1.8')
can_send_version.assert_called_once_with('1.36')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_migrate_volume_completion(self, can_send_version):
self._test_volume_api('migrate_volume_completion',
rpc_method='call',
volume=self.fake_volume_obj,
new_volume=self.fake_volume_obj,
error=False,
version='1.36')
can_send_version.assert_called_once_with('1.36')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_migrate_volume_completion_old(self, can_send_version):
self._test_volume_api('migrate_volume_completion',
rpc_method='call',
volume=self.fake_volume_obj,
new_volume=self.fake_volume_obj,
error=False,
version='1.10')
can_send_version.assert_called_once_with('1.36')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
@mock.patch('cinder.quota.DbQuotaDriver.rollback')
def test_retype(self, rollback, can_send_version):
class FakeHost(object):
def __init__(self):
self.host = 'host'
self.capabilities = {}
dest_host = FakeHost()
self._test_volume_api('retype',
rpc_method='cast',
volume=self.fake_volume_obj,
new_type_id='fake',
dest_host=dest_host,
migration_policy='never',
reservations=self.fake_reservations,
old_reservations=self.fake_reservations,
version='1.37')
rollback.assert_not_called()
can_send_version.assert_called_once_with('1.37')
@mock.patch('cinder.quota.DbQuotaDriver.rollback')
def test_retype_version_134(self, rollback):
class FakeHost(object):
def __init__(self):
self.host = 'host'
self.capabilities = {}
dest_host = FakeHost()
with mock.patch.object(messaging.RPCClient,
'can_send_version',
side_effect=[False, True]) as can_send_version:
self._test_volume_api('retype',
rpc_method='cast',
volume=self.fake_volume_obj,
new_type_id='fake',
dest_host=dest_host,
migration_policy='never',
reservations=self.fake_reservations,
old_reservations=self.fake_reservations,
version='1.34')
self.assertTrue(rollback.called)
can_send_version.assert_any_call('1.37')
can_send_version.assert_any_call('1.34')
@mock.patch('cinder.quota.DbQuotaDriver.rollback')
def test_retype_version_112(self, rollback):
class FakeHost(object):
def __init__(self):
self.host = 'host'
self.capabilities = {}
dest_host = FakeHost()
with mock.patch.object(messaging.RPCClient,
'can_send_version',
side_effect=[False, False]) as can_send_version:
self._test_volume_api('retype',
rpc_method='cast',
volume=self.fake_volume_obj,
new_type_id='fake',
dest_host=dest_host,
migration_policy='never',
reservations=self.fake_reservations,
old_reservations=self.fake_reservations,
version='1.12')
self.assertTrue(rollback.called)
can_send_version.assert_any_call('1.37')
can_send_version.assert_any_call('1.34')
def test_manage_existing(self):
self._test_volume_api('manage_existing',
rpc_method='cast',
volume=self.fake_volume,
ref={'lv_name': 'foo'},
version='1.15')
def test_manage_existing_snapshot(self):
volume_update = {'host': 'fake_host'}
        snapshot = {
'id': 1,
'volume_id': 'fake_id',
'status': "creating",
'progress': '0%',
'volume_size': 0,
'display_name': 'fake_name',
'display_description': 'fake_description',
'volume': fake_volume.fake_db_volume(**volume_update),
'expected_attrs': ['volume'], }
        my_fake_snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
                                                               **snapshot)
self._test_volume_api('manage_existing_snapshot',
rpc_method='cast',
snapshot=my_fake_snapshot_obj,
ref='foo',
host='fake_host',
version='1.28')
def test_promote_replica(self):
self._test_volume_api('promote_replica',
rpc_method='cast',
volume=self.fake_volume,
version='1.17')
def test_reenable_replica(self):
self._test_volume_api('reenable_replication',
rpc_method='cast',
volume=self.fake_volume,
version='1.17')
def test_create_consistencygroup_from_src_cgsnapshot(self):
self._test_volume_api('create_consistencygroup_from_src',
rpc_method='cast',
group=self.fake_cg,
cgsnapshot=self.fake_cgsnap,
source_cg=None,
version='1.31')
def test_create_consistencygroup_from_src_cg(self):
self._test_volume_api('create_consistencygroup_from_src',
rpc_method='cast',
group=self.fake_cg2,
cgsnapshot=None,
source_cg=self.fake_src_cg,
version='1.31')
def test_get_capabilities(self):
self._test_volume_api('get_capabilities',
rpc_method='call',
host='fake_host',
discover=True,
version='1.29')
def test_remove_export(self):
self._test_volume_api('remove_export',
rpc_method='cast',
volume=self.fake_volume,
version='1.30')
def test_get_backup_device(self):
self._test_volume_api('get_backup_device',
rpc_method='call',
backup=self.fake_backup_obj,
volume=self.fake_volume_obj,
version='1.38')
def test_secure_file_operations_enabled(self):
self._test_volume_api('secure_file_operations_enabled',
rpc_method='call',
volume=self.fake_volume_obj,
version='1.38')
|
|
"""
Author: Keith Bourgoin, Emmett Butler
"""
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["ResponseFuture", "Handler", "ThreadingHandler", "RequestHandler"]
from collections import namedtuple
import gevent
import gevent.event
import gevent.lock
import gevent.queue
import gevent.socket as gsocket
import logging
import socket as pysocket
import sys as _sys
import threading
import time
from .utils.compat import Queue, Empty, Semaphore
log = logging.getLogger(__name__)
class ResponseFuture(object):
"""A response which may have a value at some point."""
def __init__(self, handler):
"""
:type handler: :class:`pykafka.handlers.Handler`
"""
self.handler = handler
self.error = False
self._ready = handler.Event()
def set_response(self, response):
"""Set response data and trigger get method."""
self.response = response
self._ready.set()
def set_error(self, error):
"""Set error and trigger get method."""
self.error = error
self._ready.set()
def get(self, response_cls=None, timeout=None):
"""Block until data is ready and return.
Raises an exception if there was an error.
"""
self._ready.wait(timeout)
if self.error:
raise self.error
if response_cls:
return response_cls(self.response)
else:
return self.response
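# Illustrative sketch (not part of the original module): one side resolves the
# future, the other blocks on get(). ``handler`` is assumed to be a Handler
# instance such as ThreadingHandler().
#
#     future = ResponseFuture(handler)
#     handler.spawn(lambda: future.set_response(b"payload"))
#     future.get(timeout=5)   # -> b"payload"; raises if set_error() was used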
class Handler(object):
"""Base class for Handler classes"""
def spawn(self, target, *args, **kwargs):
"""Create the worker that will process the work to be handled"""
raise NotImplementedError
class ThreadingHandler(Handler):
"""A handler that uses a :class:`threading.Thread` to perform its work"""
Queue = Queue
Event = threading.Event
Lock = threading.Lock
Semaphore = Semaphore
Socket = pysocket
_workers_spawned = 0
def sleep(self, seconds=0):
time.sleep(seconds)
# turn off RLock's super annoying default logging if possible
    def RLock(*args, **kwargs):
        # Defined without an explicit ``self``: when called as a bound method
        # the instance arrives as args[0], hence the args[1:] slice below.
        kwargs['verbose'] = False
        try:
            return threading.RLock(*args[1:], **kwargs)
        except TypeError:
            # Python 3's RLock no longer accepts ``verbose``
            kwargs.pop('verbose')
            return threading.RLock(*args[1:], **kwargs)
def spawn(self, target, *args, **kwargs):
if 'name' in kwargs:
kwargs['name'] = "{}: {}".format(ThreadingHandler._workers_spawned, kwargs['name'])
t = threading.Thread(target=target, *args, **kwargs)
t.daemon = True
t.start()
ThreadingHandler._workers_spawned += 1
return t
class GEventHandler(Handler):
"""A handler that uses a greenlet to perform its work"""
Queue = gevent.queue.JoinableQueue
Event = gevent.event.Event
Lock = gevent.lock.RLock # fixme
RLock = gevent.lock.RLock
Semaphore = gevent.lock.Semaphore
Socket = gsocket
def sleep(self, seconds=0):
gevent.sleep(seconds)
def spawn(self, target, *args, **kwargs):
# Greenlets don't support naming
if 'name' in kwargs:
kwargs.pop('name')
t = gevent.spawn(target, *args, **kwargs)
return t
class RequestHandler(object):
"""Uses a Handler instance to dispatch requests."""
Task = namedtuple('Task', ['request', 'future'])
Shared = namedtuple('Shared', ['connection', 'requests', 'ending'])
def __init__(self, handler, connection):
"""
:type handler: :class:`pykafka.handlers.Handler`
:type connection: :class:`pykafka.connection.BrokerConnection`
"""
self.handler = handler
# NB self.shared is referenced directly by _start_thread(), so be careful not to
# rebind it
self.shared = self.Shared(connection=connection,
requests=handler.Queue(),
ending=handler.Event())
def __del__(self):
self.stop()
def request(self, request, has_response=True):
"""Construct a new request
:type request: :class:`pykafka.protocol.Request`
:param has_response: Whether this request will return a response
:returns: :class:`pykafka.handlers.ResponseFuture`
"""
future = None
if has_response:
future = ResponseFuture(self.handler)
task = self.Task(request, future)
self.shared.requests.put(task)
return future
def start(self):
"""Start the request processor."""
self.t = self._start_thread()
def stop(self):
"""Stop the request processor."""
shared = self.shared
self.shared = None
log.info("RequestHandler.stop: about to flush requests queue")
shared.requests.join()
shared.ending.set()
def _start_thread(self):
"""Run the request processor"""
# We pass a direct reference to `shared` into the worker, to avoid
# that thread holding a ref to `self`, which would prevent GC. A
# previous version of this used a weakref to `self`, but would
# potentially abort the thread before the requests queue was empty
shared = self.shared
def worker():
try:
while not shared.ending.is_set():
try:
# set a timeout so we check `ending` every so often
task = shared.requests.get(timeout=1)
except Empty:
continue
try:
shared.connection.request(task.request)
if task.future:
res = shared.connection.response()
task.future.set_response(res)
except Exception as e:
if task.future:
task.future.set_error(e)
finally:
shared.requests.task_done()
log.info("RequestHandler worker: exiting cleanly")
except:
# deal with interpreter shutdown in the same way that
# python 3.x's threading module does, swallowing any
# errors raised when core modules such as sys have
# already been destroyed
if _sys is None:
return
raise
name = "pykafka.RequestHandler.worker for {}:{}".format(
self.shared.connection.host, self.shared.connection.port)
return self.handler.spawn(worker, name=name)
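# Illustrative sketch (not part of the original module): the usual round trip
# through RequestHandler, assuming a connection object that exposes the
# ``request()``/``response()`` methods used by the worker above. Never called
# here; shown only for orientation.
def _example_request_round_trip(connection, request):
    handler = ThreadingHandler()
    req_handler = RequestHandler(handler, connection)
    req_handler.start()                    # spawns the worker thread
    future = req_handler.request(request)  # enqueue; returns a ResponseFuture
    raw_response = future.get(timeout=30)  # block until the worker answers
    req_handler.stop()                     # flush the queue and end the worker
    return raw_response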
|
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import, unicode_literals, print_function
import os
import re
import pytest
import inspect
import logging
import fnmatch
from osbs.core import Openshift
from osbs.http import HttpResponse
from osbs.conf import Configuration
from osbs.api import OSBS
from tests.constants import (TEST_BUILD, TEST_CANCELLED_BUILD, TEST_ORCHESTRATOR_BUILD,
TEST_GIT_BRANCH, TEST_BUILD_CONFIG, TEST_GIT_URI_HUMAN_NAME,
TEST_KOJI_TASK_ID, TEST_IMAGESTREAM)
from tempfile import NamedTemporaryFile
from textwrap import dedent
try:
# py2
import httplib
import urlparse
except ImportError:
# py3
import http.client as httplib
import urllib.parse as urlparse
logger = logging.getLogger("osbs.tests")
API_VER = Configuration.get_openshift_api_version()
OAPI_PREFIX = "/oapi/{v}/".format(v=API_VER)
API_PREFIX = "/api/{v}/".format(v=API_VER)
class StreamingResponse(object):
def __init__(self, status_code=200, content=b'', headers=None):
self.status_code = status_code
self.content = content
self.headers = headers or {}
def iter_lines(self):
yield self.content
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
class Connection(object):
def __init__(self, version="0.5.4"):
self.version = version
self.response_mapping = ResponseMapping(version,
lookup=self.get_definition_for)
# mapping of urls or tuples of urls to responses; use get_definition_for
# to get values from this dict
#
# The files are captured using the command line tool's
# --capture-dir parameter, and edited as needed.
self.DEFINITION = {
(OAPI_PREFIX + "namespaces/default/builds",
OAPI_PREFIX + "namespaces/default/builds/"): {
"get": {
# Contains a list of builds
"file": "builds_list.json",
},
"post": {
# Contains a single build named test-build-123
"file": "build_test-build-123.json",
},
},
(OAPI_PREFIX + "namespaces/default/builds?fieldSelector=status%3DRunning",
OAPI_PREFIX + "namespaces/default/builds/?fieldSelector=status%3DRunning"): {
"get": {
# Contains a list of builds
"file": "builds_list.json",
}
},
OAPI_PREFIX + "namespaces/default/builds/"
"?labelSelector=koji-task-id%3D{task}".format(task=TEST_KOJI_TASK_ID): {
"get": {
# Contains a list of builds
"file": "builds_list.json",
}
},
            # Some 'builds' requests come with a trailing slash, some without:
(OAPI_PREFIX + "namespaces/default/builds/%s" % TEST_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/" % TEST_BUILD): {
"get": {
# Contains a single build in Completed phase
# named test-build-123
"file": "build_test-build-123.json",
},
"put": {
"file": "build_test-build-123.json",
}
},
            # Some 'builds' requests come with a trailing slash, some without:
(OAPI_PREFIX + "namespaces/default/builds/%s" % TEST_ORCHESTRATOR_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/" % TEST_ORCHESTRATOR_BUILD): {
"get": {
# Contains a single build in Completed phase
# named test-orchestrator-build-123
"file": "build_test-orchestrator-build-123.json",
},
"put": {
"file": "build_test-orchestrator-build-123.json",
}
},
# Simulate build cancellation
(OAPI_PREFIX + "namespaces/default/builds/%s" % TEST_CANCELLED_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/" % TEST_CANCELLED_BUILD): {
"get": {
# Contains a single build in Completed phase
# named test-build-123
"file": "build_test-build-cancel-123_get.json",
},
"put": {
"file": "build_test-build-cancel-123_put.json",
}
},
(OAPI_PREFIX + "namespaces/default/builds/%s/log/" % TEST_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/log/?follow=0" % TEST_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/log/?follow=1" % TEST_BUILD): {
"get": {
# Lines of text
"file": "build_test-build-123_logs.txt",
},
},
(OAPI_PREFIX + "namespaces/default/builds/%s/log/" % TEST_ORCHESTRATOR_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/log/?follow=0" % TEST_ORCHESTRATOR_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/log/?follow=1"
% TEST_ORCHESTRATOR_BUILD): {
"get": {
# Lines of text
"file": "build_test-orchestrator-build-123_logs.txt",
},
},
("/oauth/authorize",
"/oauth/authorize?client_id=openshift-challenging-client&response_type=token",
"/oauth/authorize?response_type=token&client_id=openshift-challenging-client"): {
"get": {
"file": "authorize.txt",
"custom_callback": self.process_authorize,
}
},
OAPI_PREFIX + "users/~/": {
"get": {
"file": "get_user.json",
}
},
OAPI_PREFIX + "watch/namespaces/default/builds/%s/" % TEST_BUILD: {
"get": {
# Single MODIFIED item, with a Build object in
# Completed phase named test-build-123
"file": "watch_build_test-build-123.json",
}
},
OAPI_PREFIX + "watch/namespaces/default/builds/%s/" % TEST_ORCHESTRATOR_BUILD: {
"get": {
# Single MODIFIED item, with a Build object in
# Completed phase named test-build-123
"file": "watch_build_test-orchestrator-build-123.json",
}
},
OAPI_PREFIX + "namespaces/default/buildconfigs/": {
"post": {
# Contains a BuildConfig named test-build-config-123
"file": "created_build_config_test-build-config-123.json",
}
},
OAPI_PREFIX + "namespaces/default/buildconfigs/%s/instantiate" % TEST_BUILD_CONFIG: {
"post": {
# A Build named test-build-123 instantiated from a
# BuildConfig named test-build-config-123
"file": "instantiated_test-build-config-123.json",
}
},
            # use both the version with a trailing slash and the one without
(OAPI_PREFIX + "namespaces/default/buildconfigs/%s" % TEST_BUILD_CONFIG,
OAPI_PREFIX + "namespaces/default/buildconfigs/%s/" % TEST_BUILD_CONFIG,
((OAPI_PREFIX + "namespaces/default/buildconfigs/?labelSelector=" +
"git-repo-name%%3D%s" "%%2C" "git-branch%%3D%s"
) % (TEST_GIT_URI_HUMAN_NAME, TEST_GIT_BRANCH)),
): {
"get": {
"custom_callback":
self.with_status_code(httplib.NOT_FOUND),
                    # Empty file (no response content, as the status is 404)
"file": None,
}
},
OAPI_PREFIX + "namespaces/default/builds/?labelSelector=buildconfig%%3D%s" %
TEST_BUILD_CONFIG: {
"get": {
# Contains a BuildList with Builds labeled with
# buildconfig=fedora23-something, none of which
# are running
"file": "builds_list.json"
}
},
OAPI_PREFIX + "namespaces/default/imagestreams/%s" %
TEST_IMAGESTREAM: {
"get": {
# Contains imagestream
# with 3 tags
"file": "imagestream.json"
},
"put": {
# Contains imagestream
# with 3 tags
"file": "imagestream.json"
}
},
API_PREFIX + "namespaces/default/pods/?labelSelector=openshift.io%%2Fbuild.name%%3D%s" %
TEST_BUILD: {
"get": {
# Contains a list of build pods, just needs not to
# be empty
"file": "pods.json",
},
},
API_PREFIX + "namespaces/default/pods/?labelSelector=openshift.io%%2Fbuild.name%%3D%s" %
TEST_ORCHESTRATOR_BUILD: {
"get": {
# Contains a list of build pods, just needs not to
# be empty
"file": "pods.json",
},
},
API_PREFIX + "namespaces/default/resourcequotas/": {
# Make the POST fail so we can test PUT
"post": {
"custom_callback": self.with_status_code(httplib.CONFLICT),
                    # Response is not really empty, but its content isn't
                    # relevant to the tests
"file": None,
},
},
API_PREFIX + "namespaces/default/resourcequotas/pause": {
"put": {
"file": None,
},
"delete": {
"file": None, # not really empty but not relevant
},
},
(API_PREFIX + "namespaces/default/configmaps/",
API_PREFIX + "namespaces/default/configmaps/special-config"): {
"post": {
# Contains a configMap
"file": "create_config_map.json",
},
"get": {
# Contains a configMap
"file": "create_config_map.json",
},
"delete": {
# doesn't return anything
"file": None,
},
},
}
@staticmethod
def process_authorize(key, content):
match = re.findall("[Ll]ocation: (.+)", content.decode("utf-8"))
headers = {
"location": match[0],
}
logger.debug("headers: %s", headers)
return {
"headers": headers
}
@staticmethod
def with_status_code(status_code):
def custom_func(key, content):
return {
"content": content,
"status_code": status_code,
}
return custom_func
def get_definition_for(self, key):
"""
Returns key and value associated with given key in DEFINITION dict.
This means that either key is an actual dict key in DEFINITION or it is member
of a tuple that serves as a dict key in DEFINITION.
"""
try:
# Try a direct look-up
return key, self.DEFINITION[key]
except KeyError:
# Try all the tuples
for k, v in self.DEFINITION.items():
if isinstance(k, tuple):
for tup in k:
if fnmatch.fnmatch(key, tup):
return k, v
else:
if fnmatch.fnmatch(key, k):
return k, v
raise ValueError("Can't find '%s' in url mapping definition" % key)
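    # Illustrative example (not in the original tests): a URL such as
    # OAPI_PREFIX + "namespaces/default/builds/" is not a direct dict key,
    # but it fnmatch-es a member of the tuple key
    # (OAPI_PREFIX + "namespaces/default/builds",
    #  OAPI_PREFIX + "namespaces/default/builds/"), so both spellings share
    # one canned response definition.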
@staticmethod
def response(status_code=200, content=b'', headers=None):
return HttpResponse(status_code, headers or {}, content=content)
def request(self, url, method, stream=None, *args, **kwargs):
parsed_url = urlparse.urlparse(url)
# fragment = parsed_url.fragment
# parsed_fragment = urlparse.parse_qs(fragment)
url_path = parsed_url.path
if parsed_url.query:
url_path += '?' + parsed_url.query
logger.info("URL path is '%s'", url_path)
kwargs = self.response_mapping.response_mapping(url_path, method)
if stream:
return StreamingResponse(**kwargs)
else:
return self.response(**kwargs)
def get(self, url, *args, **kwargs):
return self.request(url, "get", *args, **kwargs)
def post(self, url, *args, **kwargs):
return self.request(url, "post", *args, **kwargs)
def put(self, url, *args, **kwargs):
return self.request(url, "put", *args, **kwargs)
def delete(self, url, *args, **kwargs):
return self.request(url, "delete", *args, **kwargs)
@pytest.fixture(params=["0.5.4", "1.0.4"])
def openshift(request):
os_inst = Openshift(OAPI_PREFIX, API_VER, "/oauth/authorize",
k8s_api_url=API_PREFIX)
os_inst._con = Connection(request.param)
return os_inst
@pytest.fixture
def osbs(openshift, kwargs=None, additional_config=None, platform_descriptors=None):
kwargs = kwargs or {}
platform_descriptors = platform_descriptors or {}
kwargs.setdefault('build_json_dir', 'inputs')
kwargs.setdefault('registry_uri', 'registry.example.com')
kwargs.setdefault('additional_general', '')
with NamedTemporaryFile(mode="wt") as fp:
config = dedent("""\
[general]
build_json_dir = {build_json_dir}
{additional_general}
[default]
openshift_url = /
registry_uri = {registry_uri}
sources_command = fedpkg sources
vendor = Example, Inc.
build_host = localhost
authoritative_registry = registry.example.com
distribution_scope = authoritative-source-only
koji_root = http://koji.example.com/kojiroot
koji_hub = http://koji.example.com/kojihub
use_auth = false
can_orchestrate = true
""")
if additional_config is not None:
config += additional_config
config += '\n'
for platform, platform_info in platform_descriptors.items():
if not platform_info:
continue
config += '[platform:{0}]\n'.format(platform)
for item, value in platform_info.items():
config += '{0} = {1}\n'.format(item, value)
fp.write(config.format(**kwargs))
fp.flush()
dummy_config = Configuration(fp.name)
osbs = OSBS(dummy_config, dummy_config)
osbs.os = openshift
return osbs
@pytest.fixture
def osbs_with_pulp(openshift, platform_descriptors=None):
additional_config = dedent("""\
pulp_registry_name = pulp
pulp_secret = secret""")
kwargs = {'registry_uri': 'registry.example.com/v2'}
return osbs(openshift, kwargs=kwargs,
additional_config=additional_config,
platform_descriptors=platform_descriptors)
@pytest.fixture
def osbs_cant_orchestrate(openshift):
with NamedTemporaryFile(mode="wt") as fp:
fp.write("""
[general]
build_json_dir = {build_json_dir}
[default]
openshift_url = /
registry_uri = registry.example.com
sources_command = fedpkg sources
vendor = Example, Inc.
build_host = localhost
authoritative_registry = registry.example.com
distribution_scope = authoritative-source-only
koji_root = http://koji.example.com/kojiroot
koji_hub = http://koji.example.com/kojihub
use_auth = false
""".format(build_json_dir="inputs"))
fp.flush()
dummy_config = Configuration(fp.name)
osbs = OSBS(dummy_config, dummy_config)
osbs.os = openshift
return osbs
@pytest.fixture
def osbs106(openshift):
with NamedTemporaryFile(mode="wt") as fp:
fp.write("""
[general]
build_json_dir = {build_json_dir}
openshift_required_version = 1.0.6
[default]
openshift_url = /
registry_uri = registry.example.com
sources_command = fedpkg sources
vendor = Example, Inc.
build_host = localhost
authoritative_registry = registry.example.com
distribution_scope = authoritative-source-only
koji_root = http://koji.example.com/kojiroot
koji_hub = http://koji.example.com/kojihub
use_auth = false
""".format(build_json_dir="inputs"))
fp.flush()
dummy_config = Configuration(fp.name)
osbs = OSBS(dummy_config, dummy_config)
osbs.os = openshift
return osbs
class ResponseMapping(object):
def __init__(self, version, lookup):
self.version = version
self.lookup = lookup
def get_response_content(self, file_name):
this_file = inspect.getfile(ResponseMapping)
this_dir = os.path.dirname(this_file)
json_path = os.path.join(this_dir, "mock_jsons", self.version, file_name)
logger.debug("File: %s", json_path)
with open(json_path, "rb") as fd:
return fd.read()
def response_mapping(self, url_path, method):
key, value_to_use = self.lookup(url_path)
file_name = value_to_use[method]["file"]
logger.debug("API response content: %s", file_name)
custom_callback = value_to_use[method].get("custom_callback", None)
if file_name is None:
content = b''
else:
content = self.get_response_content(file_name)
if custom_callback:
logger.debug("Custom API callback: %s", custom_callback)
return custom_callback(key, content)
else:
return {"content": content}
|
|
"""
Copyright (c) 2015-2017, 2020 Rocky Bernstein
Copyright (c) 1998-2002 John Aycock
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os, pickle, re, sys
if sys.version[0:3] <= "2.3":
from sets import Set as set
def sorted(iterable):
temp = [x for x in iterable]
temp.sort()
return temp
def _namelist(instance):
namelist, namedict, classlist = [], {}, [instance.__class__]
for c in classlist:
for b in c.__bases__:
classlist.append(b)
for name in list(c.__dict__.keys()):
if name not in namedict:
namelist.append(name)
namedict[name] = 1
return namelist
def rule2str(rule):
return ("%s ::= %s" % (rule[0], " ".join(rule[1]))).rstrip()
class _State:
"""
    Extracted from GenericParser and made global so that [un]pickling works.
"""
def __init__(self, stateno, items):
self.T, self.complete, self.items = [], [], items
self.stateno = stateno
# DEFAULT_DEBUG = {'rules': True, 'transition': True, 'reduce' : True,
# 'errorstack': 'full', 'dups': False }
# DEFAULT_DEBUG = {'rules': False, 'transition': False, 'reduce' : True,
# 'errorstack': 'plain', 'dups': False }
DEFAULT_DEBUG = {
"rules": False,
"transition": False,
"reduce": False,
"errorstack": None,
"context": True,
"dups": False,
}
class GenericParser(object):
"""
An Earley parser, as per J. Earley, "An Efficient Context-Free
Parsing Algorithm", CACM 13(2), pp. 94-102. Also J. C. Earley,
"An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
Carnegie-Mellon University, August 1968. New formulation of
the parser according to J. Aycock, "Practical Earley Parsing
and the SPARK Toolkit", Ph.D. thesis, University of Victoria,
2001, and J. Aycock and R. N. Horspool, "Practical Earley
Parsing", unpublished paper, 2001.
"""
def __init__(self, start, debug=DEFAULT_DEBUG, coverage_path=None):
"""_start_ : grammar start symbol;
_debug_ : produce optional parsing debug information
_profile_ : if not None should be a file path to open
with where to store profile is stored
"""
self.rules = {}
self.rule2func = {}
self.rule2name = {}
# grammar coverage information
self.coverage_path = coverage_path
if coverage_path:
self.profile_info = {}
if isinstance(coverage_path, str):
if os.path.exists(coverage_path):
self.profile_info = pickle.load(open(coverage_path, "rb"))
else:
self.profile_info = None
# When set, shows additional debug output
self.debug = debug
# Have a place to tag list-like rules. These include rules of the form:
# a ::= x+
# b ::= x*
#
# These kinds of rules, we should create as a list when building a
# parse tree rather than a sequence of nested derivations
self.list_like_nt = set()
self.optional_nt = set()
self.collectRules()
if start not in self.rules:
raise TypeError('Start symbol "%s" is not in LHS of any rule' % start)
self.augment(start)
self.ruleschanged = True
# The key is an LHS non-terminal string. The value
# should be AST if you want to pass an AST to the routine
# to do the checking. The routine called is
# self.reduce_is_invalid and is passed the rule,
# the list of tokens, the current state item,
# and index of the next last token index and
# the first token index for the reduction.
self.check_reduce = {}
_NULLABLE = r"\e_"
_START = "START"
_BOF = "|-"
#
# When pickling, take the time to generate the full state machine;
# some information is then extraneous, too. Unfortunately we
# can't save the rule2func map.
#
def __getstate__(self):
if self.ruleschanged:
#
# XXX - duplicated from parse()
#
self.computeNull()
self.newrules = {}
self.new2old = {}
self.makeNewRules()
self.ruleschanged = False
self.edges, self.cores = {}, {}
self.states = {0: self.makeState0()}
self.makeState(0, self._BOF)
#
# XXX - should find a better way to do this..
#
changes = True
while changes:
changes = False
for k, v in list(self.edges.items()):
if v is None:
state, sym = k
if state in self.states:
self.goto(state, sym)
changes = True
rv = self.__dict__.copy()
for s in list(self.states.values()):
del s.items
del rv["rule2func"]
del rv["nullable"]
del rv["cores"]
return rv
def __setstate__(self, D):
self.rules = {}
self.rule2func = {}
self.rule2name = {}
self.collectRules()
start = D["rules"][self._START][0][1][1] # Blech.
self.augment(start)
D["rule2func"] = self.rule2func
D["makeSet"] = self.makeSet_fast
self.__dict__ = D
#
# A hook for GenericASTBuilder and GenericASTMatcher. Mess
# thee not with this; nor shall thee toucheth the _preprocess
# argument to addRule.
#
def preprocess(self, rule, func):
return rule, func
def addRule(self, doc, func, _preprocess=True):
"""Add a grammar rules to _self.rules_, _self.rule2func_,
and _self.rule2name_
Comments, lines starting with # and blank lines are stripped from
doc. We also allow limited form of * and + when there it is of
the RHS has a single item, e.g.
stmts ::= stmt+
"""
fn = func
# remove blanks lines and comment lines, e.g. lines starting with "#"
doc = os.linesep.join(
[s for s in doc.splitlines() if s and not re.match(r"^\s*#", s)]
)
rules = doc.split()
index = []
for i in range(len(rules)):
if rules[i] == "::=":
index.append(i - 1)
index.append(len(rules))
for i in range(len(index) - 1):
lhs = rules[index[i]]
rhs = rules[index[i] + 2 : index[i + 1]]
rule = (lhs, tuple(rhs))
if _preprocess:
rule, fn = self.preprocess(rule, func)
# Handle a stripped-down form of *, +, and ?:
# allow only one nonterminal on the right-hand side
if len(rule[1]) == 1:
if rule[1][0] == rule[0]:
raise TypeError("Complete recursive rule %s" % rule2str(rule))
repeat = rule[1][-1][-1]
if repeat in ("*", "+", "?"):
nt = rule[1][-1][:-1]
if repeat == "?":
new_rule_pair = [rule[0], list((nt,))]
self.optional_nt.add(rule[0])
else:
self.list_like_nt.add(rule[0])
new_rule_pair = [rule[0], [rule[0]] + list((nt,))]
new_rule = rule2str(new_rule_pair)
self.addRule(new_rule, func, _preprocess)
if repeat == "+":
second_rule_pair = (lhs, (nt,))
else:
second_rule_pair = (lhs, tuple())
new_rule = rule2str(second_rule_pair)
self.addRule(new_rule, func, _preprocess)
continue
if lhs in self.rules:
if rule in self.rules[lhs]:
if "dups" in self.debug and self.debug["dups"]:
self.duplicate_rule(rule)
continue
self.rules[lhs].append(rule)
else:
self.rules[lhs] = [rule]
self.rule2func[rule] = fn
self.rule2name[rule] = func.__name__[2:]
self.ruleschanged = True
        # Note: for empty rules, i.e. len(rule[1]) == 0, we don't
        # call reductions explicitly. Instead they are computed
        # implicitly.
if self.profile_info is not None and len(rule[1]) > 0:
rule_str = self.reduce_string(rule)
if rule_str not in self.profile_info:
self.profile_info[rule_str] = 0
pass
return
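    # Illustrative example (not in the original source): rules are normally
    # supplied through ``p_``-prefixed methods whose docstrings hold the BNF;
    # collectRules() feeds them to addRule(). A hypothetical subclass:
    #
    #     class ExprParser(GenericParser):
    #         def __init__(self):
    #             GenericParser.__init__(self, "expr")
    #         def p_expr(self, args):
    #             """expr ::= expr ADD_OP term
    #                expr ::= term
    #                term ::= NUMBER
    #             """
    #             return args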
def remove_rules(self, doc):
"""Remove a grammar rules from _self.rules_, _self.rule2func_,
and _self.rule2name_
"""
# remove blanks lines and comment lines, e.g. lines starting with "#"
doc = os.linesep.join(
[s for s in doc.splitlines() if s and not re.match(r"^\s*#", s)]
)
rules = doc.split()
index = []
for i in range(len(rules)):
if rules[i] == "::=":
index.append(i - 1)
index.append(len(rules))
for i in range(len(index) - 1):
lhs = rules[index[i]]
rhs = rules[index[i] + 2 : index[i + 1]]
rule = (lhs, tuple(rhs))
if lhs not in self.rules:
return
if rule in self.rules[lhs]:
self.rules[lhs].remove(rule)
del self.rule2func[rule]
del self.rule2name[rule]
self.ruleschanged = True
# If we are profiling, remove this rule from that as well
if self.profile_info is not None and len(rule[1]) > 0:
rule_str = self.reduce_string(rule)
if rule_str and rule_str in self.profile_info:
del self.profile_info[rule_str]
pass
pass
pass
return
remove_rule = remove_rules
def collectRules(self):
for name in _namelist(self):
if name[:2] == "p_":
func = getattr(self, name)
doc = func.__doc__
self.addRule(doc, func)
def augment(self, start):
rule = "%s ::= %s %s" % (self._START, self._BOF, start)
self.addRule(rule, lambda args: args[1], False)
def computeNull(self):
self.nullable = {}
tbd = []
for rulelist in list(self.rules.values()):
# FIXME: deleting a rule may leave a null entry.
# Perhaps we should improve deletion so it doesn't leave a trace?
if not rulelist:
continue
lhs = rulelist[0][0]
self.nullable[lhs] = 0
for rule in rulelist:
rhs = rule[1]
if len(rhs) == 0:
self.nullable[lhs] = 1
continue
#
# We only need to consider rules which
# consist entirely of nonterminal symbols.
# This should be a savings on typical
# grammars.
#
for sym in rhs:
if sym not in self.rules:
break
else:
tbd.append(rule)
changes = 1
while changes:
changes = 0
for lhs, rhs in tbd:
if self.nullable[lhs]:
continue
for sym in rhs:
if not self.nullable[sym]:
break
else:
self.nullable[lhs] = 1
changes = 1
def makeState0(self):
s0 = _State(0, [])
for rule in self.newrules[self._START]:
s0.items.append((rule, 0))
return s0
def finalState(self, tokens):
#
# Yuck.
#
if len(self.newrules[self._START]) == 2 and len(tokens) == 0:
return 1
start = self.rules[self._START][0][1][1]
return self.goto(1, start)
def makeNewRules(self):
worklist = []
for rulelist in list(self.rules.values()):
for rule in rulelist:
worklist.append((rule, 0, 1, rule))
for rule, i, candidate, oldrule in worklist:
lhs, rhs = rule
n = len(rhs)
while i < n:
sym = rhs[i]
if sym not in self.rules or not (
sym in self.nullable and self.nullable[sym]
):
candidate = 0
i += 1
continue
newrhs = list(rhs)
newrhs[i] = self._NULLABLE + sym
newrule = (lhs, tuple(newrhs))
worklist.append((newrule, i + 1, candidate, oldrule))
candidate = 0
i = i + 1
else:
if candidate:
lhs = self._NULLABLE + lhs
rule = (lhs, rhs)
if lhs in self.newrules:
self.newrules[lhs].append(rule)
else:
self.newrules[lhs] = [rule]
self.new2old[rule] = oldrule
def typestring(self, token):
return None
def duplicate_rule(self, rule):
print("Duplicate rule:\n\t%s" % rule2str(rule))
def error(self, tokens, index):
print("Syntax error at or near token %d: `%s'" % (index, tokens[index]))
if "context" in self.debug and self.debug["context"]:
start = index - 2 if index - 2 >= 0 else 0
tokens = [str(tokens[i]) for i in range(start, index + 1)]
print("Token context:\n\t%s" % ("\n\t".join(tokens)))
raise SystemExit
def errorstack(self, tokens, i, full=False):
"""Show the stacks of completed symbols.
We get this by inspecting the current transitions
possible and from that extracting the set of states
we are in, and from there we look at the set of
symbols before the "dot". If full is True, we
show the entire rule with the dot placement.
Otherwise just the rule up to the dot.
"""
print("\n-- Stacks of completed symbols:")
states = [s for s in self.edges.values() if s]
# States now has the set of states we are in
state_stack = set()
for state in states:
# Find rules which can follow, but keep only
# the part before the dot
for rule, dot in self.states[state].items:
lhs, rhs = rule
if dot > 0:
if full:
state_stack.add(
"%s ::= %s . %s"
% (lhs, " ".join(rhs[:dot]), " ".join(rhs[dot:]))
)
else:
state_stack.add("%s ::= %s" % (lhs, " ".join(rhs[:dot])))
pass
pass
pass
for stack in sorted(state_stack):
print(stack)
def parse(self, tokens, debug=None):
"""This is the main entry point from outside.
Passing in a debug dictionary changes the default debug
setting.
"""
self.tokens = tokens
if debug:
self.debug = debug
sets = [[(1, 0), (2, 0)]]
self.links = {}
if self.ruleschanged:
self.computeNull()
self.newrules = {}
self.new2old = {}
self.makeNewRules()
self.ruleschanged = False
self.edges, self.cores = {}, {}
self.states = {0: self.makeState0()}
self.makeState(0, self._BOF)
for i in range(len(tokens)):
sets.append([])
if sets[i] == []:
break
self.makeSet(tokens, sets, i)
else:
sets.append([])
self.makeSet(None, sets, len(tokens))
finalitem = (self.finalState(tokens), 0)
if finalitem not in sets[-2]:
if len(tokens) > 0:
if self.debug.get("errorstack", False):
self.errorstack(
tokens, i - 1, str(self.debug["errorstack"]) == "full"
)
self.error(tokens, i - 1)
else:
self.error(None, None)
if self.profile_info is not None:
self.dump_profile_info()
return self.buildTree(self._START, finalitem, tokens, len(sets) - 2)
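    # Illustrative note (not in the original source): a typical call is
    #   tree = parser.parse(token_list, debug={"reduce": True})
    # where each token in token_list is matched against terminal names either
    # via typestring() (gotoT) or by direct comparison (gotoST).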
def isnullable(self, sym):
# For symbols in G_e only.
return sym.startswith(self._NULLABLE)
def skip(self, xxx_todo_changeme, pos=0):
(lhs, rhs) = xxx_todo_changeme
n = len(rhs)
while pos < n:
if not self.isnullable(rhs[pos]):
break
pos = pos + 1
return pos
def makeState(self, state, sym):
assert sym is not None
# print(sym) # debug
#
# Compute \epsilon-kernel state's core and see if
# it exists already.
#
kitems = []
for rule, pos in self.states[state].items:
lhs, rhs = rule
if rhs[pos : pos + 1] == (sym,):
kitems.append((rule, self.skip(rule, pos + 1)))
tcore = tuple(sorted(kitems))
if tcore in self.cores:
return self.cores[tcore]
#
# Nope, doesn't exist. Compute it and the associated
# \epsilon-nonkernel state together; we'll need it right away.
#
k = self.cores[tcore] = len(self.states)
K, NK = _State(k, kitems), _State(k + 1, [])
self.states[k] = K
predicted = {}
edges = self.edges
rules = self.newrules
for X in K, NK:
worklist = X.items
for item in worklist:
rule, pos = item
lhs, rhs = rule
if pos == len(rhs):
X.complete.append(rule)
continue
nextSym = rhs[pos]
key = (X.stateno, nextSym)
if nextSym not in rules:
if key not in edges:
edges[key] = None
X.T.append(nextSym)
else:
edges[key] = None
if nextSym not in predicted:
predicted[nextSym] = 1
for prule in rules[nextSym]:
ppos = self.skip(prule)
new = (prule, ppos)
NK.items.append(new)
#
# Problem: we know K needs generating, but we
# don't yet know about NK. Can't commit anything
# regarding NK to self.edges until we're sure. Should
# we delay committing on both K and NK to avoid this
# hacky code? This creates other problems..
#
if X is K:
edges = {}
if NK.items == []:
return k
#
# Check for \epsilon-nonkernel's core. Unfortunately we
# need to know the entire set of predicted nonterminals
# to do this without accidentally duplicating states.
#
tcore = tuple(sorted(predicted.keys()))
if tcore in self.cores:
self.edges[(k, None)] = self.cores[tcore]
return k
nk = self.cores[tcore] = self.edges[(k, None)] = NK.stateno
self.edges.update(edges)
self.states[nk] = NK
return k
def goto(self, state, sym):
key = (state, sym)
if key not in self.edges:
#
# No transitions from state on sym.
#
return None
rv = self.edges[key]
if rv is None:
#
# Target state isn't generated yet. Remedy this.
#
rv = self.makeState(state, sym)
self.edges[key] = rv
return rv
def gotoT(self, state, t):
if self.debug["rules"]:
print("Terminal", t, state)
return [self.goto(state, t)]
def gotoST(self, state, st):
if self.debug["transition"]:
print("GotoST", st, state)
rv = []
for t in self.states[state].T:
if st == t:
rv.append(self.goto(state, t))
return rv
def add(self, set, item, i=None, predecessor=None, causal=None):
if predecessor is None:
if item not in set:
set.append(item)
else:
key = (item, i)
if item not in set:
self.links[key] = []
set.append(item)
self.links[key].append((predecessor, causal))
def makeSet(self, tokens, sets, i):
cur, next = sets[i], sets[i + 1]
if tokens is not None:
token = tokens[i]
ttype = self.typestring(token)
else:
ttype = None
token = None
if ttype is not None:
fn, arg = self.gotoT, ttype
else:
fn, arg = self.gotoST, token
for item in cur:
ptr = (item, i)
state, parent = item
add = fn(state, arg)
for k in add:
if k is not None:
self.add(next, (k, parent), i + 1, ptr)
nk = self.goto(k, None)
if nk is not None:
self.add(next, (nk, i + 1))
if parent == i:
continue
for rule in self.states[state].complete:
lhs, rhs = rule
if self.debug["reduce"]:
self.debug_reduce(rule, tokens, parent, i)
if self.profile_info is not None:
self.profile_rule(rule)
if lhs in self.check_reduce:
if self.check_reduce[lhs] == "AST" and (
tokens or hasattr(self, "tokens")
):
if hasattr(self, "tokens"):
tokens = self.tokens
ast = self.reduce_ast(rule, self.tokens, item, i, sets)
else:
ast = None
invalid = self.reduce_is_invalid(rule, ast, self.tokens, parent, i)
if ast:
del ast
if invalid:
if self.debug["reduce"]:
print("Reduce %s invalid by check" % lhs)
continue
pass
pass
for pitem in sets[parent]:
pstate, pparent = pitem
k = self.goto(pstate, lhs)
if k is not None:
why = (item, i, rule)
pptr = (pitem, parent)
self.add(cur, (k, pparent), i, pptr, why)
nk = self.goto(k, None)
if nk is not None:
self.add(cur, (nk, i))
def makeSet_fast(self, token, sets, i):
#
# Call *only* when the entire state machine has been built!
# It relies on self.edges being filled in completely, and
# then duplicates and inlines code to boost speed at the
# cost of extreme ugliness.
#
cur, next = sets[i], sets[i + 1]
ttype = token is not None and self.typestring(token) or None
for item in cur:
ptr = (item, i)
state, parent = item
if ttype is not None:
k = self.edges.get((state, ttype), None)
if k is not None:
# self.add(next, (k, parent), i+1, ptr)
# INLINED --------v
new = (k, parent)
key = (new, i + 1)
if new not in next:
self.links[key] = []
next.append(new)
self.links[key].append((ptr, None))
# INLINED --------^
# nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
# self.add(next, (nk, i+1))
# INLINED -------------v
new = (nk, i + 1)
if new not in next:
next.append(new)
# INLINED ---------------^
else:
add = self.gotoST(state, token)
for k in add:
if k is not None:
self.add(next, (k, parent), i + 1, ptr)
# nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
self.add(next, (nk, i + 1))
if parent == i:
continue
for rule in self.states[state].complete:
lhs, rhs = rule
for pitem in sets[parent]:
pstate, pparent = pitem
# k = self.goto(pstate, lhs)
k = self.edges.get((pstate, lhs), None)
if k is not None:
why = (item, i, rule)
pptr = (pitem, parent)
# self.add(cur, (k, pparent), i, pptr, why)
# INLINED ---------v
new = (k, pparent)
key = (new, i)
if new not in cur:
self.links[key] = []
cur.append(new)
self.links[key].append((pptr, why))
# INLINED ----------^
# nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
# self.add(cur, (nk, i))
# INLINED ---------v
new = (nk, i)
if new not in cur:
cur.append(new)
# INLINED ----------^
def predecessor(self, key, causal):
for p, c in self.links[key]:
if c == causal:
return p
assert 0
def causal(self, key):
links = self.links[key]
if len(links) == 1:
return links[0][1]
choices = []
rule2cause = {}
for p, c in links:
rule = c[2]
choices.append(rule)
rule2cause[rule] = c
return rule2cause[self.ambiguity(choices)]
def deriveEpsilon(self, nt):
if len(self.newrules[nt]) > 1:
rule = self.ambiguity(self.newrules[nt])
else:
rule = self.newrules[nt][0]
# print(rule) # debug
rhs = rule[1]
attr = [None] * len(rhs)
for i in range(len(rhs) - 1, -1, -1):
attr[i] = self.deriveEpsilon(rhs[i])
return self.rule2func[self.new2old[rule]](attr)
def buildTree(self, nt, item, tokens, k):
if self.debug["rules"]:
print("NT", nt)
state, parent = item
choices = []
for rule in self.states[state].complete:
if rule[0] == nt:
choices.append(rule)
rule = choices[0]
if len(choices) > 1:
rule = self.ambiguity(choices)
# print(rule) # debug
rhs = rule[1]
attr = [None] * len(rhs)
for i in range(len(rhs) - 1, -1, -1):
sym = rhs[i]
if sym not in self.newrules:
if sym != self._BOF:
attr[i] = tokens[k - 1]
key = (item, k)
item, k = self.predecessor(key, None)
# elif self.isnullable(sym):
elif self._NULLABLE == sym[0 : len(self._NULLABLE)]:
attr[i] = self.deriveEpsilon(sym)
else:
key = (item, k)
why = self.causal(key)
attr[i] = self.buildTree(sym, why[0], tokens, why[1])
item, k = self.predecessor(key, why)
return self.rule2func[self.new2old[rule]](attr)
def ambiguity(self, rules):
#
# XXX - problem here and in collectRules() if the same rule
# appears in >1 method. Also undefined results if rules
# causing the ambiguity appear in the same method.
#
sortlist = []
name2index = {}
for i in range(len(rules)):
lhs, rhs = rule = rules[i]
name = self.rule2name[self.new2old[rule]]
sortlist.append((len(rhs), name))
name2index[name] = i
sortlist.sort()
list = [a_b[1] for a_b in sortlist]
return rules[name2index[self.resolve(list)]]
def resolve(self, list):
"""
Resolve ambiguity in favor of the shortest RHS.
Since we walk the tree from the top down, this
should effectively resolve in favor of a "shift".
"""
return list[0]
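    # Grounded in the two methods above: ambiguity() sorts candidate rules by
    # (len(rhs), name) and resolve() keeps the first entry, so ties go to the
    # shortest right-hand side. A subclass that needs a different tie-break
    # can override resolve() with its own choice from the sorted name list.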
def dump_grammar(self, out=sys.stdout):
"""
Print grammar rules
"""
for rule in sorted(self.rule2name.items()):
out.write("%s\n" % rule2str(rule[0]))
return
def check_grammar(self, ok_start_symbols=set(), out=sys.stderr):
"""
Check grammar for:
        - unused left-hand side nonterminals that are neither start symbols
          nor listed in ok_start_symbols
- unused right-hand side nonterminals, i.e. not tokens
- right-recursive rules. These can slow down parsing.
"""
warnings = 0
(lhs, rhs, tokens, right_recursive, dup_rhs) = self.check_sets()
if lhs - ok_start_symbols:
warnings += 1
out.write("LHS symbols not used on the RHS:\n")
out.write(" " + (", ".join(sorted(lhs)) + "\n"))
if rhs:
warnings += 1
out.write("RHS symbols not used on the LHS:\n")
out.write((", ".join(sorted(rhs))) + "\n")
if right_recursive:
warnings += 1
out.write("Right recursive rules:\n")
for rule in sorted(right_recursive):
out.write(" %s ::= %s\n" % (rule[0], " ".join(rule[1])))
pass
pass
if dup_rhs:
warnings += 1
out.write("Nonterminals with the same RHS\n")
for rhs in sorted(dup_rhs.keys()):
out.write(" RHS: %s\n" % " ".join(rhs))
out.write(" LHS: %s\n" % ", ".join(dup_rhs[rhs]))
out.write(" ---\n")
pass
pass
return warnings
def check_sets(self):
"""
Check grammar
"""
lhs_set = set()
rhs_set = set()
rhs_rules_set = {}
token_set = set()
right_recursive = set()
dup_rhs = {}
for lhs in self.rules:
rules_for_lhs = self.rules[lhs]
lhs_set.add(lhs)
for rule in rules_for_lhs:
rhs = rule[1]
if len(rhs) > 0 and rhs in rhs_rules_set:
li = dup_rhs.get(rhs, [])
li.append(lhs)
dup_rhs[rhs] = li
else:
rhs_rules_set[rhs] = lhs
for sym in rhs:
# We assume any symbol starting with an uppercase letter is
# terminal, and anything else is a nonterminal
if re.match("^[A-Z]", sym):
token_set.add(sym)
else:
rhs_set.add(sym)
if len(rhs) > 0 and lhs == rhs[-1]:
right_recursive.add((lhs, rhs))
pass
pass
lhs_set.remove(self._START)
rhs_set.remove(self._BOF)
missing_lhs = lhs_set - rhs_set
missing_rhs = rhs_set - lhs_set
# dup_rhs is missing first entry found, so add that
for rhs in dup_rhs:
dup_rhs[rhs].append(rhs_rules_set[rhs])
pass
return (missing_lhs, missing_rhs, token_set, right_recursive, dup_rhs)
def reduce_string(self, rule, last_token_pos=-1):
if last_token_pos >= 0:
return "%s ::= %s (%d)" % (rule[0], " ".join(rule[1]), last_token_pos)
else:
return "%s ::= %s" % (rule[0], " ".join(rule[1]))
# Note the unused parameters here are used in subclassed
# routines that need more information
def debug_reduce(self, rule, tokens, parent, i):
print(self.reduce_string(rule, i))
def profile_rule(self, rule):
"""Bump count of the number of times _rule_ was used"""
rule_str = self.reduce_string(rule)
if rule_str not in self.profile_info:
self.profile_info[rule_str] = 1
else:
self.profile_info[rule_str] += 1
def get_profile_info(self):
"""Show the accumulated results of how many times each rule was used"""
return sorted(self.profile_info.items(), key=lambda kv: kv[1], reverse=False)
def dump_profile_info(self):
if isinstance(self.coverage_path, str):
with open(self.coverage_path, "wb") as fp:
pickle.dump(self.profile_info, fp)
else:
for rule, count in self.get_profile_info():
self.coverage_path.write("%s -- %d\n" % (rule, count))
pass
self.coverage_path.write("-" * 40 + "\n")
def reduce_ast(self, rule, tokens, item, k, sets):
rhs = rule[1]
ast = [None] * len(rhs)
for i in range(len(rhs) - 1, -1, -1):
sym = rhs[i]
if sym not in self.newrules:
if sym != self._BOF:
ast[i] = tokens[k - 1]
key = (item, k)
item, k = self.predecessor(key, None)
elif self._NULLABLE == sym[0 : len(self._NULLABLE)]:
ast[i] = self.deriveEpsilon(sym)
else:
key = (item, k)
why = self.causal(key)
ast[i] = self.buildTree(sym, why[0], tokens, why[1])
item, k = self.predecessor(key, why)
pass
pass
return ast
#
#
# GenericASTBuilder automagically constructs a concrete/abstract syntax tree
# for a given input. The extra argument is a class (not an instance!)
# which supports the "__setslice__" and "__len__" methods.
#
# XXX - silently overrides any user code in methods.
#
class GenericASTBuilder(GenericParser):
def __init__(self, AST, start, debug=DEFAULT_DEBUG):
if "SPARK_PARSER_COVERAGE" in os.environ:
coverage_path = os.environ["SPARK_PARSER_COVERAGE"]
else:
coverage_path = None
GenericParser.__init__(self, start, debug=debug, coverage_path=coverage_path)
self.AST = AST
def preprocess(self, rule, func):
rebind = lambda lhs, self=self: lambda args, lhs=lhs, self=self: self.buildASTNode(
args, lhs
)
lhs, rhs = rule
return rule, rebind(lhs)
def buildASTNode(self, args, lhs):
children = []
for arg in args:
if isinstance(arg, self.AST):
children.append(arg)
else:
children.append(self.terminal(arg))
return self.nonterminal(lhs, children)
def terminal(self, token):
return token
def nonterminal(self, type, args):
rv = self.AST(type)
rv[: len(args)] = args
return rv
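# Illustrative sketch, not part of the original module. It rests on the usual
# SPARK convention (an assumption, not something stated in this file): grammar
# rules live in the docstrings of methods named "p_*", and parse() receives a
# token sequence whose items compare equal to the terminal names used in those
# rules. The helper is never called here; it only shows the intended shape.
def _example_expr_parser():
    class ExprParser(GenericParser):
        def __init__(self, start="expr"):
            GenericParser.__init__(self, start)

        def p_rules(self, args):
            """
            expr ::= expr ADD term
            expr ::= term
            term ::= NUMBER
            """
            return args

    # Tokens are plain strings that match the terminal names in the rules.
    return ExprParser().parse(["NUMBER", "ADD", "NUMBER"])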
|
|
###############################################################################
#
# XMLwriter - A base class for XlsxWriter classes.
#
# Used in conjunction with XlsxWriter.
#
# Copyright 2013-2016, John McNamara, [email protected]
#
# Standard packages.
import re
import codecs
# Standard packages in Python 2/3 compatibility mode.
from .compatibility import StringIO
class XMLwriter(object):
"""
Simple XML writer class.
"""
def __init__(self):
self.fh = None
self.escapes = re.compile('["&<>\n]')
self.internal_fh = False
def _set_filehandle(self, filehandle):
# Set the writer filehandle directly. Mainly for testing.
self.fh = filehandle
self.internal_fh = False
def _set_xml_writer(self, filename):
# Set the XML writer filehandle for the object.
if isinstance(filename, StringIO):
self.internal_fh = False
self.fh = filename
else:
self.internal_fh = True
self.fh = codecs.open(filename, 'w', 'utf-8')
def _xml_close(self):
# Close the XML filehandle if we created it.
if self.internal_fh:
self.fh.close()
def _xml_declaration(self):
# Write the XML declaration.
self.fh.write(
"""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n""")
def _xml_start_tag(self, tag, attributes=[]):
# Write an XML start tag with optional attributes.
for key, value in attributes:
value = self._escape_attributes(value)
tag += ' %s="%s"' % (key, value)
self.fh.write("<%s>" % tag)
def _xml_start_tag_unencoded(self, tag, attributes=[]):
# Write an XML start tag with optional, unencoded, attributes.
# This is a minor speed optimization for elements that don't
# need encoding.
for key, value in attributes:
tag += ' %s="%s"' % (key, value)
self.fh.write("<%s>" % tag)
def _xml_end_tag(self, tag):
# Write an XML end tag.
self.fh.write("</%s>" % tag)
def _xml_empty_tag(self, tag, attributes=[]):
# Write an empty XML tag with optional attributes.
for key, value in attributes:
value = self._escape_attributes(value)
tag += ' %s="%s"' % (key, value)
self.fh.write("<%s/>" % tag)
def _xml_empty_tag_unencoded(self, tag, attributes=[]):
# Write an empty XML tag with optional, unencoded, attributes.
# This is a minor speed optimization for elements that don't
# need encoding.
for key, value in attributes:
tag += ' %s="%s"' % (key, value)
self.fh.write("<%s/>" % tag)
def _xml_data_element(self, tag, data, attributes=[]):
# Write an XML element containing data with optional attributes.
end_tag = tag
for key, value in attributes:
value = self._escape_attributes(value)
tag += ' %s="%s"' % (key, value)
data = self._escape_data(data)
self.fh.write("<%s>%s</%s>" % (tag, data, end_tag))
def _xml_string_element(self, index, attributes=[]):
# Optimized tag writer for <c> cell string elements in the inner loop.
attr = ''
for key, value in attributes:
value = self._escape_attributes(value)
attr += ' %s="%s"' % (key, value)
self.fh.write("""<c%s t="s"><v>%d</v></c>""" % (attr, index))
def _xml_si_element(self, string, attributes=[]):
# Optimized tag writer for shared strings <si> elements.
attr = ''
for key, value in attributes:
value = self._escape_attributes(value)
attr += ' %s="%s"' % (key, value)
string = self._escape_data(string)
self.fh.write("""<si><t%s>%s</t></si>""" % (attr, string))
def _xml_rich_si_element(self, string):
# Optimized tag writer for shared strings <si> rich string elements.
self.fh.write("""<si>%s</si>""" % string)
def _xml_number_element(self, number, attributes=[]):
# Optimized tag writer for <c> cell number elements in the inner loop.
attr = ''
for key, value in attributes:
value = self._escape_attributes(value)
attr += ' %s="%s"' % (key, value)
self.fh.write("""<c%s><v>%.16g</v></c>""" % (attr, number))
def _xml_formula_element(self, formula, result, attributes=[]):
# Optimized tag writer for <c> cell formula elements in the inner loop.
attr = ''
for key, value in attributes:
value = self._escape_attributes(value)
attr += ' %s="%s"' % (key, value)
self.fh.write("""<c%s><f>%s</f><v>%s</v></c>"""
% (attr, self._escape_data(formula),
self._escape_data(result)))
def _xml_inline_string(self, string, preserve, attributes=[]):
# Optimized tag writer for inlineStr cell elements in the inner loop.
attr = ''
t_attr = ''
# Set the <t> attribute to preserve whitespace.
if preserve:
t_attr = ' xml:space="preserve"'
for key, value in attributes:
value = self._escape_attributes(value)
attr += ' %s="%s"' % (key, value)
string = self._escape_data(string)
self.fh.write("""<c%s t="inlineStr"><is><t%s>%s</t></is></c>""" %
(attr, t_attr, string))
def _xml_rich_inline_string(self, string, attributes=[]):
# Optimized tag writer for rich inlineStr in the inner loop.
attr = ''
for key, value in attributes:
value = self._escape_attributes(value)
attr += ' %s="%s"' % (key, value)
self.fh.write("""<c%s t="inlineStr"><is>%s</is></c>""" %
(attr, string))
def _escape_attributes(self, attribute):
# Escape XML characters in attributes.
try:
if not self.escapes.search(attribute):
return attribute
except TypeError:
return attribute
        attribute = attribute.replace('&', '&amp;')
        attribute = attribute.replace('"', '&quot;')
        attribute = attribute.replace('<', '&lt;')
        attribute = attribute.replace('>', '&gt;')
        attribute = attribute.replace('\n', '&#xA;')
def _escape_data(self, data):
# Escape XML characters in data sections of tags. Note, this
# is different from _escape_attributes() in that double quotes
# are not escaped by Excel.
try:
if not self.escapes.search(data):
return data
except TypeError:
return data
        data = data.replace('&', '&amp;')
        data = data.replace('<', '&lt;')
        data = data.replace('>', '&gt;')
return data
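# Illustrative sketch, not part of the original XlsxWriter module: a small
# hypothetical helper that writes one data element to an in-memory handle to
# show the escaping rules above (double quotes are escaped in attributes but
# left alone in data sections). It is never called here.
def _demo_escaping():
    writer = XMLwriter()
    writer._set_xml_writer(StringIO())
    writer._xml_data_element('t', 'a < b & "c"', [('xml:space', 'preserve')])
    # With the escape tables above this returns:
    #   <t xml:space="preserve">a &lt; b &amp; "c"</t>
    return writer.fh.getvalue()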
|
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testtools import testcase
from sahara.tests.integration.configs import config as cfg
from sahara.tests.integration.tests import base as b
from sahara.tests.integration.tests import cinder
from sahara.tests.integration.tests import cluster_configs
from sahara.tests.integration.tests import edp
from sahara.tests.integration.tests import map_reduce
from sahara.tests.integration.tests import scaling
from sahara.tests.integration.tests import swift
from sahara.utils import edp as utils_edp
class VanillaTwoGatingTest(cluster_configs.ClusterConfigTest,
map_reduce.MapReduceTest, swift.SwiftTest,
scaling.ScalingTest, cinder.CinderVolumeTest,
edp.EDPTest):
vanilla_two_config = cfg.ITConfig().vanilla_two_config
SKIP_MAP_REDUCE_TEST = vanilla_two_config.SKIP_MAP_REDUCE_TEST
SKIP_SWIFT_TEST = vanilla_two_config.SKIP_SWIFT_TEST
SKIP_SCALING_TEST = vanilla_two_config.SKIP_SCALING_TEST
SKIP_CINDER_TEST = vanilla_two_config.SKIP_CINDER_TEST
SKIP_EDP_TEST = vanilla_two_config.SKIP_EDP_TEST
def setUp(self):
super(VanillaTwoGatingTest, self).setUp()
self.cluster_id = None
self.cluster_template_id = None
self.ng_template_ids = []
def get_plugin_config(self):
return cfg.ITConfig().vanilla_two_config
ng_params = {
'MapReduce': {
'yarn.app.mapreduce.am.resource.mb': 256,
'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
},
'YARN': {
'yarn.scheduler.minimum-allocation-mb': 256,
'yarn.scheduler.maximum-allocation-mb': 1024,
'yarn.nodemanager.vmem-check-enabled': False
}
}
@b.errormsg("Failure while 'nm-dn' node group template creation: ")
def _create_nm_dn_ng_template(self):
template = {
'name': 'test-node-group-template-vanilla-nm-dn',
'plugin_config': self.plugin_config,
'description': 'test node group template for Vanilla plugin',
'node_processes': ['nodemanager', 'datanode'],
'floating_ip_pool': self.floating_ip_pool,
'auto_security_group': True,
'node_configs': self.ng_params
}
self.ng_tmpl_nm_dn_id = self.create_node_group_template(**template)
self.ng_template_ids.append(self.ng_tmpl_nm_dn_id)
@b.errormsg("Failure while 'nm' node group template creation: ")
def _create_nm_ng_template(self):
template = {
'name': 'test-node-group-template-vanilla-nm',
'plugin_config': self.plugin_config,
'description': 'test node group template for Vanilla plugin',
'volumes_per_node': self.volumes_per_node,
'volumes_size': self.volumes_size,
'node_processes': ['nodemanager'],
'floating_ip_pool': self.floating_ip_pool,
'auto_security_group': True,
'node_configs': self.ng_params
}
self.ng_tmpl_nm_id = self.create_node_group_template(**template)
self.ng_template_ids.append(self.ng_tmpl_nm_id)
@b.errormsg("Failure while 'dn' node group template creation: ")
def _create_dn_ng_template(self):
template = {
'name': 'test-node-group-template-vanilla-dn',
'plugin_config': self.plugin_config,
'description': 'test node group template for Vanilla plugin',
'volumes_per_node': self.volumes_per_node,
'volumes_size': self.volumes_size,
'node_processes': ['datanode'],
'floating_ip_pool': self.floating_ip_pool,
'auto_security_group': True,
'node_configs': self.ng_params
}
self.ng_tmpl_dn_id = self.create_node_group_template(**template)
self.ng_template_ids.append(self.ng_tmpl_dn_id)
@b.errormsg("Failure while cluster template creation: ")
def _create_cluster_template(self):
template = {
'name': 'test-cluster-template-vanilla',
'plugin_config': self.plugin_config,
'description': 'test cluster template for Vanilla plugin',
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
}
},
'node_groups': [
{
'name': 'master-node-rm-nn',
'flavor_id': self.flavor_id,
'node_processes': ['namenode', 'resourcemanager'],
'floating_ip_pool': self.floating_ip_pool,
'auto_security_group': True,
'count': 1,
'node_configs': self.ng_params
},
{
'name': 'master-node-oo-hs',
'flavor_id': self.flavor_id,
'node_processes': ['oozie', 'historyserver',
'secondarynamenode'],
'floating_ip_pool': self.floating_ip_pool,
'auto_security_group': True,
'count': 1,
'node_configs': self.ng_params
},
{
'name': 'worker-node-nm-dn',
'node_group_template_id': self.ng_tmpl_nm_dn_id,
'count': 2
},
{
'name': 'worker-node-dn',
'node_group_template_id': self.ng_tmpl_dn_id,
'count': 1
},
{
'name': 'worker-node-nm',
'node_group_template_id': self.ng_tmpl_nm_id,
'count': 1
}
],
'net_id': self.internal_neutron_net
}
self.cluster_template_id = self.create_cluster_template(**template)
@b.errormsg("Failure while cluster creation: ")
def _create_cluster(self):
cluster_name = '%s-%s-v2' % (self.common_config.CLUSTER_NAME,
self.plugin_config.PLUGIN_NAME)
cluster = {
'name': cluster_name,
'plugin_config': self.plugin_config,
'cluster_template_id': self.cluster_template_id,
'description': 'test cluster',
'cluster_configs': {}
}
cluster_id = self.create_cluster(**cluster)
self.poll_cluster_state(cluster_id)
self.cluster_info = self.get_cluster_info(self.plugin_config)
self.await_active_workers_for_namenode(self.cluster_info['node_info'],
self.plugin_config)
@b.errormsg("Failure while Cinder testing: ")
def _check_cinder(self):
self.cinder_volume_testing(self.cluster_info)
@b.errormsg("Failure while Map Reduce testing: ")
def _check_mapreduce(self):
self.map_reduce_testing(self.cluster_info)
@b.errormsg("Failure during check of Swift availability: ")
def _check_swift(self):
self.check_swift_availability(self.cluster_info)
@b.errormsg("Failure while EDP testing: ")
def _check_edp(self):
self.poll_jobs_status(list(self._run_edp_tests()))
def _run_edp_tests(self):
skipped_edp_job_types = self.plugin_config.SKIP_EDP_JOB_TYPES
if utils_edp.JOB_TYPE_PIG not in skipped_edp_job_types:
yield self._edp_pig_test()
if utils_edp.JOB_TYPE_MAPREDUCE not in skipped_edp_job_types:
yield self._edp_mapreduce_test()
if utils_edp.JOB_TYPE_MAPREDUCE_STREAMING not in skipped_edp_job_types:
yield self._edp_mapreduce_streaming_test()
if utils_edp.JOB_TYPE_JAVA not in skipped_edp_job_types:
yield self._edp_java_test()
def _edp_pig_test(self):
pig_job = self.edp_info.read_pig_example_script()
pig_lib = self.edp_info.read_pig_example_jar()
return self.edp_testing(
job_type=utils_edp.JOB_TYPE_PIG,
job_data_list=[{'pig': pig_job}],
lib_data_list=[{'jar': pig_lib}],
swift_binaries=True,
hdfs_local_output=True)
def _edp_mapreduce_test(self):
mapreduce_jar = self.edp_info.read_mapreduce_example_jar()
mapreduce_configs = self.edp_info.mapreduce_example_configs()
return self.edp_testing(
job_type=utils_edp.JOB_TYPE_MAPREDUCE,
job_data_list=[],
lib_data_list=[{'jar': mapreduce_jar}],
configs=mapreduce_configs,
swift_binaries=True,
hdfs_local_output=True)
def _edp_mapreduce_streaming_test(self):
return self.edp_testing(
job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
job_data_list=[],
lib_data_list=[],
configs=self.edp_info.mapreduce_streaming_configs())
def _edp_java_test(self):
java_jar = self.edp_info.read_java_example_lib(2)
java_configs = self.edp_info.java_example_configs(2)
return self.edp_testing(
utils_edp.JOB_TYPE_JAVA,
job_data_list=[],
lib_data_list=[{'jar': java_jar}],
configs=java_configs)
@b.errormsg("Failure while cluster scaling: ")
def _check_scaling(self):
change_list = [
{
'operation': 'resize',
'info': ['worker-node-nm-dn', 1]
},
{
'operation': 'resize',
'info': ['worker-node-dn', 0]
},
{
'operation': 'resize',
'info': ['worker-node-nm', 0]
},
{
'operation': 'add',
'info': [
'new-worker-node-nm', 1, '%s' % self.ng_tmpl_nm_id
]
},
{
'operation': 'add',
'info': [
'new-worker-node-dn', 1, '%s' % self.ng_tmpl_dn_id
]
}
]
self.cluster_info = self.cluster_scaling(self.cluster_info,
change_list)
self.await_active_workers_for_namenode(self.cluster_info['node_info'],
self.plugin_config)
@b.errormsg("Failure while Cinder testing after cluster scaling: ")
def _check_cinder_after_scaling(self):
self.cinder_volume_testing(self.cluster_info)
@b.errormsg("Failure while Map Reduce testing after cluster scaling: ")
def _check_mapreduce_after_scaling(self):
self.map_reduce_testing(self.cluster_info)
@b.errormsg(
"Failure during check of Swift availability after cluster scaling: ")
def _check_swift_after_scaling(self):
self.check_swift_availability(self.cluster_info)
@b.errormsg("Failure while EDP testing after cluster scaling: ")
def _check_edp_after_scaling(self):
self._check_edp()
@testcase.skipIf(
cfg.ITConfig().vanilla_two_config.SKIP_ALL_TESTS_FOR_PLUGIN,
"All tests for Vanilla plugin were skipped")
@testcase.attr('vanilla2')
def test_vanilla_two_plugin_gating(self):
self._create_nm_dn_ng_template()
self._create_nm_ng_template()
self._create_dn_ng_template()
self._create_cluster_template()
self._create_cluster()
self._check_cinder()
self._check_mapreduce()
self._check_swift()
self._check_edp()
if not self.plugin_config.SKIP_SCALING_TEST:
self._check_scaling()
self._check_cinder_after_scaling()
self._check_mapreduce_after_scaling()
self._check_swift_after_scaling()
self._check_edp_after_scaling()
def tearDown(self):
self.delete_objects(self.cluster_id, self.cluster_template_id,
self.ng_template_ids)
super(VanillaTwoGatingTest, self).tearDown()
|
|
#! /usr/bin/env python
"""Write structured grids to NetCDF files.
Write netcdf
++++++++++++
.. autosummary::
:toctree: generated/
~landlab.io.netcdf.write.write_netcdf
"""
import os
import warnings
import six
import numpy as np
try:
import netCDF4 as nc4
except ImportError:
warnings.warn('Unable to import netCDF4.', ImportWarning)
from scipy.io import netcdf as nc
from landlab.io.netcdf._constants import (_AXIS_DIMENSION_NAMES,
_AXIS_COORDINATE_NAMES,
_NP_TO_NC_TYPE)
def _set_netcdf_attributes(root, attrs):
"""Set attributes of a netcdf file.
Set attributes of the netCDF Database object, *root*. Attributes are
given as key/value pairs from *attrs*.
Parameters
----------
root : netcdf_file
A NetCDF file.
attrs : dict
Attributes as key-value pairs.
"""
for (key, val) in attrs.items():
setattr(root, key, val)
def _get_dimension_names(shape):
"""Get dimension names.
Parameters
----------
shape : tuple of int
Shape of a structured grid.
Returns
-------
tuple of str
Dimension names for the NetCDF file.
Examples
--------
>>> from landlab.io.netcdf.write import _get_dimension_names
>>> _get_dimension_names((4, ))
['ni']
>>> _get_dimension_names((4, 5))
['nj', 'ni']
>>> _get_dimension_names((4, 5, 6))
['nk', 'nj', 'ni']
"""
names = _AXIS_DIMENSION_NAMES[- 1: - (len(shape) + 1): - 1]
return names[::-1]
def _get_dimension_sizes(shape):
"""Get dimension sizes.
Parameters
----------
shape : tuple of int
Shape of a structured grid.
Returns
-------
dict
Dimension sizes.
Examples
--------
>>> from landlab.io.netcdf.write import _get_dimension_sizes
>>> _get_dimension_sizes((4, ))
{'ni': 4}
>>> sizes = _get_dimension_sizes((4, 5))
>>> sizes['ni'], sizes['nj']
(5, 4)
>>> sizes = _get_dimension_sizes((4, 5, 6))
>>> sizes['ni'], sizes['nj'], sizes['nk']
(6, 5, 4)
"""
names = _AXIS_DIMENSION_NAMES[- 1: - (len(shape) + 1): - 1]
sizes = dict()
for (axis, name) in enumerate(names):
sizes[name] = shape[- (axis + 1)]
return sizes
def _get_axes_names(shape):
"""Get names of the axes.
Parameters
----------
shape : tuple of int
Shape of a structured grid.
Returns
-------
tuple of str
Names of the axes for the NetCDF file.
Examples
--------
>>> from landlab.io.netcdf.write import _get_axes_names
>>> _get_axes_names((2, ))
['x']
>>> _get_axes_names((2, 3))
['y', 'x']
>>> _get_axes_names((2, 3, 4))
['z', 'y', 'x']
"""
names = _AXIS_COORDINATE_NAMES[- 1: - (len(shape) + 1): - 1]
return names[::-1]
def _get_cell_bounds(shape, spacing=(1., 1.), origin=(0., 0.)):
"""Get bounds arrays for square cells.
Parameters
----------
shape : tuple of int
Shape of the grid in cell corners.
spacing : tuple of float
Height and width of cells.
origin : tuple of float
Coordinates of lower-left corner of lower-left cell.
Returns
-------
(y, x) : tuple of ndarray
Tuple of the *y* and *x* coordinates of each cell corner (ordered
        counter-clockwise starting from lower-right). The shape of the returned
arrays will be *(rows, cols, 4)*.
Examples
--------
>>> from landlab.io.netcdf.write import _get_cell_bounds
>>> bounds = _get_cell_bounds((3, 4))
>>> bounds['y_bnds'] # doctest: +NORMALIZE_WHITESPACE
array([[[ 0., 1., 1., 0.], [ 0., 1., 1., 0.], [ 0., 1., 1., 0.]],
[[ 1., 2., 2., 1.], [ 1., 2., 2., 1.], [ 1., 2., 2., 1.]]])
>>> bounds['x_bnds'] # doctest: +NORMALIZE_WHITESPACE
array([[[ 1., 1., 0., 0.], [ 2., 2., 1., 1.], [ 3., 3., 2., 2.]],
[[ 1., 1., 0., 0.], [ 2., 2., 1., 1.], [ 3., 3., 2., 2.]]])
"""
rows = np.arange(shape[0]) * spacing[0] + origin[0]
cols = np.arange(shape[1]) * spacing[1] + origin[1]
corner_y, corner_x = np.meshgrid(rows, cols, indexing='ij')
y_bnds = np.vstack((corner_y[:-1, 1:].flat, corner_y[1:, 1:].flat,
corner_y[1:, :-1].flat, corner_y[:-1, :-1].flat)).T
x_bnds = np.vstack((corner_x[:-1, 1:].flat, corner_x[1:, 1:].flat,
corner_x[1:, :-1].flat, corner_x[:-1, :-1].flat)).T
return {'y_bnds': y_bnds.reshape((shape[0] - 1, shape[1] - 1, 4)),
'x_bnds': x_bnds.reshape((shape[0] - 1, shape[1] - 1, 4)),
}
def _set_netcdf_cell_structured_dimensions(root, shape):
"""Set dimensions for a structured grid of cells.
Parameters
----------
root : netcdf_file
A NetCDF file.
shape : tuple of int
Shape of the cell grid (rows of cells, columns of cells).
"""
if len(shape) < 1 or len(shape) > 3:
raise ValueError('grid dimension must be 1, 2, or 3')
dimensions = _get_dimension_sizes(shape)
dims = root.dimensions
if 'nt' not in dims:
root.createDimension('nt', None)
for (name, dim_size) in dimensions.items():
if name not in dims:
root.createDimension(name, dim_size - 2)
root.createDimension('nv', 4)
def _set_netcdf_structured_dimensions(root, shape):
"""Set dimensions for a structured grid.
Add dimensions to *root* for a structured grid of size *shape*. The
dimension names will be 'ni', 'nj', and 'nk'. 'ni' is the length of the
fast dimension, followed by 'nj', and then 'nk'.
For example, a grid with shape (3, 4, 5) will have dimensions ni=5,
nj=4, and nk=3. Lower dimension grids simply drop the slowest dimension.
Thus, a grid with shape (3, 4) has dimensions ni=4, and nj=3.
Parameters
----------
root : netcdf_file
A NetCDF file.
shape : tuple of int
Shape of the grid.
"""
if len(shape) < 1 or len(shape) > 3:
raise ValueError('grid dimension must be 1, 2, or 3')
dimensions = _get_dimension_sizes(shape)
dims = root.dimensions
if 'nt' not in dims:
root.createDimension('nt', None)
for (name, dim_size) in dimensions.items():
if name not in dims:
root.createDimension(name, dim_size)
def _set_netcdf_variables(root, fields, **kwds):
"""Set the field variables.
First set the variables that define the grid and then the variables at
the grid nodes and cells.
"""
names = kwds.pop('names', None)
_add_spatial_variables(root, fields, **kwds)
_add_variables_at_points(root, fields, names=names)
def _set_netcdf_cell_variables(root, fields, **kwds):
"""Set the cell field variables.
First set the variables that define the grid and then the variables at
the grid nodes and cells.
"""
names = kwds.pop('names', None)
_add_cell_spatial_variables(root, fields, **kwds)
_add_variables_at_cells(root, fields, names=names)
def _add_cell_spatial_variables(root, grid, **kwds):
"""Add the spatial variables that describe the cell grid."""
long_name = kwds.get('long_name', {})
cell_grid_shape = [dim - 1 for dim in grid.shape]
spatial_variable_shape = _get_dimension_names(cell_grid_shape)
bounds = _get_cell_bounds(cell_grid_shape,
spacing=(grid.dy, grid.dx),
origin=(grid.dy * .5, grid.dx * .5))
shape = spatial_variable_shape + ['nv']
for name, values in bounds.items():
# var = root.createVariable(name, 'f8', shape)
# var[:] = values
try:
var = root.variables[name]
except KeyError:
var = root.createVariable(name, 'f8', shape)
var[:] = values
axis = grid.axis_name.index(name[0])
var.units = grid.axis_units[axis]
try:
var.long_name = long_name[name]
except KeyError:
var.long_name = grid.axis_name[axis]
def _add_spatial_variables(root, grid, **kwds):
"""Add spatial variables to a NetCDF file.
Add the variables to *root* that define the structured grid, *grid*.
Parameters
----------
root : netcdf_file
A NetCDF file.
grid : RasterModelGrid
A structured grid.
long_name : dict, optional
Long name for each spatial variable to add. Keys are grid field
names, values are corresponding long names.
"""
long_name = kwds.get('long_name', {})
netcdf_vars = root.variables
spatial_variable_names = _get_axes_names(grid.shape)
spatial_variable_shape = _get_dimension_names(grid.shape)
for (axis, name) in enumerate(spatial_variable_names):
try:
var = netcdf_vars[name]
except KeyError:
var = root.createVariable(name, 'f8', spatial_variable_shape)
coords = grid.node_axis_coordinates(axis=axis).view()
coords.shape = var.shape
var[:] = coords
var.units = grid.axis_units[axis]
try:
var.long_name = long_name[name]
except KeyError:
var.long_name = grid.axis_name[axis]
def _add_variables_at_points(root, fields, names=None):
if isinstance(names, six.string_types):
names = [names]
names = names or fields['node'].keys()
netcdf_vars = root.variables
spatial_variable_shape = _get_dimension_names(fields.shape)
try:
n_times = len(netcdf_vars['t']) - 1
except KeyError:
n_times = 0
node_fields = fields['node']
for var_name in names:
try:
var = netcdf_vars[var_name]
except KeyError:
var = root.createVariable(
var_name, _NP_TO_NC_TYPE[str(node_fields[var_name].dtype)],
['nt'] + spatial_variable_shape)
if node_fields[var_name].size > 1:
data = node_fields[var_name].view()
data.shape = var.shape[1:]
try:
var[n_times, :] = data
except ValueError:
raise
else:
var[n_times] = node_fields[var_name].flat[0]
var.units = node_fields.units[var_name] or '?'
var.long_name = var_name
def _add_variables_at_cells(root, fields, names=None):
if isinstance(names, six.string_types):
names = [names]
names = names or fields['cell'].keys()
netcdf_vars = root.variables
cell_grid_shape = [dim - 1 for dim in fields.shape]
spatial_variable_shape = _get_dimension_names(cell_grid_shape)
try:
n_times = len(netcdf_vars['t']) - 1
except KeyError:
n_times = 0
cell_fields = fields['cell']
for var_name in names:
try:
var = netcdf_vars[var_name]
except KeyError:
var = root.createVariable(
var_name, _NP_TO_NC_TYPE[str(cell_fields[var_name].dtype)],
['nt'] + spatial_variable_shape)
if cell_fields[var_name].size > 1:
data = cell_fields[var_name].view()
data.shape = var.shape[1:]
try:
var[n_times, :] = data
except ValueError:
raise
else:
var[n_times] = cell_fields[var_name].flat[0]
var.units = cell_fields.units[var_name] or '?'
var.long_name = var_name
def _add_time_variable(root, time, **kwds):
"""Add a time value to a NetCDF file.
Append a new time value to the time variable of a NetCDF file. If there
    is no time variable, create one. The time variable is named ``t``.
Parameters
----------
root : netcdf_file
A NetCDF file.
time : float
The time.
units : str, optional
Time units.
reference : str, optional
Reference time.
"""
units = kwds.get('units', 'days')
reference = kwds.get('reference', '00:00:00 UTC')
netcdf_vars = root.variables
try:
time_var = netcdf_vars['t']
except KeyError:
time_var = root.createVariable('t', 'f8', ('nt', ))
time_var.units = ' '.join([units, 'since', reference])
time_var.long_name = 'time'
n_times = len(time_var)
if time is not None:
time_var[n_times] = time
else:
time_var[n_times] = n_times
_VALID_NETCDF_FORMATS = set([
'NETCDF3_CLASSIC',
'NETCDF3_64BIT',
'NETCDF4_CLASSIC',
'NETCDF4',
])
def _guess_at_location(fields, names):
"""Guess where the values should be located."""
node_fields = set(fields['node'].keys())
cell_fields = set(fields['cell'].keys())
if names is None or len(names) == 0:
if len(fields['node']) > 0:
at = 'node'
else:
at = 'cell'
else:
if node_fields.issuperset(names):
at = 'node'
elif cell_fields.issuperset(names):
at = 'cell'
else:
at = None
return at
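# Illustrative behaviour (a reading of the function above, not taken from the
# original docs): with a grid that defines 'topographic__elevation' at nodes
# and 'air__temperature' at cells, _guess_at_location(fields, None) returns
# 'node', _guess_at_location(fields, ['air__temperature']) returns 'cell',
# and a mix of node and cell names returns None, which makes write_netcdf
# below fall back to 'node' and then raise ValueError because the requested
# names are not all defined there.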
def write_netcdf(path, fields, attrs=None, append=False,
format='NETCDF3_64BIT', names=None, at=None):
"""Write landlab fields to netcdf.
Write the data and grid information for *fields* to *path* as NetCDF.
    If the *append* keyword argument is True, append the data to an existing
    file, if it exists. Otherwise, clobber any existing file.
Parameters
----------
path : str
Path to output file.
fields : field-like
Landlab field object that holds a grid and associated values.
append : boolean, optional
Append data to an existing file, otherwise clobber the file.
format : {'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', 'NETCDF4'}
Format of output netcdf file.
attrs : dict
Attributes to add to netcdf file.
names : iterable of str, optional
Names of the fields to include in the netcdf file. If not provided,
write all fields.
at : {'node', 'cell'}, optional
The location where values are defined.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.io.netcdf import write_netcdf
    Create a uniform rectilinear grid with four rows and three columns, and add
some data fields to it.
>>> rmg = RasterModelGrid(4, 3)
>>> _ = rmg.add_field('node', 'topographic__elevation', np.arange(12.))
>>> _ = rmg.add_field('node', 'uplift_rate', 2. * np.arange(12.))
Create a temporary directory to write the netcdf file into.
>>> import tempfile, os
>>> temp_dir = tempfile.mkdtemp()
>>> os.chdir(temp_dir)
Write the grid to a netcdf3 file but only include the *uplift_rate*
data in the file.
>>> write_netcdf('test.nc', rmg, format='NETCDF3_64BIT',
... names='uplift_rate')
Read the file back in and check its contents.
>>> from scipy.io import netcdf
>>> fp = netcdf.netcdf_file('test.nc', 'r')
>>> 'uplift_rate' in fp.variables
True
>>> 'topographic__elevation' in fp.variables
False
>>> fp.variables['uplift_rate'][:].flatten()
array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18., 20.,
22.])
>>> _ = rmg.add_field('cell', 'air__temperature', np.arange(2.))
>>> write_netcdf('test-cell.nc', rmg, format='NETCDF3_64BIT',
... names='air__temperature', at='cell')
"""
if format not in _VALID_NETCDF_FORMATS:
raise ValueError('format not understood')
if at not in (None, 'cell', 'node'):
raise ValueError('value location not understood')
if isinstance(names, six.string_types):
names = (names, )
at = at or _guess_at_location(fields, names) or 'node'
names = names or fields[at].keys()
if not set(fields[at].keys()).issuperset(names):
raise ValueError('values must be on either cells or nodes, not both')
attrs = attrs or {}
if os.path.isfile(path) and append:
mode = 'a'
else:
mode = 'w'
if format == 'NETCDF3_CLASSIC':
root = nc.netcdf_file(path, mode, version=1)
elif format == 'NETCDF3_64BIT':
root = nc.netcdf_file(path, mode, version=2)
else:
root = nc4.Dataset(path, mode, format=format)
_set_netcdf_attributes(root, attrs)
if at == 'node':
_set_netcdf_structured_dimensions(root, fields.shape)
_set_netcdf_variables(root, fields, names=names)
else:
_set_netcdf_cell_structured_dimensions(root, fields.shape)
_set_netcdf_cell_variables(root, fields, names=names)
root.close()
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function  # use the Python 3 print() function under Python 2
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
from popupcad.widgets.dragndroptree import DraggableTreeWidget
import qt.QtCore as qc
import qt.QtGui as qg
import popupcad
from popupcad.filetypes.operationoutput import OperationOutput
from popupcad.filetypes.operation2 import Operation2, LayerBasedOperation
from popupcad.filetypes.laminate import Laminate
from popupcad.widgets.table_editor_popup import Table,SingleItemListElement_old, MultiItemListElement, FloatElement, Row,Delegate,TableControl
from popupcad.widgets.listmanager import SketchListManager
try:
    from itertools import izip as zip
except ImportError:
pass
class JointRow(Row):
def __init__(self, get_sketches, get_layers):
elements = []
elements.append(SingleItemListElement_old('joint sketch', get_sketches))
elements.append(SingleItemListElement_old('joint layer', get_layers))
elements.append(MultiItemListElement('sublaminate layers', get_layers))
elements.append(FloatElement('hinge width'))
self.elements = elements
class JointDef(object):
def __init__(self,sketch,joint_layer,sublaminate_layers,width):
self.sketch = sketch
self.joint_layer = joint_layer
self.sublaminate_layers = sublaminate_layers
self.width = width
def copy(self):
new = type(self)(
self.sketch,
self.joint_layer,
self.sublaminate_layers,
self.width)
return new
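# Illustrative only (mirrors how acceptdata() below builds these objects): a
# JointDef stores ids rather than live objects, e.g.
#     JointDef(sketch.id, joint_layer.id,
#              [layer.id for layer in sublaminate_layers], width)
# where width is the hinge width later used by HoleOperation.gen_geoms().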
class MainWidget(qg.QDialog):
def __init__(self, design, sketches, layers, operations, jointop=None,buffer = .01):
super(MainWidget, self).__init__()
self.design = design
self.sketches = sketches
self.layers = layers
self.operations = operations
self.operation_list = DraggableTreeWidget()
self.operation_list.linklist(self.operations)
self.table = Table(JointRow(self.get_sketches, self.get_layers),Delegate)
table_control= TableControl(self.table, self)
button_ok = qg.QPushButton('Ok')
button_cancel = qg.QPushButton('Cancel')
button_ok.clicked.connect(self.accept)
button_cancel.clicked.connect(self.reject)
self.buffer_val = qg.QLineEdit()
sublayout2 = qg.QHBoxLayout()
sublayout2.addStretch()
sublayout2.addWidget(button_ok)
sublayout2.addWidget(button_cancel)
sublayout2.addStretch()
layout = qg.QVBoxLayout()
layout.addWidget(qg.QLabel('Device'))
layout.addWidget(self.operation_list)
layout.addWidget(table_control)
layout.addWidget(qg.QLabel('Buffer'))
layout.addWidget(self.buffer_val)
layout.addLayout(sublayout2)
self.setLayout(layout)
if jointop is not None:
try:
op_ref, output_ii = jointop.operation_links['parent'][0]
op_ii = design.operation_index(op_ref)
self.operation_list.selectIndeces([(op_ii, output_ii)])
except(IndexError, KeyError):
pass
try:
fixed_ref, fixed_output_ii = jointop.operation_links[
'fixed'][0]
fixed_ii = design.operation_index(fixed_ref)
self.fixed.selectIndeces([(fixed_ii, fixed_output_ii)])
except(IndexError, KeyError):
pass
for item in jointop.joint_defs:
sketch = self.design.sketches[item.sketch]
joint_layer = self.design.return_layer_definition().getlayer(
item.joint_layer)
sublaminate_layers = [self.design.return_layer_definition().getlayer(
item2) for item2 in item.sublaminate_layers]
self.table.row_add(
sketch,
joint_layer,
sublaminate_layers,
item.width)
else:
self.table.row_add_empty()
self.buffer_val.setText(str(buffer))
self.table.resizeColumnsToContents()
self.table.reset_min_width()
self.table.setHorizontalScrollBarPolicy(qc.Qt.ScrollBarAlwaysOff)
def contact_sketch(self):
try:
return self.sketchwidget.itemlist.selectedItems()[0].value
except IndexError:
return None
def get_sketches(self):
return self.sketches
def get_layers(self):
return self.layers
def acceptdata(self):
jointdefs = []
for ii in range(self.table.rowCount()):
sketch = self.table.item(ii, 0).data(qc.Qt.UserRole)
joint_layer = self.table.item(
ii, 1).data(
qc.Qt.UserRole)
sublaminate_layers = self.table.item(
ii, 2).data(
qc.Qt.UserRole)
width = (self.table.item(ii, 3).data(qc.Qt.UserRole))
jointdefs.append(JointDef(sketch.id,joint_layer.id,[item.id for item in sublaminate_layers],width))
operation_links = {}
operation_links['parent'] = self.operation_list.currentRefs()
sketch_links = {}
return operation_links,sketch_links,jointdefs,float(self.buffer_val.text())
class HoleOperation(Operation2, LayerBasedOperation):
name = 'HoleOp'
resolution = 2
def copy(self):
new = type(self)(self.operation_links, self.sketch_links,[item.copy() for item in self.joint_defs],self.buffer_val)
new.id = self.id
new.customname = self.customname
return new
def __init__(self, *args):
super(HoleOperation, self).__init__()
self.editdata(*args)
self.id = id(self)
def editdata(self, operation_links,sketch_links,joint_defs,buffer_val):
super(HoleOperation,self).editdata(operation_links,sketch_links,{})
self.joint_defs = joint_defs
self.buffer_val = buffer_val
@classmethod
def buildnewdialog(cls, design, currentop):
dialog = MainWidget(
design,
design.sketches.values(),
design.return_layer_definition().layers,
design.operations)
return dialog
def buildeditdialog(self, design):
dialog = MainWidget(
design,
design.sketches.values(),
design.return_layer_definition().layers,
design.prioroperations(self),
self,self.buffer_val)
return dialog
def sketchrefs(self):
items = super(HoleOperation,self).sketchrefs()
items.extend([item.sketch for item in self.joint_defs])
return items
def gen_geoms(self, joint_def, layerdef, design,split_buffer):
print('Generating geometry')
hinge_gap = joint_def.width *popupcad.csg_processing_scaling
# split_buffer = .1 * hinge_gap
sublaminate_layers = [
layerdef.getlayer(item) for item in joint_def.sublaminate_layers]
hingelayer = layerdef.getlayer(joint_def.joint_layer)
operationgeom = design.sketches[joint_def.sketch].output_csg()
sketch_result = Laminate(design.return_layer_definition())
sketch_result.replacelayergeoms(hingelayer, operationgeom)
hingelines = sketch_result.to_generic_laminate().geoms[hingelayer]
hingelines = [item for item in hingelines if item.is_valid_bool()]
buffered_split = sketch_result.buffer(split_buffer,resolution=self.resolution)
allgeoms4 = []
for geom in hingelines:
geom = geom.to_shapely(scaling = popupcad.csg_processing_scaling)
laminate = Laminate(layerdef)
for layer in sublaminate_layers:
laminate.replacelayergeoms(layer, [geom])
allgeoms4.append(
laminate.buffer(hinge_gap,resolution=self.resolution))
return allgeoms4, buffered_split, hingelines
def operate(self, design):
safe_buffer1 = self.buffer_val * popupcad.csg_processing_scaling
safe_buffer2 = self.buffer_val * popupcad.csg_processing_scaling
safe_buffer3 = self.buffer_val * popupcad.csg_processing_scaling
parent_id, parent_output_index = self.operation_links['parent'][0]
parent_index = design.operation_index(parent_id)
parent = design.operations[parent_index].output[
parent_output_index].csg
layerdef = design.return_layer_definition()
allgeoms = []
allhingelines = []
buffered_splits = []
for joint_def in self.joint_defs:
allgeoms4, buffered_split, hingelines = self.gen_geoms(joint_def, layerdef, design,self.buffer_val)
allgeoms.extend(allgeoms4)
allhingelines.extend(hingelines)
buffered_splits.append(buffered_split)
safe_sections = []
for ii in range(len(allgeoms)):
unsafe = Laminate.unaryoperation(allgeoms[:ii] +allgeoms[ii +1:],
'union')
unsafe_buffer = unsafe.buffer(safe_buffer1,resolution=self.resolution)
safe_sections.append(allgeoms[ii].difference(unsafe_buffer))
safe = Laminate.unaryoperation(safe_sections, 'union')
safe_buffer = safe.buffer(safe_buffer2, resolution=self.resolution)
unsafe = Laminate.unaryoperation(allgeoms,'union').difference(safe_buffer)
unsafe2 = unsafe.buffer(safe_buffer3, resolution=self.resolution)
split1 = parent.difference(unsafe2)
return split1
# self.output = []
# self.output.append(OperationOutput(safe,'Safe',self))
# self.output.append(OperationOutput(unsafe,'Unsafe',self))
# self.output.append(OperationOutput(split1,'Split1',self))
def switch_layer_defs(self, layerdef_old, layerdef_new):
new = self.copy()
for joint_def in new.joint_defs:
joint_def.joint_layer = new.convert_layer_links(
[joint_def.joint_layer], layerdef_old, layerdef_new)[0]
joint_def.sublaminate_layers = new.convert_layer_links(
[joint_def.sublaminate_layers], layerdef_old, layerdef_new)[0]
return new
|
|
#!/usr/bin/python
#
# Author: Jashua R. Cloutier (contact via sourceforge username:senexcanis)
#
# Copyright (C) 2010, Jashua R. Cloutier
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Jashua R. Cloutier nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# The CppHeaderParser.py script is written in Python 2.4 and released to
# the open source community for continuous improvements under the BSD
# 2.0 new license, which can be found at:
#
# http://www.opensource.org/licenses/bsd-license.php
#
"""Parse C++ header files and generate a data structure
representing the class
"""
import ply.lex as lex
import os
import sys
import re
import inspect
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
__version__ = "1.9"
version = "1.9"
tokens = [
'NUMBER',
'NAME',
'OPEN_PAREN',
'CLOSE_PAREN',
'OPEN_BRACE',
'CLOSE_BRACE',
'COLON',
'SEMI_COLON',
'COMMA',
'COMMENT_SINGLELINE',
'COMMENT_MULTILINE',
'PRECOMP_MACRO',
'PRECOMP_MACRO_CONT',
'ASTERISK',
'AMPERSTAND',
'EQUALS',
'MINUS',
'PLUS',
'DIVIDE',
'CHAR_LITERAL',
'STRING_LITERAL',
'OPERATOR_DIVIDE_OVERLOAD',
'NEW_LINE',
]
t_ignore = " \t\r[].|!?%@"
t_NUMBER = r'[0-9][0-9XxA-Fa-f]*'
t_NAME = r'[<>A-Za-z_~][A-Za-z0-9_]*'
t_OPERATOR_DIVIDE_OVERLOAD = r'/='
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_OPEN_BRACE = r'{'
t_CLOSE_BRACE = r'}'
t_SEMI_COLON = r';'
t_COLON = r':'
t_COMMA = r','
t_PRECOMP_MACRO = r'\#.*'
t_PRECOMP_MACRO_CONT = r'.*\\\n'
def t_COMMENT_SINGLELINE(t):
r'\/\/.*\n'
global doxygenCommentCache
if t.value.startswith("///") or t.value.startswith("//!"):
if doxygenCommentCache:
doxygenCommentCache += "\n"
if t.value.endswith("\n"):
doxygenCommentCache += t.value[:-1]
else:
doxygenCommentCache += t.value
t_ASTERISK = r'\*'
t_MINUS = r'\-'
t_PLUS = r'\+'
t_DIVIDE = r'/[^/]'
t_AMPERSTAND = r'&'
t_EQUALS = r'='
t_CHAR_LITERAL = "'.'"
#found at http://wordaligned.org/articles/string-literals-and-regular-expressions
#TODO: This does not work with the string "bla \" bla"
t_STRING_LITERAL = r'"([^"\\]|\\.)*"'
#Found at http://ostermiller.org/findcomment.html
def t_COMMENT_MULTILINE(t):
r'/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/'
global doxygenCommentCache
if t.value.startswith("/**") or t.value.startswith("/*!"):
#not sure why, but get double new lines
v = t.value.replace("\n\n", "\n")
#strip prefixing whitespace
v = re.sub("\n[\s]+\*", "\n*", v)
doxygenCommentCache += v
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(v):
print "Lex error: ", v
lex.lex()
debug = 0
supportedAccessSpecifier = [
'public',
'protected',
'private'
]
doxygenCommentCache = ""
def is_namespace(nameStack):
"""Determines if a namespace is being specified"""
if len(nameStack) == 0:
return False
if nameStack[0] == "namespace":
return True
return False
def is_enum_namestack(nameStack):
"""Determines if a namestack is an enum namestack"""
if len(nameStack) == 0:
return False
if nameStack[0] == "enum":
return True
if len(nameStack) > 1 and nameStack[0] == "typedef" and nameStack[1] == "enum":
return True
return False
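# For example (illustrative): is_namespace(["namespace", "foo", "{"]) is True,
# is_enum_namestack(["enum", "Color", "{"]) is True, and so is
# is_enum_namestack(["typedef", "enum", "{"]), while
# is_namespace(["class", "Foo"]) is False.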
class CppParseError(Exception): pass
class CppClass(dict):
"""Takes a name stack and turns it into a class
Contains the following Keys:
self['name'] - Name of the class
self['doxygen'] - Doxygen comments associated with the class if they exist
self['inherits'] - List of Classes that this one inherits where the values
are of the form {"access": Anything in supportedAccessSpecifier
"class": Name of the class
self['methods'] - Dictionary where keys are from supportedAccessSpecifier
and values are a lists of CppMethod's
self['properties'] - Dictionary where keys are from supportedAccessSpecifier
and values are lists of CppVariable's
self['enums'] - Dictionary where keys are from supportedAccessSpecifier and
values are lists of CppEnum's
An example of how this could look is as follows:
#self =
{
'name': ""
'inherits':[]
'methods':
{
'public':[],
'protected':[],
'private':[]
},
'properties':
{
'public':[],
'protected':[],
'private':[]
},
'enums':
{
'public':[],
'protected':[],
'private':[]
}
}
"""
def __init__(self, nameStack):
if (debug): print "Class: ", nameStack
if (len(nameStack) < 2):
print "Error detecting class"
return
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
self["name"] = nameStack[1]
inheritList = []
if ":" in nameStack:
nameStack = nameStack[nameStack.index(":") + 1:]
while len(nameStack):
tmpStack = []
tmpInheritClass = {"access":"private"}
if "," in nameStack:
tmpStack = nameStack[:nameStack.index(",")]
nameStack = nameStack[nameStack.index(",") + 1:]
else:
tmpStack = nameStack
nameStack = []
if len(tmpStack) == 0:
break;
elif len(tmpStack) == 1:
tmpInheritClass["class"] = tmpStack[0]
elif len(tmpStack) == 2:
tmpInheritClass["access"] = tmpStack[0]
tmpInheritClass["class"] = tmpStack[1]
else:
print "Warning: Cant figure out class inheriting %s\n"%(" ".join(tmpStack))
continue
inheritList.append(tmpInheritClass)
methodAccessSpecificList = {}
propertyAccessSpecificList = {}
enumAccessSpecificList = {}
for accessSpecifier in supportedAccessSpecifier:
methodAccessSpecificList[accessSpecifier] = []
propertyAccessSpecificList[accessSpecifier] = []
enumAccessSpecificList[accessSpecifier] = []
self['inherits'] = inheritList
self['methods'] = methodAccessSpecificList
self['properties'] = propertyAccessSpecificList
self['enums'] = enumAccessSpecificList
self['namespace'] = ""
def __repr__(self):
"""Convert class to a string"""
namespace_prefix = ""
if self["namespace"]: namespace_prefix = self["namespace"] + "::"
rtn = "class %s\n"%(namespace_prefix + self["name"])
try:
print self["doxygen"],
except: pass
if "inherits" in self.keys():
rtn += "Inherits: "
for inheritClass in self["inherits"]:
rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"])
rtn += "\n"
rtn += "{\n"
for accessSpecifier in supportedAccessSpecifier:
rtn += "%s\n"%(accessSpecifier)
#Enums
if (len(self["enums"][accessSpecifier])):
rtn += " // Enums\n"
for enum in self["enums"][accessSpecifier]:
rtn += " %s\n"%(repr(enum))
#Properties
if (len(self["properties"][accessSpecifier])):
rtn += " // Properties\n"
for property in self["properties"][accessSpecifier]:
rtn += " %s\n"%(repr(property))
#Methods
if (len(self["methods"][accessSpecifier])):
rtn += " // Method\n"
for method in self["methods"][accessSpecifier]:
rtn += " %s\n"%(repr(method))
rtn += "}\n"
return rtn
class CppMethod(dict):
"""Takes a name stack and turns it into a method
Contains the following Keys:
self['rtnType'] - Return type of the method (ex. "int")
self['name'] - Name of the method (ex. "getSize")
self['doxygen'] - Doxygen comments associated with the method if they exist
self['parameters'] - List of CppVariables
"""
def __init__(self, nameStack, curClass):
if (debug): print "Method: ", nameStack
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
if "operator" in nameStack:
self["rtnType"] = " ".join(nameStack[:nameStack.index('operator')])
self["name"] = "".join(nameStack[nameStack.index('operator'):nameStack.index('(')])
else:
self["rtnType"] = " ".join(nameStack[:nameStack.index('(') - 1])
self["name"] = " ".join(nameStack[nameStack.index('(') - 1:nameStack.index('(')])
if len(self["rtnType"]) == 0 or self["name"] == curClass:
self["rtnType"] = "void"
paramsStack = nameStack[nameStack.index('(') + 1: ]
#Remove things from the stack till we hit the last paren, this helps handle abstract and normal methods
while (paramsStack[-1] != ")"):
paramsStack.pop()
paramsStack.pop()
params = []
#See if there is a doxygen comment for the variable
doxyVarDesc = {}
#TODO: Put this into a class
if self.has_key("doxygen"):
doxyLines = self["doxygen"].split("\n")
lastParamDesc = ""
for doxyLine in doxyLines:
if " @param " in doxyLine or " \param " in doxyLine:
try:
#Strip out the param
doxyLine = doxyLine[doxyLine.find("param ") + 6:]
(var, desc) = doxyLine.split(" ", 1)
doxyVarDesc[var] = desc.strip()
lastParamDesc = var
except: pass
elif " @return " in doxyLine or " \return " in doxyLine:
lastParamDesc = ""
# not handled for now
elif lastParamDesc:
try:
doxyLine = doxyLine.strip()
if " " not in doxyLine:
lastParamDesc = ""
continue
doxyLine = doxyLine[doxyLine.find(" ") + 1:]
doxyVarDesc[lastParamDesc] += " " + doxyLine
except: pass
#Create the variable now
while (len(paramsStack)):
if (',' in paramsStack):
params.append(CppVariable(paramsStack[0:paramsStack.index(',')], doxyVarDesc=doxyVarDesc))
paramsStack = paramsStack[paramsStack.index(',') + 1:]
else:
param = CppVariable(paramsStack, doxyVarDesc=doxyVarDesc)
if len(param.keys()):
params.append(param)
break
self["parameters"] = params
class CppVariable(dict):
"""Takes a name stack and turns it into a method
Contains the following Keys:
self['type'] - Type for the variable (ex. "const string &")
self['name'] - Name of the variable (ex. "numItems")
self['namespace'] - Namespace containing the enum
self['desc'] - Description of the variable if part of a method (optional)
self['doxygen'] - Doxygen comments associated with the method if they exist
self['defaltValue'] - Default value of the variable, this key will only
exist if there is a default value
"""
def __init__(self, nameStack, **kwargs):
if (debug): print "Variable: ", nameStack
if (len(nameStack) < 2):
return
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
if ("=" in nameStack):
self["type"] = " ".join(nameStack[:nameStack.index("=") - 1])
self["name"] = nameStack[nameStack.index("=") - 1]
self["defaltValue"] = " ".join(nameStack[nameStack.index("=") + 1:])
else:
self["type"] = " ".join(nameStack[:-1])
self["name"] = nameStack[-1]
self["type"] = self["type"].replace(" :",":")
self["type"] = self["type"].replace(": ",":")
self["type"] = self["type"].replace(" <","<")
self["type"] = self["type"].replace(" >",">")
#Optional doxygen description
try:
self["desc"] = kwargs["doxyVarDesc"][self["name"]]
except: pass
class CppEnum(dict):
"""Takes a name stack and turns it into an Enum
Contains the following Keys:
self['name'] - Name of the enum (ex. "ItemState")
self['namespace'] - Namespace containing the enum
self['values'] - List of values where the values are a dictionary of the
form {"name": name of the key (ex. "PARSING_HEADER"),
"value": Specified value of the enum, this key will only exist
if a value for a given enum value was defined
}
"""
def __init__(self, nameStack):
if len(nameStack) < 4 or "{" not in nameStack or "}" not in nameStack:
#Not enough stuff for an enum
return
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
valueList = []
#Figure out what values it has
valueStack = nameStack[nameStack.index('{') + 1: nameStack.index('}')]
while len(valueStack):
tmpStack = []
if "," in valueStack:
tmpStack = valueStack[:valueStack.index(",")]
valueStack = valueStack[valueStack.index(",") + 1:]
else:
tmpStack = valueStack
valueStack = []
if len(tmpStack) == 1:
valueList.append({"name": tmpStack[0]})
elif len(tmpStack) >= 3 and tmpStack[1] == "=":
valueList.append({"name": tmpStack[0], "value": " ".join(tmpStack[2:])})
elif len(tmpStack) == 2 and tmpStack[1] == "=":
if (debug): print "Missed value for %s"%tmpStack[0]
valueList.append({"name": tmpStack[0]})
if len(valueList):
self["values"] = valueList
else:
#An enum without any values is useless, don't bother creating it
return
#Figure out if it has a name
preBraceStack = nameStack[:nameStack.index("{")]
postBraceStack = nameStack[nameStack.index("}") + 1:]
if (len(preBraceStack) == 2 and "typedef" not in nameStack):
self["name"] = preBraceStack[1]
elif len(postBraceStack) and "typedef" in nameStack:
self["name"] = " ".join(postBraceStack)
#See if there are instances of this
if "typedef" not in nameStack and len(postBraceStack):
self["instances"] = []
for var in postBraceStack:
if "," in var:
continue
self["instances"].append(var)
self["namespace"] = ""
class CppHeader:
"""Parsed C++ class header
Variables produced:
self.classes - Dictionary of classes found in a given header file where the
key is the name of the class
"""
def __init__(self, headerFileName, argType = "file"):
if (argType == "file"):
self.headerFileName = os.path.expandvars(headerFileName)
self.mainClass = os.path.split(self.headerFileName)[1][:-2]
headerFileStr = ""
# if headerFileName[-2:] != ".h":
# raise Exception("file must be a header file and end with .h")
elif argType == "string":
self.headerFileName = ""
self.mainClass = "???"
headerFileStr = headerFileName
else:
raise Exception("Arg type must be either file or string")
self.curClass = ""
self.classes = {}
self.enums = []
self.nameStack = []
self.nameSpaces = []
self.curAccessSpecifier = 'private'
if (len(self.headerFileName)):
headerFileStr = "\n".join(open(self.headerFileName).readlines())
self.braceDepth = 0
lex.input(headerFileStr)
curLine = 0
curChar = 0
try:
while True:
tok = lex.token()
# Example: LexToken(COLON,';',1,373)
# i.e. (tok.type, tok.value, tok.lineno, tok.lexpos)
if not tok:
break
curLine = tok.lineno
curChar = tok.lexpos
if (tok.type == 'OPEN_BRACE'):
if len(self.nameStack) and is_namespace(self.nameStack):
self.nameSpaces.append(self.nameStack[1])
if len(self.nameStack) and not is_enum_namestack(self.nameStack):
self.evaluate_stack()
else:
self.nameStack.append(tok.value)
self.braceDepth += 1
elif (tok.type == 'CLOSE_BRACE'):
if self.braceDepth == 0:
continue
if (self.braceDepth == len(self.nameSpaces)):
tmp = self.nameSpaces.pop()
if len(self.nameStack) and is_enum_namestack(self.nameStack):
self.nameStack.append(tok.value)
elif self.braceDepth < 10:
self.evaluate_stack()
else:
self.nameStack = []
self.braceDepth -= 1
if (self.braceDepth == 0):
self.curClass = ""
if (tok.type == 'OPEN_PAREN'):
self.nameStack.append(tok.value)
elif (tok.type == 'CLOSE_PAREN'):
self.nameStack.append(tok.value)
elif (tok.type == 'EQUALS'):
self.nameStack.append(tok.value)
elif (tok.type == 'COMMA'):
self.nameStack.append(tok.value)
elif (tok.type == 'NUMBER'):
self.nameStack.append(tok.value)
elif (tok.type == 'MINUS'):
self.nameStack.append(tok.value)
elif (tok.type == 'PLUS'):
self.nameStack.append(tok.value)
elif (tok.type == 'STRING_LITERAL'):
self.nameStack.append(tok.value)
elif (tok.type == 'NAME' or tok.type == 'AMPERSTAND' or tok.type == 'ASTERISK'):
if (tok.value == 'class'):
self.nameStack.append(tok.value)
elif (tok.value in supportedAccessSpecifier and self.braceDepth == len(self.nameSpaces) + 1):
self.curAccessSpecifier = tok.value
else:
self.nameStack.append(tok.value)
elif (tok.type == 'COLON'):
#Don't want a colon to be first in the stack
if len(self.nameStack) == 0:
continue
self.nameStack.append(tok.value)
elif (tok.type == 'SEMI_COLON'):
if (self.braceDepth < 10):
self.evaluate_stack()
except:
raise CppParseError("Not able to parse %s on line %d evaluating \"%s\"\nError around: %s"
% (self.headerFileName, tok.lineno, tok.value, " ".join(self.nameStack)))
def evaluate_stack(self):
"""Evaluates the current name stack"""
global doxygenCommentCache
if (debug): print "Evaluating stack %s at..."%self.nameStack
if (len(self.curClass)):
if (debug): print "%s (%s) "%(self.curClass, self.curAccessSpecifier),
if (len(self.nameStack) == 0):
if (debug): print "line ",lineno()
if (debug): print "(Empty Stack)"
return
elif (self.nameStack[0] == "namespace"):
#Taken care of outside of here
pass
elif (self.nameStack[0] == "class"):
if (debug): print "line ",lineno()
self.evaluate_class_stack()
elif (self.nameStack[0] == "struct"):
if (debug): print "line ",lineno()
self.curAccessSpecifier = "public"
self.evaluate_class_stack()
elif (len(self.curClass) == 0):
if (debug): print "line ",lineno()
if is_enum_namestack(self.nameStack):
self.evaluate_enum_stack()
self.nameStack = []
doxygenCommentCache = ""
return
elif (self.braceDepth < 1):
if (debug): print "line ",lineno()
#Ignore global stuff for now
if (debug): print "Global stuff: ", self.nameStack
self.nameStack = []
doxygenCommentCache = ""
return
elif (self.braceDepth > len(self.nameSpaces) + 1):
if (debug): print "line ",lineno()
self.nameStack = []
doxygenCommentCache = ""
return
elif is_enum_namestack(self.nameStack):
if (debug): print "line ",lineno()
#elif self.nameStack[0] == "enum":
self.evaluate_enum_stack()
elif ('(' in self.nameStack):
if (debug): print "line ",lineno()
self.evaluate_method_stack()
else:
if (debug): print "line ",lineno()
self.evaluate_property_stack()
self.nameStack = []
doxygenCommentCache = ""
def evaluate_class_stack(self):
"""Create a Class out of the name stack (but not its parts)"""
#don't support nested (sub) classes today
if self.braceDepth != len(self.nameSpaces):
return
newClass = CppClass(self.nameStack)
if len(newClass.keys()):
self.curClass = newClass["name"]
self.classes[self.curClass] = newClass
else:
self.curClass = ""
newClass["namespace"] = self.cur_namespace()
def evaluate_method_stack(self):
"""Create a method out of the name stack"""
newMethod = CppMethod(self.nameStack, self.curClass)
if len(newMethod.keys()):
self.classes[self.curClass]["methods"][self.curAccessSpecifier].append(newMethod)
def evaluate_property_stack(self):
"""Create a Property out of the name stack"""
newVar = CppVariable(self.nameStack)
if len(newVar.keys()):
self.classes[self.curClass]["properties"][self.curAccessSpecifier].append(newVar)
def evaluate_enum_stack(self):
"""Create an Enum out of the name stack"""
newEnum = CppEnum(self.nameStack)
if len(newEnum.keys()):
if len(self.curClass):
newEnum["namespace"] = self.cur_namespace()
self.classes[self.curClass]["enums"][self.curAccessSpecifier].append(newEnum)
else:
newEnum["namespace"] = self.cur_namespace()
# print "Adding global enum"
self.enums.append(newEnum)
#This enum has instances, turn them into properties
if newEnum.has_key("instances"):
instanceType = "enum"
if newEnum.has_key("name"):
instanceType = newEnum["name"]
for instance in newEnum["instances"]:
self.nameStack = [instanceType, instance]
self.evaluate_property_stack()
del newEnum["instances"]
def cur_namespace(self, add_double_colon = False):
rtn = ""
i = 0
while i < len(self.nameSpaces):
rtn += self.nameSpaces[i]
if add_double_colon or i < len(self.nameSpaces) - 1:
rtn += "::"
i+=1
return rtn
def __repr__(self):
rtn = ""
for className in self.classes.keys():
rtn += repr(self.classes[className])
return rtn
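# Minimal usage sketch (hedged; assumes this module is importable and that a
# header file "sample.h" exists alongside it):
#
#   cppHeader = CppHeader("sample.h")
#   for className, cppClass in cppHeader.classes.items():
#       print className
#       for method in cppClass["methods"]["public"]:
#           print "  %s %s(...)" % (method["rtnType"], method["name"])
#
# Header text can also be parsed in-memory with CppHeader(src, argType="string").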
|
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from mock import MagicMock
from mock import patch
from oslo_utils import netutils
from testtools.matchers import Is, Equals, Not
from trove.common.instance import ServiceStatuses
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.couchdb import (
manager as couchdb_manager)
from trove.guestagent.datastore.experimental.couchdb import (
service as couchdb_service)
from trove.guestagent import pkg as pkg
from trove.guestagent import volume
from trove.tests.unittests.guestagent.test_datastore_manager import \
DatastoreManagerTest
class GuestAgentCouchDBManagerTest(DatastoreManagerTest):
def setUp(self):
super(GuestAgentCouchDBManagerTest, self).setUp('couchdb')
self.real_status = couchdb_service.CouchDBAppStatus.set_status
class FakeInstanceServiceStatus(object):
status = ServiceStatuses.NEW
def save(self):
pass
couchdb_service.CouchDBAppStatus.set_status = MagicMock(
return_value=FakeInstanceServiceStatus())
self.manager = couchdb_manager.Manager()
self.pkg = couchdb_service.packager
self.real_db_app_status = couchdb_service.CouchDBAppStatus
self.origin_os_path_exists = os.path.exists
self.origin_format = volume.VolumeDevice.format
self.origin_migrate_data = volume.VolumeDevice.migrate_data
self.origin_mount = volume.VolumeDevice.mount
self.origin_mount_points = volume.VolumeDevice.mount_points
self.origin_stop_db = couchdb_service.CouchDBApp.stop_db
self.origin_start_db = couchdb_service.CouchDBApp.start_db
self.original_get_ip = netutils.get_my_ipv4
self.orig_make_host_reachable = (
couchdb_service.CouchDBApp.make_host_reachable)
self.orig_backup_restore = backup.restore
self.orig_create_users = couchdb_service.CouchDBAdmin.create_user
self.orig_delete_user = couchdb_service.CouchDBAdmin.delete_user
self.orig_list_users = couchdb_service.CouchDBAdmin.list_users
self.orig_get_user = couchdb_service.CouchDBAdmin.get_user
self.orig_grant_access = couchdb_service.CouchDBAdmin.grant_access
self.orig_revoke_access = couchdb_service.CouchDBAdmin.revoke_access
self.orig_list_access = couchdb_service.CouchDBAdmin.list_access
self.orig_enable_root = couchdb_service.CouchDBAdmin.enable_root
self.orig_is_root_enabled = (
couchdb_service.CouchDBAdmin.is_root_enabled)
self.orig_create_databases = (
couchdb_service.CouchDBAdmin.create_database)
self.orig_list_databases = couchdb_service.CouchDBAdmin.list_databases
self.orig_delete_database = (
couchdb_service.CouchDBAdmin.delete_database)
def tearDown(self):
super(GuestAgentCouchDBManagerTest, self).tearDown()
couchdb_service.packager = self.pkg
couchdb_service.CouchDBAppStatus.set_status = self.real_db_app_status
os.path.exists = self.origin_os_path_exists
volume.VolumeDevice.format = self.origin_format
volume.VolumeDevice.migrate_data = self.origin_migrate_data
volume.VolumeDevice.mount = self.origin_mount
volume.VolumeDevice.mount_points = self.origin_mount_points
couchdb_service.CouchDBApp.stop_db = self.origin_stop_db
couchdb_service.CouchDBApp.start_db = self.origin_start_db
netutils.get_my_ipv4 = self.original_get_ip
couchdb_service.CouchDBApp.make_host_reachable = (
self.orig_make_host_reachable)
backup.restore = self.orig_backup_restore
couchdb_service.CouchDBAdmin.create_user = self.orig_create_users
couchdb_service.CouchDBAdmin.delete_user = self.orig_delete_user
couchdb_service.CouchDBAdmin.list_users = self.orig_list_users
couchdb_service.CouchDBAdmin.get_user = self.orig_get_user
couchdb_service.CouchDBAdmin.grant_access = self.orig_grant_access
couchdb_service.CouchDBAdmin.revoke_access = self.orig_revoke_access
couchdb_service.CouchDBAdmin.list_access = self.orig_list_access
couchdb_service.CouchDBAdmin.enable_root = self.orig_enable_root
couchdb_service.CouchDBAdmin.is_root_enabled = (
self.orig_is_root_enabled)
couchdb_service.CouchDBAdmin.create_database = (
self.orig_create_databases)
couchdb_service.CouchDBAdmin.list_databases = self.orig_list_databases
couchdb_service.CouchDBAdmin.delete_database = (
self.orig_delete_database)
def test_update_status(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
self.manager.update_status(self.context)
mock_status.update.assert_any_call()
def _prepare_dynamic(self, packages=None, databases=None,
config_content=None, device_path='/dev/vdb',
is_db_installed=True, backup_id=None,
overrides=None):
mock_status = MagicMock()
mock_app = MagicMock()
self.manager.appStatus = mock_status
self.manager.app = mock_app
mount_point = '/var/lib/couchdb'
mock_status.begin_install = MagicMock(return_value=None)
mock_app.install_if_needed = MagicMock(return_value=None)
mock_app.make_host_reachable = MagicMock(return_value=None)
mock_app.restart = MagicMock(return_value=None)
mock_app.start_db = MagicMock(return_value=None)
mock_app.stop_db = MagicMock(return_value=None)
os.path.exists = MagicMock(return_value=True)
volume.VolumeDevice.format = MagicMock(return_value=None)
volume.VolumeDevice.migrate_data = MagicMock(return_value=None)
volume.VolumeDevice.mount = MagicMock(return_value=None)
volume.VolumeDevice.mount_points = MagicMock(return_value=[])
backup.restore = MagicMock(return_value=None)
backup_info = {'id': backup_id,
'location': 'fake-location',
'type': 'CouchDBBackup',
'checksum': 'fake-checksum'} if backup_id else None
couchdb_service.CouchDBAdmin.create_database = MagicMock(
return_value=None)
couchdb_service.CouchDBAdmin.create_user = MagicMock(return_value=None)
with patch.object(pkg.Package, 'pkg_is_installed',
return_value=MagicMock(
return_value=is_db_installed)):
self.manager.prepare(context=self.context, packages=packages,
config_contents=config_content,
databases=databases,
memory_mb='2048', users=None,
device_path=device_path,
mount_point=mount_point,
backup_info=backup_info,
overrides=None,
cluster_config=None)
# verification/assertion
mock_status.begin_install.assert_any_call()
mock_app.install_if_needed.assert_any_call(packages)
mock_app.make_host_reachable.assert_any_call()
mock_app.change_permissions.assert_any_call()
if backup_id:
backup.restore.assert_any_call(self.context,
backup_info,
mount_point)
def test_prepare_pkg(self):
self._prepare_dynamic(['couchdb'])
def test_prepare_no_pkg(self):
self._prepare_dynamic([])
def test_prepare_from_backup(self):
self._prepare_dynamic(['couchdb'], backup_id='123abc456')
def test_prepare_database(self):
self._prepare_dynamic(databases=['db1'])
def test_restart(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
with patch.object(couchdb_service.CouchDBApp, 'restart',
return_value=None):
# invocation
self.manager.restart(self.context)
# verification/assertion
couchdb_service.CouchDBApp.restart.assert_any_call()
def test_stop_db(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBApp.stop_db = MagicMock(return_value=None)
# invocation
self.manager.stop_db(self.context)
# verification/assertion
couchdb_service.CouchDBApp.stop_db.assert_any_call(
do_not_start_on_reboot=False)
def test_reset_configuration(self):
try:
configuration = {'config_contents': 'some junk'}
self.manager.reset_configuration(self.context, configuration)
except Exception:
self.fail("reset_configuration raised exception unexpectedly.")
def test_rpc_ping(self):
output = self.manager.rpc_ping(self.context)
self.assertTrue(output)
def test_create_user(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.create_user = MagicMock(return_value=None)
self.manager.create_user(self.context, ['user1'])
couchdb_service.CouchDBAdmin.create_user.assert_any_call(['user1'])
def test_delete_user(self):
user = ['user1']
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.delete_user = MagicMock(return_value=None)
self.manager.delete_user(self.context, user)
couchdb_service.CouchDBAdmin.delete_user.assert_any_call(user)
def test_list_users(self):
couchdb_service.CouchDBAdmin.list_users = MagicMock(
return_value=['user1'])
users = self.manager.list_users(self.context)
self.assertThat(users, Equals(['user1']))
couchdb_service.CouchDBAdmin.list_users.assert_any_call(
None, None, False)
def test_get_user(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.get_user = MagicMock(
return_value=['user1'])
self.manager.get_user(self.context, 'user1', None)
couchdb_service.CouchDBAdmin.get_user.assert_any_call(
'user1', None)
def test_grant_access(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.grant_access = MagicMock(
return_value=None)
self.manager.grant_access(self.context, 'user1', None, ['db1'])
couchdb_service.CouchDBAdmin.grant_access.assert_any_call(
'user1', ['db1'])
def test_revoke_access(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.revoke_access = MagicMock(
return_value=None)
self.manager.revoke_access(self.context, 'user1', None, ['db1'])
couchdb_service.CouchDBAdmin.revoke_access.assert_any_call(
'user1', ['db1'])
def test_list_access(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.list_access = MagicMock(
return_value=['user1'])
self.manager.list_access(self.context, 'user1', None)
couchdb_service.CouchDBAdmin.list_access.assert_any_call(
'user1', None)
def test_enable_root(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.enable_root = MagicMock(
return_value=True)
result = self.manager.enable_root(self.context)
self.assertThat(result, Equals(True))
def test_is_root_enabled(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.is_root_enabled = MagicMock(
return_value=True)
result = self.manager.is_root_enabled(self.context)
self.assertThat(result, Equals(True))
def test_create_databases(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.create_database = MagicMock(
return_value=None)
self.manager.create_database(self.context, ['db1'])
couchdb_service.CouchDBAdmin.create_database.assert_any_call(['db1'])
def test_delete_database(self):
databases = ['db1']
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.delete_database = MagicMock(
return_value=None)
self.manager.delete_database(self.context, databases)
couchdb_service.CouchDBAdmin.delete_database.assert_any_call(
databases)
def test_list_databases(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.list_databases = MagicMock(
return_value=['database1'])
databases = self.manager.list_databases(self.context)
self.assertThat(databases, Not(Is(None)))
self.assertThat(databases, Equals(['database1']))
couchdb_service.CouchDBAdmin.list_databases.assert_any_call(
None, None, False)
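# Note on the pattern above: setUp snapshots the real module-level callables
# and tearDown restores them, because these tests monkey-patch
# couchdb_service / volume attributes directly rather than wrapping every
# call in mock.patch context managers.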
|
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs training and eval on CQL-SAC on D4RL using the Actor-Learner API.
All default hyperparameters in train_eval come from the CQL paper:
https://arxiv.org/abs/2006.04779
"""
import os
from absl import app
from absl import flags
from absl import logging
import gin
import numpy as np
import tensorflow as tf
from tf_agents.agents.cql import cql_sac_agent
from tf_agents.agents.ddpg import critic_network
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.environments import tf_py_environment
from tf_agents.examples.cql_sac.kumar20.d4rl_utils import load_d4rl
from tf_agents.examples.cql_sac.kumar20.data_utils import create_tf_record_dataset
from tf_agents.metrics import py_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.policies import greedy_policy
from tf_agents.policies import py_tf_eager_policy
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train import triggers
from tf_agents.train.utils import strategy_utils
from tf_agents.train.utils import train_utils
from tf_agents.trajectories import trajectory
FLAGS = flags.FLAGS
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('env_name', 'antmaze-medium-play-v0',
'Name of the environment.')
flags.DEFINE_string('dataset_path', None, 'TFRecord dataset path.')
flags.DEFINE_integer('learner_iterations_per_call', 500,
'Iterations per learner run call.')
flags.DEFINE_integer('policy_save_interval', 10000, 'Policy save interval.')
flags.DEFINE_integer('eval_interval', 10000, 'Evaluation interval.')
flags.DEFINE_integer('summary_interval', 1000, 'Summary interval.')
flags.DEFINE_integer('num_gradient_updates', 1000000,
'Total number of train iterations to perform.')
flags.DEFINE_bool(
'use_trajectories', False,
'Whether dataset samples are stored as trajectories. '
'If False, stored as transitions')
flags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')
flags.DEFINE_multi_string('gin_param', None, 'Gin binding parameters.')
@gin.configurable
def train_eval(
root_dir,
dataset_path,
env_name,
# Training params
tpu=False,
use_gpu=False,
num_gradient_updates=1000000,
actor_fc_layers=(256, 256),
critic_joint_fc_layers=(256, 256, 256),
# Agent params
batch_size=256,
bc_steps=0,
actor_learning_rate=3e-5,
critic_learning_rate=3e-4,
alpha_learning_rate=3e-4,
reward_scale_factor=1.0,
cql_alpha_learning_rate=3e-4,
cql_alpha=5.0,
cql_tau=10.0,
num_cql_samples=10,
reward_noise_variance=0.0,
include_critic_entropy_term=False,
use_lagrange_cql_alpha=True,
log_cql_alpha_clipping=None,
softmax_temperature=1.0,
# Data params
reward_shift=0.0,
action_clipping=None,
use_trajectories=False,
data_shuffle_buffer_size_per_record=1,
data_shuffle_buffer_size=100,
data_num_shards=1,
data_block_length=10,
data_parallel_reads=None,
data_parallel_calls=10,
data_prefetch=10,
data_cycle_length=10,
# Others
policy_save_interval=10000,
eval_interval=10000,
summary_interval=1000,
learner_iterations_per_call=1,
eval_episodes=10,
debug_summaries=False,
summarize_grads_and_vars=False,
seed=None):
"""Trains and evaluates CQL-SAC."""
logging.info('Training CQL-SAC on: %s', env_name)
tf.random.set_seed(seed)
np.random.seed(seed)
# Load environment.
env = load_d4rl(env_name)
tf_env = tf_py_environment.TFPyEnvironment(env)
strategy = strategy_utils.get_strategy(tpu, use_gpu)
if not dataset_path.endswith('.tfrecord'):
dataset_path = os.path.join(dataset_path, env_name,
'%s*.tfrecord' % env_name)
logging.info('Loading dataset from %s', dataset_path)
dataset_paths = tf.io.gfile.glob(dataset_path)
# Create dataset.
with strategy.scope():
dataset = create_tf_record_dataset(
dataset_paths,
batch_size,
shuffle_buffer_size_per_record=data_shuffle_buffer_size_per_record,
shuffle_buffer_size=data_shuffle_buffer_size,
num_shards=data_num_shards,
cycle_length=data_cycle_length,
block_length=data_block_length,
num_parallel_reads=data_parallel_reads,
num_parallel_calls=data_parallel_calls,
num_prefetch=data_prefetch,
strategy=strategy,
reward_shift=reward_shift,
action_clipping=action_clipping,
use_trajectories=use_trajectories)
# Create agent.
time_step_spec = tf_env.time_step_spec()
observation_spec = time_step_spec.observation
action_spec = tf_env.action_spec()
with strategy.scope():
train_step = train_utils.create_train_step()
actor_net = actor_distribution_network.ActorDistributionNetwork(
observation_spec,
action_spec,
fc_layer_params=actor_fc_layers,
continuous_projection_net=tanh_normal_projection_network
.TanhNormalProjectionNetwork)
critic_net = critic_network.CriticNetwork(
(observation_spec, action_spec),
joint_fc_layer_params=critic_joint_fc_layers,
kernel_initializer='glorot_uniform',
last_kernel_initializer='glorot_uniform')
agent = cql_sac_agent.CqlSacAgent(
time_step_spec,
action_spec,
actor_network=actor_net,
critic_network=critic_net,
actor_optimizer=tf.keras.optimizers.Adam(
learning_rate=actor_learning_rate),
critic_optimizer=tf.keras.optimizers.Adam(
learning_rate=critic_learning_rate),
alpha_optimizer=tf.keras.optimizers.Adam(
learning_rate=alpha_learning_rate),
cql_alpha=cql_alpha,
num_cql_samples=num_cql_samples,
include_critic_entropy_term=include_critic_entropy_term,
use_lagrange_cql_alpha=use_lagrange_cql_alpha,
cql_alpha_learning_rate=cql_alpha_learning_rate,
target_update_tau=5e-3,
target_update_period=1,
random_seed=seed,
cql_tau=cql_tau,
reward_noise_variance=reward_noise_variance,
num_bc_steps=bc_steps,
td_errors_loss_fn=tf.math.squared_difference,
gamma=0.99,
reward_scale_factor=reward_scale_factor,
gradient_clipping=None,
log_cql_alpha_clipping=log_cql_alpha_clipping,
softmax_temperature=softmax_temperature,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step)
agent.initialize()
# Create learner.
saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)
collect_env_step_metric = py_metrics.EnvironmentSteps()
learning_triggers = [
triggers.PolicySavedModelTrigger(
saved_model_dir,
agent,
train_step,
interval=policy_save_interval,
metadata_metrics={
triggers.ENV_STEP_METADATA_KEY: collect_env_step_metric
}),
triggers.StepPerSecondLogTrigger(train_step, interval=100)
]
cql_learner = learner.Learner(
root_dir,
train_step,
agent,
experience_dataset_fn=lambda: dataset,
triggers=learning_triggers,
summary_interval=summary_interval,
strategy=strategy)
# Create actor for evaluation.
tf_greedy_policy = greedy_policy.GreedyPolicy(agent.policy)
eval_greedy_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_greedy_policy, use_tf_function=True)
eval_actor = actor.Actor(
env,
eval_greedy_policy,
train_step,
metrics=actor.eval_metrics(eval_episodes),
summary_dir=os.path.join(root_dir, 'eval'),
episodes_per_run=eval_episodes)
# Run.
dummy_trajectory = trajectory.mid((), (), (), 0., 1.)
num_learner_iterations = int(num_gradient_updates /
learner_iterations_per_call)
for _ in range(num_learner_iterations):
# Mimic collecting environment steps since we loaded a static dataset.
for _ in range(learner_iterations_per_call):
collect_env_step_metric(dummy_trajectory)
cql_learner.run(iterations=learner_iterations_per_call)
if eval_interval and train_step.numpy() % eval_interval == 0:
eval_actor.run_and_log()
def main(_):
logging.set_verbosity(logging.INFO)
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)
train_eval(
root_dir=FLAGS.root_dir,
dataset_path=FLAGS.dataset_path,
env_name=FLAGS.env_name,
tpu=FLAGS.tpu,
use_gpu=FLAGS.use_gpu,
num_gradient_updates=FLAGS.num_gradient_updates,
policy_save_interval=FLAGS.policy_save_interval,
eval_interval=FLAGS.eval_interval,
summary_interval=FLAGS.summary_interval,
learner_iterations_per_call=FLAGS.learner_iterations_per_call,
use_trajectories=FLAGS.use_trajectories)
if __name__ == '__main__':
flags.mark_flag_as_required('root_dir')
flags.mark_flag_as_required('dataset_path')
app.run(main)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.load_balancer.v2 import amphora as _amphora
from openstack.load_balancer.v2 import availability_zone as _availability_zone
from openstack.load_balancer.v2 import availability_zone_profile as \
_availability_zone_profile
from openstack.load_balancer.v2 import flavor as _flavor
from openstack.load_balancer.v2 import flavor_profile as _flavor_profile
from openstack.load_balancer.v2 import health_monitor as _hm
from openstack.load_balancer.v2 import l7_policy as _l7policy
from openstack.load_balancer.v2 import l7_rule as _l7rule
from openstack.load_balancer.v2 import listener as _listener
from openstack.load_balancer.v2 import load_balancer as _lb
from openstack.load_balancer.v2 import member as _member
from openstack.load_balancer.v2 import pool as _pool
from openstack.load_balancer.v2 import provider as _provider
from openstack.load_balancer.v2 import quota as _quota
from openstack import proxy
from openstack import resource
class Proxy(proxy.Proxy):
def create_load_balancer(self, **attrs):
"""Create a new load balancer from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.load_balancer.v2.
load_balancer.LoadBalancer`,
comprised of the properties on the
LoadBalancer class.
:returns: The results of load balancer creation
:rtype: :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer`
"""
return self._create(_lb.LoadBalancer, **attrs)
def get_load_balancer(self, *attrs):
"""Get a load balancer
:param load_balancer: The value can be the name of a load balancer
or :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer`
instance.
:returns: One
:class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer`
"""
return self._get(_lb.LoadBalancer, *attrs)
def get_load_balancer_statistics(self, name_or_id):
"""Get the load balancer statistics
:param name_or_id: The name or ID of a load balancer
:returns: One :class:`~openstack.load_balancer.v2.load_balancer.
LoadBalancerStats`
"""
return self._get(_lb.LoadBalancerStats, lb_id=name_or_id,
requires_id=False)
def load_balancers(self, **query):
"""Retrieve a generator of load balancers
:returns: A generator of load balancer instances
"""
return self._list(_lb.LoadBalancer, **query)
def delete_load_balancer(self, load_balancer, ignore_missing=True,
cascade=False):
"""Delete a load balancer
:param load_balancer: The load_balancer can be either the name or a
:class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer`
instance
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the load balancer does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent load balancer.
:param bool cascade: If true will delete all child objects of
the load balancer.
:returns: ``None``
"""
load_balancer = self._get_resource(_lb.LoadBalancer, load_balancer)
load_balancer.cascade = cascade
return self._delete(_lb.LoadBalancer, load_balancer,
ignore_missing=ignore_missing)
def find_load_balancer(self, name_or_id, ignore_missing=True):
"""Find a single load balancer
:param name_or_id: The name or ID of a load balancer
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the load balancer does not exist.
When set to ``True``, None will be returned when attempting
to find a nonexistent load balancer.
:returns: One
:class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer`
or None
"""
return self._find(_lb.LoadBalancer, name_or_id,
ignore_missing=ignore_missing)
def update_load_balancer(self, load_balancer, **attrs):
"""Update a load balancer
:param load_balancer: The load_balancer can be either the name or a
:class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer`
instance
:param dict attrs: The attributes to update on the load balancer
represented by ``load_balancer``.
:returns: The updated load_balancer
:rtype: :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer`
"""
return self._update(_lb.LoadBalancer, load_balancer, **attrs)
def wait_for_load_balancer(self, name_or_id, status='ACTIVE',
failures=['ERROR'], interval=2, wait=300):
lb = self._find(_lb.LoadBalancer, name_or_id, ignore_missing=False)
return resource.wait_for_status(self, lb, status, failures, interval,
wait, attribute='provisioning_status')
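# Usage sketch (hedged): these methods are normally reached through an
# openstack.connection.Connection, e.g. assuming a cloud named "mycloud"
# is configured in clouds.yaml:
#
#   import openstack
#   conn = openstack.connect(cloud="mycloud")
#   lb = conn.load_balancer.create_load_balancer(
#       name="lb1", vip_subnet_id="<subnet-id>")
#   conn.load_balancer.wait_for_load_balancer(lb.id)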
def failover_load_balancer(self, name_or_id, **attrs):
"""Failover a load balancer
:param name_or_id: The name or ID of a load balancer
:returns: ``None``
"""
return self._update(_lb.LoadBalancerFailover, lb_id=name_or_id)
def create_listener(self, **attrs):
"""Create a new listener from attributes
:param dict attrs: Keyword arguments which will be used to create a
:class:`~openstack.load_balancer.v2.listener.Listener`,
comprised of the properties on the Listener class.
:returns: The results of listener creation
:rtype: :class:`~openstack.load_balancer.v2.listener.Listener`
"""
return self._create(_listener.Listener, **attrs)
def delete_listener(self, listener, ignore_missing=True):
"""Delete a listener
:param listener: The value can be either the ID of a listener or a
:class:`~openstack.load_balancer.v2.listener.Listener` instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the listener does not exist.
When set to ``True``, no exception will be set when
attempting to delete a nonexistent listener.
:returns: ``None``
"""
self._delete(_listener.Listener, listener,
ignore_missing=ignore_missing)
def find_listener(self, name_or_id, ignore_missing=True):
"""Find a single listener
:param name_or_id: The name or ID of a listener.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the resource does not exist.
When set to ``True``, None will be returned when
attempting to find a nonexistent resource.
:returns: One :class:`~openstack.load_balancer.v2.listener.Listener`
or None
"""
return self._find(_listener.Listener, name_or_id,
ignore_missing=ignore_missing)
def get_listener(self, listener):
"""Get a single listener
:param listener: The value can be the ID of a listener or a
:class:`~openstack.load_balancer.v2.listener.Listener`
instance.
:returns: One :class:`~openstack.load_balancer.v2.listener.Listener`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_listener.Listener, listener)
def get_listener_statistics(self, listener):
"""Get the listener statistics
:param listener: The value can be the ID of a listener or a
:class:`~openstack.load_balancer.v2.listener.Listener`
instance.
:returns: One :class:`~openstack.load_balancer.v2.listener.
ListenerStats`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_listener.ListenerStats, listener_id=listener,
requires_id=False)
def listeners(self, **query):
"""Return a generator of listeners
:param dict query: Optional query parameters to be sent to limit
the resources being returned. Valid parameters are:
:returns: A generator of listener objects
:rtype: :class:`~openstack.load_balancer.v2.listener.Listener`
"""
return self._list(_listener.Listener, **query)
def update_listener(self, listener, **attrs):
"""Update a listener
:param listener: Either the id of a listener or a
:class:`~openstack.load_balancer.v2.listener.Listener`
instance.
:param dict attrs: The attributes to update on the listener
represented by ``listener``.
:returns: The updated listener
:rtype: :class:`~openstack.load_balancer.v2.listener.Listener`
"""
return self._update(_listener.Listener, listener, **attrs)
def create_pool(self, **attrs):
"""Create a new pool from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.load_balancer.v2.
pool.Pool`,
comprised of the properties on the
Pool class.
:returns: The results of Pool creation
:rtype: :class:`~openstack.load_balancer.v2.pool.Pool`
"""
return self._create(_pool.Pool, **attrs)
def get_pool(self, *attrs):
"""Get a pool
:param pool: The value can be the ID of a pool or a
:class:`~openstack.load_balancer.v2.pool.Pool`
instance.
:returns: One
:class:`~openstack.load_balancer.v2.pool.Pool`
"""
return self._get(_pool.Pool, *attrs)
def pools(self, **query):
"""Retrieve a generator of pools
:returns: A generator of Pool instances
"""
return self._list(_pool.Pool, **query)
def delete_pool(self, pool, ignore_missing=True):
"""Delete a pool
:param pool: The pool is a
:class:`~openstack.load_balancer.v2.pool.Pool`
instance
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the pool does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent pool.
:returns: ``None``
"""
return self._delete(_pool.Pool, pool,
ignore_missing=ignore_missing)
def find_pool(self, name_or_id, ignore_missing=True):
"""Find a single pool
:param name_or_id: The name or ID of a pool
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the pool does not exist.
When set to ``True``, None will be returned when attempting
to find a nonexistent pool.
:returns: One :class:`~openstack.load_balancer.v2.pool.Pool` or None
"""
return self._find(_pool.Pool, name_or_id,
ignore_missing=ignore_missing)
def update_pool(self, pool, **attrs):
"""Update a pool
:param pool: Either the id of a pool or a
:class:`~openstack.load_balancer.v2.pool.Pool`
instance.
:param dict attrs: The attributes to update on the pool
represented by ``pool``.
:returns: The updated pool
:rtype: :class:`~openstack.load_balancer.v2.pool.Pool`
"""
return self._update(_pool.Pool, pool, **attrs)
def create_member(self, pool, **attrs):
"""Create a new member from attributes
:param pool: The pool can be either the ID of a pool or a
:class:`~openstack.load_balancer.v2.pool.Pool` instance
that the member will be created in.
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.load_balancer.v2.member.Member`,
comprised of the properties on the Member class.
:returns: The results of member creation
:rtype: :class:`~openstack.load_balancer.v2.member.Member`
"""
poolobj = self._get_resource(_pool.Pool, pool)
return self._create(_member.Member, pool_id=poolobj.id,
**attrs)
def delete_member(self, member, pool, ignore_missing=True):
"""Delete a member
:param member:
The member can be either the ID of a member or a
:class:`~openstack.load_balancer.v2.member.Member` instance.
:param pool: The pool can be either the ID of a pool or a
:class:`~openstack.load_balancer.v2.pool.Pool` instance
that the member belongs to.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the member does not exist.
When set to ``True``, no exception will be set when
attempting to delete a nonexistent member.
:returns: ``None``
"""
poolobj = self._get_resource(_pool.Pool, pool)
self._delete(_member.Member, member,
ignore_missing=ignore_missing, pool_id=poolobj.id)
def find_member(self, name_or_id, pool, ignore_missing=True):
"""Find a single member
:param str name_or_id: The name or ID of a member.
:param pool: The pool can be either the ID of a pool or a
:class:`~openstack.load_balancer.v2.pool.Pool` instance
that the member belongs to.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the resource does not exist.
When set to ``True``, None will be returned when
attempting to find a nonexistent resource.
:returns: One :class:`~openstack.load_balancer.v2.member.Member`
or None
"""
poolobj = self._get_resource(_pool.Pool, pool)
return self._find(_member.Member, name_or_id,
ignore_missing=ignore_missing, pool_id=poolobj.id)
def get_member(self, member, pool):
"""Get a single member
:param member: The member can be the ID of a member or a
:class:`~openstack.load_balancer.v2.member.Member`
instance.
:param pool: The pool can be either the ID of a pool or a
:class:`~openstack.load_balancer.v2.pool.Pool` instance
that the member belongs to.
:returns: One :class:`~openstack.load_balancer.v2.member.Member`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
poolobj = self._get_resource(_pool.Pool, pool)
return self._get(_member.Member, member,
pool_id=poolobj.id)
def members(self, pool, **query):
"""Return a generator of members
:param pool: The pool can be either the ID of a pool or a
:class:`~openstack.load_balancer.v2.pool.Pool` instance
that the member belongs to.
:param dict query: Optional query parameters to be sent to limit
the resources being returned. Valid parameters are:
:returns: A generator of member objects
:rtype: :class:`~openstack.load_balancer.v2.member.Member`
"""
poolobj = self._get_resource(_pool.Pool, pool)
return self._list(_member.Member, pool_id=poolobj.id, **query)
def update_member(self, member, pool, **attrs):
"""Update a member
:param member: Either the ID of a member or a
:class:`~openstack.load_balancer.v2.member.Member`
instance.
:param pool: The pool can be either the ID of a pool or a
:class:`~openstack.load_balancer.v2.pool.Pool` instance
that the member belongs to.
:param dict attrs: The attributes to update on the member
represented by ``member``.
:returns: The updated member
:rtype: :class:`~openstack.load_balancer.v2.member.Member`
"""
poolobj = self._get_resource(_pool.Pool, pool)
return self._update(_member.Member, member,
pool_id=poolobj.id, **attrs)
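# Member calls are pool-scoped (hedged sketch, attribute names per the
# Octavia member API):
#
#   pool = conn.load_balancer.find_pool("web-pool")
#   conn.load_balancer.create_member(
#       pool, address="10.0.0.5", protocol_port=80, subnet_id="<subnet-id>")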
def find_health_monitor(self, name_or_id, ignore_missing=True):
"""Find a single health monitor
:param name_or_id: The name or ID of a health monitor
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the health monitor does not exist.
When set to ``True``, no exception will be set when attempting
to find a nonexistent health monitor.
:returns: The
:class:`openstack.load_balancer.v2.healthmonitor.HealthMonitor`
object matching the given name or id or None if nothing matches.
:raises: :class:`openstack.exceptions.DuplicateResource` if more
than one resource is found for this request.
:raises: :class:`openstack.exceptions.ResourceNotFound` if nothing
is found and ignore_missing is ``False``.
"""
return self._find(_hm.HealthMonitor, name_or_id,
ignore_missing=ignore_missing)
def create_health_monitor(self, **attrs):
"""Create a new health monitor from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.load_balancer.v2.
healthmonitor.HealthMonitor`,
comprised of the properties on the
HealthMonitor class.
:returns: The results of HealthMonitor creation
:rtype: :class:`~openstack.load_balancer.v2.
healthmonitor.HealthMonitor`
"""
return self._create(_hm.HealthMonitor, **attrs)
def get_health_monitor(self, healthmonitor):
"""Get a health monitor
:param healthmonitor: The value can be the ID of a health monitor or
:class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor`
instance.
:returns: One health monitor
:rtype: :class:`~openstack.load_balancer.v2.
healthmonitor.HealthMonitor`
"""
return self._get(_hm.HealthMonitor, healthmonitor)
def health_monitors(self, **query):
"""Retrieve a generator of health monitors
:param dict query: Optional query parameters to be sent to limit
the resources being returned. Valid parameters are:
'name', 'created_at', 'updated_at', 'delay',
'expected_codes', 'http_method', 'max_retries',
'max_retries_down', 'pool_id',
'provisioning_status', 'operating_status',
'timeout', 'project_id', 'type', 'url_path',
'is_admin_state_up'.
:returns: A generator of health monitor instances
"""
return self._list(_hm.HealthMonitor, **query)
def delete_health_monitor(self, healthmonitor, ignore_missing=True):
"""Delete a health monitor
:param healthmonitor: The healthmonitor can be either the ID of the
health monitor or a
:class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor`
instance
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the healthmonitor does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent healthmonitor.
:returns: ``None``
"""
return self._delete(_hm.HealthMonitor, healthmonitor,
ignore_missing=ignore_missing)
def update_health_monitor(self, healthmonitor, **attrs):
"""Update a health monitor
:param healthmonitor: The healthmonitor can be either the ID of the
health monitor or a
:class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor`
instance
:param dict attrs: The attributes to update on the health monitor
represented by ``healthmonitor``.
:returns: The updated health monitor
:rtype: :class:`~openstack.load_balancer.v2.
healthmonitor.HealthMonitor`
"""
return self._update(_hm.HealthMonitor, healthmonitor,
**attrs)
def create_l7_policy(self, **attrs):
"""Create a new l7policy from attributes
:param dict attrs: Keyword arguments which will be used to create a
:class:`~openstack.load_balancer.v2.l7_policy.L7Policy`,
comprised of the properties on the L7Policy class.
:returns: The results of l7policy creation
:rtype: :class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
"""
return self._create(_l7policy.L7Policy, **attrs)
def delete_l7_policy(self, l7_policy, ignore_missing=True):
"""Delete a l7policy
:param l7_policy: The value can be either the ID of a l7policy or a
:class:`~openstack.load_balancer.v2.l7_policy.L7Policy` instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the l7policy does not exist.
When set to ``True``, no exception will be set when
attempting to delete a nonexistent l7policy.
:returns: ``None``
"""
self._delete(_l7policy.L7Policy, l7_policy,
ignore_missing=ignore_missing)
def find_l7_policy(self, name_or_id, ignore_missing=True):
"""Find a single l7policy
:param name_or_id: The name or ID of a l7policy.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the resource does not exist.
When set to ``True``, None will be returned when
attempting to find a nonexistent resource.
:returns: One :class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
or None
"""
return self._find(_l7policy.L7Policy, name_or_id,
ignore_missing=ignore_missing)
def get_l7_policy(self, l7_policy):
"""Get a single l7policy
:param l7_policy: The value can be the ID of a l7policy or a
:class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
instance.
:returns: One :class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_l7policy.L7Policy, l7_policy)
def l7_policies(self, **query):
"""Return a generator of l7policies
:param dict query: Optional query parameters to be sent to limit
the resources being returned. Valid parameters are:
:returns: A generator of l7policy objects
:rtype: :class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
"""
return self._list(_l7policy.L7Policy, **query)
def update_l7_policy(self, l7_policy, **attrs):
"""Update a l7policy
:param l7_policy: Either the id of a l7policy or a
:class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
instance.
:param dict attrs: The attributes to update on the l7policy
represented by ``l7policy``.
:returns: The updated l7policy
:rtype: :class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
"""
return self._update(_l7policy.L7Policy, l7_policy, **attrs)
def create_l7_rule(self, l7_policy, **attrs):
"""Create a new l7rule from attributes
:param l7_policy: The l7_policy can be either the ID of a l7policy or
:class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
instance that the l7rule will be created in.
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.load_balancer.v2.l7_rule.L7Rule`,
comprised of the properties on the L7Rule class.
:returns: The results of l7rule creation
:rtype: :class:`~openstack.load_balancer.v2.l7_rule.L7Rule`
"""
l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy)
return self._create(_l7rule.L7Rule, l7policy_id=l7policyobj.id,
**attrs)
def delete_l7_rule(self, l7rule, l7_policy, ignore_missing=True):
"""Delete a l7rule
:param l7rule:
The l7rule can be either the ID of a l7rule or a
:class:`~openstack.load_balancer.v2.l7_rule.L7Rule` instance.
:param l7_policy: The l7_policy can be either the ID of a l7policy or
:class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
instance that the l7rule belongs to.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the l7rule does not exist.
When set to ``True``, no exception will be set when
attempting to delete a nonexistent l7rule.
:returns: ``None``
"""
l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy)
self._delete(_l7rule.L7Rule, l7rule, ignore_missing=ignore_missing,
l7policy_id=l7policyobj.id)
def find_l7_rule(self, name_or_id, l7_policy, ignore_missing=True):
"""Find a single l7rule
:param str name_or_id: The name or ID of a l7rule.
:param l7_policy: The l7_policy can be either the ID of a l7policy or
:class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
instance that the l7rule belongs to.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the resource does not exist.
When set to ``True``, None will be returned when
attempting to find a nonexistent resource.
:returns: One :class:`~openstack.load_balancer.v2.l7_rule.L7Rule`
or None
"""
l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy)
return self._find(_l7rule.L7Rule, name_or_id,
ignore_missing=ignore_missing,
l7policy_id=l7policyobj.id)
def get_l7_rule(self, l7rule, l7_policy):
"""Get a single l7rule
:param l7rule: The l7rule can be the ID of a l7rule or a
:class:`~openstack.load_balancer.v2.l7_rule.L7Rule`
instance.
:param l7_policy: The l7_policy can be either the ID of a l7policy or
:class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
instance that the l7rule belongs to.
:returns: One :class:`~openstack.load_balancer.v2.l7_rule.L7Rule`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy)
return self._get(_l7rule.L7Rule, l7rule,
l7policy_id=l7policyobj.id)
def l7_rules(self, l7_policy, **query):
"""Return a generator of l7rules
:param l7_policy: The l7_policy can be either the ID of a l7_policy or
:class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
instance that the l7rule belongs to.
:param dict query: Optional query parameters to be sent to limit
the resources being returned. Valid parameters are:
:returns: A generator of l7rule objects
:rtype: :class:`~openstack.load_balancer.v2.l7_rule.L7Rule`
"""
l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy)
return self._list(_l7rule.L7Rule, l7policy_id=l7policyobj.id, **query)
def update_l7_rule(self, l7rule, l7_policy, **attrs):
"""Update a l7rule
:param l7rule: Either the ID of a l7rule or a
:class:`~openstack.load_balancer.v2.l7_rule.L7Rule`
instance.
:param l7_policy: The l7_policy can be either the ID of a l7policy or
:class:`~openstack.load_balancer.v2.l7_policy.L7Policy`
instance that the l7rule belongs to.
:param dict attrs: The attributes to update on the l7rule
represented by ``l7rule``.
:returns: The updated l7rule
:rtype: :class:`~openstack.load_balancer.v2.l7_rule.L7Rule`
"""
l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy)
return self._update(_l7rule.L7Rule, l7rule,
l7policy_id=l7policyobj.id, **attrs)
def quotas(self, **query):
"""Return a generator of quotas
:param dict query: Optional query parameters to be sent to limit
the resources being returned. Currently no query
parameter is supported.
:returns: A generator of quota objects
:rtype: :class:`~openstack.load_balancer.v2.quota.Quota`
"""
return self._list(_quota.Quota, **query)
def get_quota(self, quota):
"""Get a quota
:param quota: The value can be the ID of a quota or a
:class:`~openstack.load_balancer.v2.quota.Quota`
instance. The ID of a quota is the same as the project
ID for the quota.
:returns: One :class:`~openstack.load_balancer.v2.quota.Quota`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_quota.Quota, quota)
def update_quota(self, quota, **attrs):
"""Update a quota
:param quota: Either the ID of a quota or a
:class:`~openstack.load_balancer.v2.quota.Quota`
instance. The ID of a quota is the same as the
project ID for the quota.
:param dict attrs: The attributes to update on the quota represented
by ``quota``.
:returns: The updated quota
:rtype: :class:`~openstack.load_balancer.v2.quota.Quota`
"""
return self._update(_quota.Quota, quota, **attrs)
def get_quota_default(self):
"""Get a default quota
:returns: One :class:`~openstack.load_balancer.v2.quota.QuotaDefault`
"""
return self._get(_quota.QuotaDefault, requires_id=False)
def delete_quota(self, quota, ignore_missing=True):
"""Delete a quota (i.e. reset to the default quota)
:param quota: The value can be either the ID of a quota or a
:class:`~openstack.load_balancer.v2.quota.Quota`
instance. The ID of a quota is the same as the
project ID for the quota.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when quota does not exist.
When set to ``True``, no exception will be set when
attempting to delete a nonexistent quota.
:returns: ``None``
"""
self._delete(_quota.Quota, quota, ignore_missing=ignore_missing)
def providers(self, **query):
"""Retrieve a generator of providers
:returns: A generator of provider instances
"""
return self._list(_provider.Provider, **query)
def provider_flavor_capabilities(self, provider, **query):
"""Retrieve a generator of provider flavor capabilities
:returns: A generator of provider flavor capability instances
"""
return self._list(_provider.ProviderFlavorCapabilities,
provider=provider, **query)
def create_flavor_profile(self, **attrs):
"""Create a new flavor profile from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.load_balancer.v2.
flavor_profile.FlavorProfile`,
comprised of the properties on the
FlavorProfile class.
:returns: The results of profile creation
:rtype: :class:`~openstack.load_balancer.v2.flavor_profile.
FlavorProfile`
"""
return self._create(_flavor_profile.FlavorProfile, **attrs)
def get_flavor_profile(self, *attrs):
"""Get a flavor profile
:param flavor_profile: The value can be the name of a flavor profile
or :class:`~openstack.load_balancer.v2.flavor_profile.
FlavorProfile` instance.
:returns: One
:class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile`
"""
return self._get(_flavor_profile.FlavorProfile, *attrs)
def flavor_profiles(self, **query):
"""Retrieve a generator of flavor profiles
:returns: A generator of flavor profile instances
"""
return self._list(_flavor_profile.FlavorProfile, **query)
def delete_flavor_profile(self, flavor_profile, ignore_missing=True):
"""Delete a flavor profile
:param flavor_profile: The flavor_profile can be either the name or a
:class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile`
instance
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the flavor profile does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent flavor profile.
:returns: ``None``
"""
self._delete(_flavor_profile.FlavorProfile, flavor_profile,
ignore_missing=ignore_missing)
def find_flavor_profile(self, name_or_id, ignore_missing=True):
"""Find a single flavor profile
:param name_or_id: The name or ID of a flavor profile
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the flavor profile does not exist.
When set to ``True``, None will be returned when attempting
to find a nonexistent flavor profile.
:returns: One
:class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile`
or None
"""
return self._find(_flavor_profile.FlavorProfile, name_or_id,
ignore_missing=ignore_missing)
def update_flavor_profile(self, flavor_profile, **attrs):
"""Update a flavor profile
:param flavor_profile: The flavor_profile can be either the name or a
:class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile`
instance
:param dict attrs: The attributes to update on the flavor profile
represented by ``flavor_profile``.
:returns: The updated flavor profile
:rtype: :class:`~openstack.load_balancer.v2.flavor_profile.
FlavorProfile`
"""
return self._update(_flavor_profile.FlavorProfile, flavor_profile,
**attrs)
def create_flavor(self, **attrs):
"""Create a new flavor from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.load_balancer.v2.
flavor.Flavor`, comprised of the properties on the
Flavor class.
:returns: The results of flavor creation
:rtype: :class:`~openstack.load_balancer.v2.flavor.Flavor`
"""
return self._create(_flavor.Flavor, **attrs)
def get_flavor(self, *attrs):
"""Get a flavor
:param flavor: The value can be the name of a flavor
or :class:`~openstack.load_balancer.v2.flavor.Flavor` instance.
:returns: One
:class:`~openstack.load_balancer.v2.flavor.Flavor`
"""
return self._get(_flavor.Flavor, *attrs)
def flavors(self, **query):
"""Retrieve a generator of flavors
:returns: A generator of flavor instances
"""
return self._list(_flavor.Flavor, **query)
def delete_flavor(self, flavor, ignore_missing=True):
"""Delete a flavor
:param flavor: The flavor can be either the name or a
:class:`~openstack.load_balancer.v2.flavor.Flavor` instance
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the flavor does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent flavor.
:returns: ``None``
"""
self._delete(_flavor.Flavor, flavor, ignore_missing=ignore_missing)
def find_flavor(self, name_or_id, ignore_missing=True):
"""Find a single flavor
:param name_or_id: The name or ID of a flavor
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the flavor does not exist.
When set to ``True``, None will be returned when attempting
to find a nonexistent flavor.
:returns: One :class:`~openstack.load_balancer.v2.flavor.Flavor` or None
"""
return self._find(_flavor.Flavor, name_or_id,
ignore_missing=ignore_missing)
def update_flavor(self, flavor, **attrs):
"""Update a flavor
:param flavor: The flavor can be either the name or a
:class:`~openstack.load_balancer.v2.flavor.Flavor` instance
:param dict attrs: The attributes to update on the flavor
represented by ``flavor``.
:returns: The updated flavor
:rtype: :class:`~openstack.load_balancer.v2.flavor.Flavor`
"""
return self._update(_flavor.Flavor, flavor, **attrs)
def amphorae(self, **query):
"""Retrieve a generator of amphorae
:returns: A generator of amphora instances
"""
return self._list(_amphora.Amphora, **query)
def get_amphora(self, *attrs):
"""Get a amphora
:param amphora: The value can be the ID of an amphora
or :class:`~openstack.load_balancer.v2.amphora.Amphora` instance.
:returns: One
:class:`~openstack.load_balancer.v2.amphora.Amphora`
"""
return self._get(_amphora.Amphora, *attrs)
def find_amphora(self, amphora_id, ignore_missing=True):
"""Find a single amphora
:param amphora_id: The ID of an amphora
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the amphora does not exist.
When set to ``True``, None will be returned when attempting
to find a nonexistent amphora.
:returns: One :class:`~openstack.load_balancer.v2.amphora.Amphora` or None
"""
return self._find(_amphora.Amphora, amphora_id,
ignore_missing=ignore_missing)
def configure_amphora(self, amphora_id, **attrs):
"""Update the configuration of an amphora agent
:param amphora_id: The ID of an amphora
:returns: ``None``
"""
return self._update(_amphora.AmphoraConfig, amphora_id=amphora_id)
def failover_amphora(self, amphora_id, **attrs):
"""Failover an amphora
:param amphora_id: The ID of an amphora
:returns: ``None``
"""
return self._update(_amphora.AmphoraFailover, amphora_id=amphora_id)
def create_availability_zone_profile(self, **attrs):
"""Create a new availability zone profile from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.load_balancer.v2.
availability_zone_profile.AvailabilityZoneProfile`,
comprised of the properties on the
AvailabilityZoneProfile class.
:returns: The results of profile creation
:rtype: :class:`~openstack.load_balancer.v2.availability_zone_profile.
AvailabilityZoneProfile`
"""
return self._create(_availability_zone_profile.AvailabilityZoneProfile,
**attrs)
def get_availability_zone_profile(self, *attrs):
"""Get an availability zone profile
:param availability_zone_profile: The value can be the name of an
availability_zone profile
or :class:`~openstack.load_balancer.v2.availability_zone_profile.
AvailabilityZoneProfile` instance.
:returns: One
:class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile`
"""
return self._get(_availability_zone_profile.AvailabilityZoneProfile,
*attrs)
def availability_zone_profiles(self, **query):
"""Retrieve a generator of availability zone profiles
:returns: A generator of availability zone profile instances
"""
return self._list(_availability_zone_profile.AvailabilityZoneProfile,
**query)
def delete_availability_zone_profile(self, availability_zone_profile,
ignore_missing=True):
"""Delete an availability zone profile
:param availability_zone_profile: The availability_zone_profile can be
either the name or a
:class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile`
instance
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the availability zone profile does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent availability zone profile.
:returns: ``None``
"""
self._delete(_availability_zone_profile.AvailabilityZoneProfile,
availability_zone_profile, ignore_missing=ignore_missing)
def find_availability_zone_profile(self, name_or_id, ignore_missing=True):
"""Find a single availability zone profile
:param name_or_id: The name or ID of an availability zone profile
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the availability zone profile does not exist.
When set to ``True``, None will be returned when attempting
to find a nonexistent availability zone profile.
:returns: One
:class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile`
or None
"""
return self._find(_availability_zone_profile.AvailabilityZoneProfile,
name_or_id, ignore_missing=ignore_missing)
def update_availability_zone_profile(self, availability_zone_profile,
**attrs):
"""Update an availability zone profile
:param availability_zone_profile: The availability_zone_profile can be
either the name or a
:class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile`
instance
:param dict attrs: The attributes to update on the availability_zone
profile represented by
``availability_zone_profile``.
:returns: The updated availability zone profile
:rtype: :class:`~openstack.load_balancer.v2.availability_zone_profile.
AvailabilityZoneProfile`
"""
return self._update(_availability_zone_profile.AvailabilityZoneProfile,
availability_zone_profile, **attrs)
def create_availability_zone(self, **attrs):
"""Create a new availability zone from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.load_balancer.v2.
availability_zone.AvailabilityZone`, comprised of
the properties on the AvailabilityZone class.
:returns: The results of availability_zone creation
:rtype:
:class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone`
"""
return self._create(_availability_zone.AvailabilityZone, **attrs)
def get_availability_zone(self, *attrs):
"""Get an availability zone
:param availability_zone: The value can be the name of an
availability_zone or
:class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone`
instance.
:returns: One
:class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone`
"""
return self._get(_availability_zone.AvailabilityZone, *attrs)
def availability_zones(self, **query):
"""Retrieve a generator of availability zones
:returns: A generator of availability zone instances
"""
return self._list(_availability_zone.AvailabilityZone, **query)
def delete_availability_zone(self, availability_zone, ignore_missing=True):
"""Delete an availability_zone
:param availability_zone: The availability_zone can be either the name
or a
:class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone`
instance
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the availability zone does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent availability zone.
:returns: ``None``
"""
self._delete(_availability_zone.AvailabilityZone, availability_zone,
ignore_missing=ignore_missing)
def find_availability_zone(self, name_or_id, ignore_missing=True):
"""Find a single availability zone
:param name_or_id: The name or ID of an availability zone
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the availability zone does not exist.
When set to ``True``, None will be returned when attempting
to find a nonexistent availability zone.
:returns: One
:class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone`
or None
"""
return self._find(_availability_zone.AvailabilityZone, name_or_id,
ignore_missing=ignore_missing)
def update_availability_zone(self, availability_zone, **attrs):
"""Update an availability zone
:param availability_zone: The availability_zone can be either the name
or a
:class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone`
instance
:param dict attrs: The attributes to update on the availability_zone
represented by ``availability_zone``.
:returns: The updated availability_zone
:rtype:
:class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone`
"""
return self._update(_availability_zone.AvailabilityZone,
availability_zone, **attrs)
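# A hedged usage sketch (illustrative, not part of this module): assuming this
# proxy is exposed as ``conn.load_balancer``, the flavor profile, flavor and
# availability zone calls above chain together like this. All names and the
# ``flavor_data`` payload are placeholders.
#
#     import openstack
#
#     conn = openstack.connect(cloud='example-cloud')  # hypothetical cloud name
#     profile = conn.load_balancer.create_flavor_profile(
#         name='single-amphora',
#         provider_name='amphora',
#         flavor_data='{"loadbalancer_topology": "SINGLE"}')
#     flavor = conn.load_balancer.create_flavor(
#         name='basic', flavor_profile_id=profile.id)
#     for az in conn.load_balancer.availability_zones():
#         print(az.name)
#     conn.load_balancer.delete_flavor(flavor, ignore_missing=True)
#     conn.load_balancer.delete_flavor_profile(profile, ignore_missing=True)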
|
|
from __future__ import unicode_literals
import logging
from django.utils import six
from django.utils.functional import cached_property
from django.utils.html import escape, strip_tags
from django.utils.safestring import mark_safe
from django.utils.six.moves.html_parser import HTMLParser
from django.utils.translation import ugettext_lazy as _
from djblets.markdown import iter_markdown_lines
from djblets.registries.errors import ItemLookupError
from djblets.registries.registry import ALREADY_REGISTERED, NOT_REGISTERED
from reviewboard.diffviewer.diffutils import get_line_changed_regions
from reviewboard.diffviewer.myersdiff import MyersDiffer
from reviewboard.diffviewer.templatetags.difftags import highlightregion
from reviewboard.registries.registry import Registry, OrderedRegistry
from reviewboard.reviews.markdown_utils import (is_rich_text_default_for_user,
normalize_text_for_edit,
render_markdown)
class FieldSetRegistry(OrderedRegistry):
"""A registry for field sets.
This keeps the fieldsets in the registered order, so iterating through them
will do so in the same order.
"""
lookup_attrs = ('fieldset_id',)
errors = {
ALREADY_REGISTERED: _(
'"%(item)s" is already a registered review request fieldset.'
),
NOT_REGISTERED: _(
'%(attr_value)s is not a registered review request fieldset.'
),
}
def __init__(self):
self._key_order = []
super(FieldSetRegistry, self).__init__()
def register(self, fieldset):
"""Register the fieldset.
This will also register all field classes registered on the fieldset on
the field registry.
Args:
fieldset (type):
The fieldset to register, as a
:py:class:`BaseReviewRequestFieldSet` subclass.
"""
super(FieldSetRegistry, self).register(fieldset)
# Set the field_classes to an empty list by default if it doesn't
# explicitly provide its own, so that entries don't go into
# BaseReviewRequestFieldSet's global list.
if fieldset.field_classes is None:
fieldset.field_classes = []
for field_cls in fieldset.field_classes:
field_registry.register(field_cls)
def unregister(self, fieldset):
"""Unregister the fieldset.
This will unregister all field classes on the fieldset from the field
registry.
Args:
fieldset (type):
The field to remove, as a
:py:class:`BaseReviewRequestFieldSet` subclass.
"""
super(FieldSetRegistry, self).unregister(fieldset)
for field_cls in fieldset.field_classes:
fieldset.remove_field(field_cls)
def get_defaults(self):
"""Return the list of built-in fieldsets.
Returns:
list:
A list of the built-in
:py:class:`~reviewboard.reviews.fields.BaseReviewRequestFieldSet`
subclasses.
"""
from reviewboard.reviews.builtin_fields import builtin_fieldsets
return builtin_fieldsets
class FieldRegistry(Registry):
"""A registry for review request fields."""
lookup_attrs = ['field_id']
errors = {
ALREADY_REGISTERED: _(
'"%(item)s" is already a registered review request field. Field '
'IDs must be unique across all fieldsets.'
),
NOT_REGISTERED: _(
'"%(attr_value)s is not a registered review request fieldset.'
),
}
def populate(self):
# Fields are only ever registered via the FieldSetRegistry, so we
# ensure that it has been populated as well.
fieldset_registry.populate()
fieldset_registry = FieldSetRegistry()
field_registry = FieldRegistry()
class BaseReviewRequestFieldSet(object):
"""Base class for sets of review request fields.
A fieldset stores a list of fields that are rendered on the review
request page. They may contain default fields, and new fields can be
added or removed.
Review Board provides three main fieldsets: "main", "info", and
"reviewers". Others can be added by subclassing and registering
through ``register_review_request_fieldset``.
"""
fieldset_id = None
label = None
show_required = False
field_classes = None
tag_name = None
def __init__(self, review_request_details):
self.review_request_details = review_request_details
@classmethod
def is_empty(cls):
"""Returns whether the fieldset is empty.
A fieldset is empty if there are no field classes registered.
An empty fieldset will not be displayed on the page.
"""
return not cls.field_classes
@classmethod
def add_field(cls, field_cls):
"""Adds a field class to this fieldset.
The field will be rendered inside this fieldset on the page.
A given field class can only be in one fieldset. Its ``field_id``
must be unique.
"""
field_registry.register(field_cls)
cls.field_classes.append(field_cls)
@classmethod
def remove_field(cls, field_cls):
"""Removes a field class from this fieldset.
The field class must have been previously added to this fieldset.
"""
cls.field_classes.remove(field_cls)
try:
field_registry.unregister(field_cls)
except ItemLookupError as e:
logging.error('Failed to unregister unknown review request '
'field "%s"',
field_cls.field_id)
raise e
def __str__(self):
"""Represent the field set as a byte string.
Returns:
bytes:
The field set's ID as a byte string.
"""
if isinstance(self.fieldset_id, six.binary_type):
return self.fieldset_id
return self.fieldset_id.encode('utf-8')
def __unicode__(self):
"""Represent the field set as a unicode string.
Returns:
unicode:
The field set's ID as a unicode string.
"""
if isinstance(self.fieldset_id, six.binary_type):
return self.fieldset_id.decode('utf-8')
return self.fieldset_id
class BaseReviewRequestField(object):
"""Base class for a field on a review request.
A field is responsible for displaying data from a review request,
handling any editing requirements if necessary, recording changes
in the ChangeDescription, and rendering those changes.
Each field must have its own unique ``field_id``. This ID will be used
when looking up or storing the field's value.
It is recommended that fields provided by extensions prefix their
field ID with some sort of identifier for the extension or the vendor.
Creating a new field requires subclassing BaseReviewRequestField and
overriding any fields or functions necessary. Its class must then be
added to a fieldset.
A field will be instantiated with either a ReviewRequest or a
ReviewRequestDraft, depending on what is being edited. This is stored
in ``review_request_details``. Functions should optimistically fetch
values from that, if possible. They can call ``get_review_request()``
on ``review_request_details`` to fetch the actual ReviewRequest.
If the function takes a ``review_request_details`` parameter, it must
use that instead.
"""
field_id = None
label = None
is_editable = False
is_required = False
default_css_classes = set()
change_entry_renders_inline = True
model = None
can_record_change_entry = property(lambda self: self.is_editable)
def __init__(self, review_request_details, request=None):
self.review_request_details = review_request_details
self.request = request
@property
def value(self):
"""Returns the value loaded from the database.
This will fetch the value with the associated ReviewRequest or
ReviewRequestDraft, and then cache it for future lookups.
"""
if not hasattr(self, '_value'):
self._value = self.load_value(self.review_request_details)
return self._value
def has_value_changed(self, old_value, new_value):
"""Returns whether the value has changed.
By default, it performs an inequality check on the values. This
can be overridden to perform more specialized checks.
"""
return old_value != new_value
def record_change_entry(self, changedesc, old_value, new_value):
"""Records information on the changed values in a ChangeDescription.
By default, the values are stored as-is along with the field ID.
This can be overridden to perform more specialized storage.
"""
changedesc.record_field_change(self.field_id, old_value, new_value)
def serialize_change_entry(self, changedesc):
"""Serialize a change entry for public consumption.
This will output a version of the change entry for use in the API.
It can be the same content stored in the
:py:class:`~reviewboard.changedescs.models.ChangeDescription`, but
does not need to be.
Args:
changedesc (reviewboard.changedescs.models.ChangeDescription):
The change description whose field is to be serialized.
Returns:
dict:
An appropriate serialization for the field.
"""
field_info = changedesc.fields_changed[self.field_id]
if self.model:
return self.serialize_change_entry_for_model_list(field_info)
else:
return self.serialize_change_entry_for_singleton(field_info)
def serialize_change_entry_for_model_list(self, field_info):
"""Return the change entry for a list of models.
Args:
field_info (dict):
A dictionary describing how the field has changed. This is
guaranteed to have ``new`` and ``old`` keys, but may also
contain ``added`` and ``removed`` keys as well.
Returns:
dict:
A mapping of each key present in ``field_info`` to its list of
model instances.
"""
pks = [
value[2]
for key in ('new', 'old', 'added', 'removed')
if key in field_info
for value in field_info[key]
]
pk_to_objects = dict([
(obj.pk, obj)
for obj in self.model.objects.filter(pk__in=pks)
])
return dict([
(key, [
pk_to_objects[value[2]]
for value in field_info[key]
])
for key in ('new', 'old', 'added', 'removed')
if key in field_info
])
def serialize_change_entry_for_singleton(self, field_info):
"""Return the change entry for a singleton.
Singleton fields (e.g., summaries) are stored in
:py:class:`~reviewboard.changedescs.models.ChangeDescription`\s as
a list with a single element.
Args:
field_info (dict):
A dictionary describing how the field has changed. This is
guaranteed to have ``new`` and ``old`` keys, but may also
contain ``added`` and ``removed`` keys as well.
Returns:
dict:
A mapping of each key in ``field_info`` to a single value.
"""
return dict([
(key, field_info[key][0])
for key in ('new', 'old', 'added', 'removed')
if key in field_info
])
def serialize_change_entry_for_list(self, field_info):
"""Return the change entry for a list of plain data.
Args:
field_info (dict):
A dictionary describing how the field has changed. This is
guaranteed to have ``new`` and ``old`` keys, but may also
contain ``added`` and ``removed`` keys as well.
Returns:
dict:
A mapping of each key in ``field_info`` to a list of values.
"""
return dict([
(key, [
value[0]
for value in field_info[key]
])
for key in ('new', 'old', 'added', 'removed')
if key in field_info
])
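# A hedged illustration (an assumption about the stored format, not taken from
# this module) of the ``field_info`` structure the serializers above consume.
# For a singleton field such as a summary, the change description might record:
#
#     field_info = {'old': ['Old summary'], 'new': ['New summary']}
#     serialize_change_entry_for_singleton(field_info)
#     # -> {'old': 'Old summary', 'new': 'New summary'}
#
# For list-valued fields each entry is itself a sequence whose third element is
# a primary key, which is why the model-list variant indexes ``value[2]`` while
# the plain-list variant indexes ``value[0]``.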
def get_change_entry_sections_html(self, info):
"""Returns sections of change entries with titles and rendered HTML.
By default, this just returns a single section for the field, with
the field's title and rendered change HTML.
Subclasses can override this to provide more information.
"""
return [{
'title': self.label,
'rendered_html': mark_safe(self.render_change_entry_html(info)),
}]
def render_change_entry_html(self, info):
"""Renders a change entry to HTML.
By default, this returns a simple "changed from X to Y" using the old
and new values. This can be overridden to generate more specialized
output.
This function is expected to return safe, valid HTML. Any values
coming from a field or any other form of user input must be
properly escaped.
Subclasses can override ``render_change_entry_value_html`` to
change how the value itself will be rendered in the string.
"""
old_value = ''
new_value = ''
if 'old' in info:
old_value = info['old'][0]
if 'new' in info:
new_value = info['new'][0]
s = ['<table class="changed">']
if old_value:
s.append(self.render_change_entry_removed_value_html(
info, old_value))
if new_value:
s.append(self.render_change_entry_added_value_html(
info, new_value))
s.append('</table>')
return ''.join(s)
def render_change_entry_added_value_html(self, info, value):
value_html = self.render_change_entry_value_html(info, value)
if value_html:
return ('<tr class="new-value"><th class="marker">+</th>'
'<td class="value">%s</td></tr>' % value_html)
else:
return ''
def render_change_entry_removed_value_html(self, info, value):
value_html = self.render_change_entry_value_html(info, value)
if value_html:
return ('<tr class="old-value"><th class="marker">-</th>'
'<td class="value">%s</td></tr>' % value_html)
else:
return ''
def render_change_entry_value_html(self, info, value):
"""Renders the value for a change description string to HTML.
By default, this just converts the value to text and escapes it.
This can be overridden to customize how the value is displayed.
"""
return escape(six.text_type(value or ''))
def load_value(self, review_request_details):
"""Loads a value from the review request or draft.
By default, this loads the value as-is from the extra_data field.
This can be overridden if you need to deserialize the value in some
way.
This must use ``review_request_details`` instead of
``self.review_request_details``.
"""
return review_request_details.extra_data.get(self.field_id)
def save_value(self, value):
"""Saves the value in the review request or draft.
By default, this saves the value as-is in the extra_data field.
This can be overridden if you need to serialize the value in some
way.
"""
self.review_request_details.extra_data[self.field_id] = value
def propagate_data(self, review_request_details):
"""Propagate data in from source review request or draft.
By default, this loads only the field's value from a source review
request or draft and saves it as-is into the review request or draft
associated with the field. This can be overridden if you need to
propagate additional data elements.
This method is preferable to explicitly calling :py:meth:`load_value`
and :py:meth:`save_value` in series to propagate data from a source
into a field, because it allows for copying additional data elements
beyond only the field's value.
This function must use the ``review_request_details`` parameter instead
of the :py:attr:`review_request_details` attribute on the field.
Args:
review_request_details (reviewboard.reviews.models.base_review_request_details):
The source review request or draft whose data is to be
propagated.
"""
self.save_value(self.load_value(review_request_details))
def render_value(self, value):
"""Renders the value in the field.
By default, this converts to text and escapes it. This can be
overridden if you need to render it in a more specific way.
This must use ``value`` instead of ``self.value``.
"""
return escape(six.text_type(value or ''))
def should_render(self, value):
"""Returns whether the field should be rendered.
By default, the field is always rendered, but this can be overridden
if you only want to show under certain conditions (such as if it has
a value).
This must use ``value`` instead of ``self.value``.
"""
return True
def get_css_classes(self):
"""Returns the list of CSS classes to apply to the element.
By default, this will include the contents of ``default_css_classes``,
and ``required`` if it's a required field.
This can be overridden to provide additional CSS classes, if they're
not appropriate for ``default_css_classes``.
"""
css_classes = set(self.default_css_classes)
if self.is_required:
css_classes.add('required')
return css_classes
def get_data_attributes(self):
"""Returns any data attributes to include in the element.
By default, this returns nothing.
"""
return {}
def as_html(self):
"""Returns the field rendered as HTML.
By default, this just calls ``render_value`` with the value
from the database.
"""
return self.render_value(self.value)
def __str__(self):
"""Represent the field as a byte string.
Returns:
bytes:
The field's ID as a byte string.
"""
if isinstance(self.field_id, six.binary_type):
return self.field_id
return self.field_id.encode('utf-8')
def __unicode__(self):
"""Represent the field as a unicode string.
Returns:
unicode:
The field's ID as a unicode string.
"""
if isinstance(self.field_id, six.binary_type):
return self.field_id.decode('utf-8')
return self.field_id
class BaseEditableField(BaseReviewRequestField):
"""Base class for an editable field.
This simply marks the field as editable.
"""
default_css_classes = ['editable']
is_editable = True
class BaseCommaEditableField(BaseEditableField):
"""Base class for an editable comma-separated list of values.
This is used for dealing with lists of items that appear
comma-separated in the UI. It works with stored lists of content
on the review request or draft, and on the ChangeDescription.
Subclasses can override this to provide specialized rendering
on a per-item-basis. That's useful for showing links to items,
for example.
"""
default_css_classes = ['editable', 'comma-editable']
order_matters = False
one_line_per_change_entry = True
def has_value_changed(self, old_value, new_value):
"""Returns whether two values have changed.
If ``order_matters`` is set to ``True``, this will do a strict
list comparison. Otherwise, it will compare the items in both
lists without caring about the ordering.
"""
if self.order_matters:
return old_value != new_value
else:
return set(old_value or []) != set(new_value or [])
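# A hedged illustration of the ``order_matters`` flag above. ``field`` stands in
# for any BaseCommaEditableField instance; the values are placeholders.
#
#     # With order_matters = False (the default) only membership changes count:
#     field.has_value_changed(['alice', 'bob'], ['bob', 'alice'])   # -> False
#     # With order_matters = True a strict list comparison is used instead:
#     field.has_value_changed(['alice', 'bob'], ['bob', 'alice'])   # -> True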
def serialize_change_entry(self, changedesc):
"""Serialize a change entry for public consumption.
This will output a version of the change entry for use in the API.
It can be the same content stored in the
:py:class:`~reviewboard.changedescs.models.ChangeDescription`, but
does not need to be.
Args:
changedesc (reviewboard.changedescs.models.ChangeDescription):
The change description whose field is to be serialized.
Returns:
dict:
An appropriate serialization for the field.
"""
field_info = changedesc.fields_changed[self.field_id]
if self.model:
return self.serialize_change_entry_for_model_list(field_info)
else:
return self.serialize_change_entry_for_list(field_info)
def render_value(self, values):
"""Renders the list of items.
This will call out to ``render_item`` for every item. The list
of rendered items will be separated by a comma and a space.
"""
return ', '.join([
self.render_item(value)
for value in values
])
def render_item(self, item):
"""Renders an item from the list.
By default, this will convert the item to text and then escape it.
"""
return escape(six.text_type(item or ''))
def render_change_entry_html(self, info):
"""Renders a change entry to HTML.
By default, this returns HTML containing a list of removed items,
and a list of added items. This can be overridden to generate
more specialized output.
This function is expected to return safe, valid HTML. Any values
coming from a field or any other form of user input must be
properly escaped.
"""
s = ['<table class="changed">']
if 'removed' in info:
values = info['removed']
if self.one_line_per_change_entry:
s += [
self.render_change_entry_removed_value_html(info, [value])
for value in values
]
else:
s.append(self.render_change_entry_removed_value_html(
info, values))
if 'added' in info:
values = info['added']
if self.one_line_per_change_entry:
s += [
self.render_change_entry_added_value_html(info, [value])
for value in values
]
else:
s.append(self.render_change_entry_added_value_html(
info, values))
s.append('</table>')
return ''.join(s)
def render_change_entry_value_html(self, info, values):
"""Renders a list of items for change description HTML.
By default, this will call ``render_change_entry_item_html`` for every
item in the list. The list of rendered items will be separated by a
comma and a space.
"""
return ', '.join([
self.render_change_entry_item_html(info, item)
for item in values
])
def render_change_entry_item_html(self, info, item):
"""Renders an item for change description HTML.
By default, this just converts the value to text and escapes it.
This can be overridden to customize how the value is displayed.
"""
return escape(six.text_type(item[0]))
class BaseTextAreaField(BaseEditableField):
"""Base class for a multi-line text area field.
The text area can take either plain text or Markdown text. By default,
Markdown is supported, but this can be changed by setting
``enable_markdown`` to ``False``.
"""
default_css_classes = ['editable', 'field-text-area']
enable_markdown = True
always_render_markdown = False
tag_name = 'pre'
@cached_property
def text_type_key(self):
"""Return the text type key for the ``extra_data`` dictionary."""
if self.field_id == 'text':
return 'text_type'
else:
return '%s_text_type' % self.field_id
def is_text_markdown(self, value):
"""Returns whether the text is in Markdown format.
This can be overridden if the field needs to check something else
to determine if the text is in Markdown format.
"""
text_type = self.review_request_details.extra_data.get(
self.text_type_key, 'plain')
return text_type == 'markdown'
def propagate_data(self, review_request_details):
"""Propagate data in from source review request or draft.
In addition to the value propagation handled by the base class, this
copies the text type details from a source review request or draft and
saves it as-is into the review request or draft associated with the
field.
Args:
review_request_details (reviewboard.reviews.models.base_review_request_details):
The source review request or draft whose data is to be
propagated.
"""
super(BaseTextAreaField, self).propagate_data(review_request_details)
source_text_type = review_request_details.extra_data.get(
self.text_type_key, None)
if source_text_type is not None:
self.review_request_details.extra_data[self.text_type_key] = \
source_text_type
def get_css_classes(self):
"""Returns the list of CSS classes.
If Markdown is enabled, and the text is in Markdown format,
this will add a "rich-text" field.
"""
css_classes = super(BaseTextAreaField, self).get_css_classes()
if (self.enable_markdown and self.value and
(self.should_render_as_markdown(self.value) or
(self.request.user and
is_rich_text_default_for_user(self.request.user)))):
css_classes.add('rich-text')
return css_classes
def get_data_attributes(self):
attrs = super(BaseTextAreaField, self).get_data_attributes()
if self.enable_markdown:
if self.request:
user = self.request.user
else:
user = None
attrs.update({
'allow-markdown': True,
'raw-value': normalize_text_for_edit(
user, self.value,
self.should_render_as_markdown(self.value)),
})
return attrs
def render_value(self, text):
"""Returns the value of the field.
If Markdown is enabled, and the text is not in Markdown format,
the text will be escaped.
"""
text = text or ''
if self.should_render_as_markdown(text):
return render_markdown(text)
else:
return escape(text)
def should_render_as_markdown(self, value):
"""Returns whether the text should be rendered as Markdown.
By default, this checks if the field is set to always render
any text as Markdown, or if the given text is in Markdown format.
"""
return self.always_render_markdown or self.is_text_markdown(value)
def render_change_entry_html(self, info):
old_value = ''
new_value = ''
if 'old' in info:
old_value = info['old'][0] or ''
if 'new' in info:
new_value = info['new'][0] or ''
old_value = render_markdown(old_value)
new_value = render_markdown(new_value)
old_lines = list(iter_markdown_lines(old_value))
new_lines = list(iter_markdown_lines(new_value))
differ = MyersDiffer(old_lines, new_lines)
return ('<table class="diffed-text-area">%s</table>'
% ''.join(self._render_all_change_lines(differ, old_lines,
new_lines)))
def _render_all_change_lines(self, differ, old_lines, new_lines):
for tag, i1, i2, j1, j2 in differ.get_opcodes():
if tag == 'equal':
lines = self._render_change_lines(differ, tag, None, None,
i1, i2, old_lines)
elif tag == 'insert':
lines = self._render_change_lines(differ, tag, None, '+',
j1, j2, new_lines)
elif tag == 'delete':
lines = self._render_change_lines(differ, tag, '-', None,
i1, i2, old_lines)
elif tag == 'replace':
lines = self._render_change_replace_lines(differ, i1, i2,
j1, j2, old_lines,
new_lines)
else:
raise ValueError('Unexpected tag "%s"' % tag)
for line in lines:
yield line
def _render_change_lines(self, differ, tag, old_marker, new_marker,
i1, i2, lines):
old_marker = old_marker or ' '
new_marker = new_marker or ' '
for i in range(i1, i2):
line = lines[i]
yield ('<tr class="%s">'
' <td class="marker">%s</td>'
' <td class="marker">%s</td>'
' <td class="line rich-text">%s</td>'
'</tr>'
% (tag, old_marker, new_marker, line))
def _render_change_replace_lines(self, differ, i1, i2, j1, j2,
old_lines, new_lines):
replace_new_lines = []
for i, j in zip(range(i1, i2), range(j1, j2)):
old_line = old_lines[i]
new_line = new_lines[j]
parser = HTMLParser()
old_regions, new_regions = \
get_line_changed_regions(parser.unescape(strip_tags(old_line)),
parser.unescape(strip_tags(new_line)))
old_line = highlightregion(old_line, old_regions)
new_line = highlightregion(new_line, new_regions)
yield (
'<tr class="replace-old">'
' <td class="marker">~</td>'
' <td class="marker"> </td>'
' <td class="line rich-text">%s</td>'
'</tr>'
% old_line)
replace_new_lines.append(new_line)
for line in replace_new_lines:
yield (
'<tr class="replace-new">'
' <td class="marker"> </td>'
' <td class="marker">~</td>'
' <td class="line rich-text">%s</td>'
'</tr>'
% line)
def get_review_request_fields():
"""Yield all registered field classes.
Yields:
type:
The field classes, as subclasses of :py:class:`BaseReviewRequestField`
"""
for field in field_registry:
yield field
def get_review_request_fieldsets(include_main=False,
include_change_entries_only=False):
"""Returns a list of all registered fieldset classes.
As an internal optimization, the "main" fieldset can be filtered out,
to help with rendering the side of the review request page.
Args:
include_main (bool):
Whether or not the main fieldset should be included.
include_change_entries_only (bool):
Whether or not to include the change-entry only fieldset.
Returns:
list:
The requested :py:class:`fieldsets <BaseReviewRequestFieldSet>`.
"""
if include_main and include_change_entries_only:
return list(fieldset_registry)
else:
excluded_ids = []
if not include_main:
excluded_ids.append('main')
if not include_change_entries_only:
excluded_ids.append('_change_entries_only')
return [
fieldset
for fieldset in fieldset_registry
if fieldset.fieldset_id not in excluded_ids
]
def get_review_request_fieldset(fieldset_id):
"""Return the fieldset with the specified ID.
Args:
fieldset_id (unicode):
The fieldset's ID.
Returns:
BaseReviewRequestFieldSet:
The requested fieldset, or ``None`` if it could not be found.
"""
return fieldset_registry.get('fieldset_id', fieldset_id)
def get_review_request_field(field_id):
"""Return the field with the specified ID.
Args:
field_id (unicode):
The field's ID.
Returns:
BaseReviewRequestField:
The requested field, or ``None`` if it could not be found.
"""
return field_registry.get('field_id', field_id)
def register_review_request_fieldset(fieldset):
"""Register a custom review request fieldset.
The fieldset must have a :py:attr:`~BaseReviewRequestFieldSet.fieldset_id`
attribute. This ID **must** be unique across all registered fieldsets, or
an exception will be thrown.
Args:
fieldset (type):
The :py:class:`BaseReviewRequestFieldSet` subclass.
Raises:
djblets.registries.errors.ItemLookupError:
This will be thrown if a fieldset is already registered with the
same ID.
"""
fieldset_registry.register(fieldset)
def unregister_review_request_fieldset(fieldset):
"""Unregister a previously registered review request fieldset.
Args:
fieldset (type):
The :py:class:`BaseReviewRequestFieldSet` subclass.
Raises:
djblets.registries.errors.ItemLookupError:
This will be thrown if the fieldset is not already registered.
"""
try:
fieldset_registry.unregister(fieldset)
except ItemLookupError as e:
logging.error('Failed to unregister unknown review request fieldset '
'"%s"',
fieldset.fieldset_id)
raise e
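# A hedged sketch (illustrative only) of how an extension might put the classes
# above together: define a field, group it in a fieldset and register it. The
# IDs and labels are placeholders.
#
#     class MilestoneField(BaseEditableField):
#         field_id = 'myvendor_milestone'
#         label = 'Milestone'
#
#     class MyVendorFieldSet(BaseReviewRequestFieldSet):
#         fieldset_id = 'myvendor'
#         label = 'My Vendor'
#         field_classes = [MilestoneField]
#
#     register_review_request_fieldset(MyVendorFieldSet)
#     # ... and on shutdown:
#     unregister_review_request_fieldset(MyVendorFieldSet)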
|
|
"""CSP (Constraint Satisfaction Problems) problems and solvers. (Chapter 6)."""
from .utils import *
from collections import defaultdict
from . import search
from functools import reduce
class CSP(search.Problem):
"""This class describes finite-domain Constraint Satisfaction Problems.
A CSP is specified by the following inputs:
vars A list of variables; each is atomic (e.g. int or string).
domains A dict of {var:[possible_value, ...]} entries.
neighbors A dict of {var:[var,...]} that for each variable lists
the other variables that participate in constraints.
constraints A function f(A, a, B, b) that returns true if neighbors
A, B satisfy the constraint when they have values A=a, B=b
In the textbook and in most mathematical definitions, the
constraints are specified as explicit pairs of allowable values,
but the formulation here is easier to express and more compact for
most cases. (For example, the n-Queens problem can be represented
in O(n) space using this notation, instead of O(n^4) for the
explicit representation.) In terms of describing the CSP as a
problem, that's all there is.
However, the class also supports data structures and methods that help you
solve CSPs by calling a search function on the CSP. Methods and slots are
as follows, where the argument 'a' represents an assignment, which is a
dict of {var:val} entries:
assign(var, val, a) Assign a[var] = val; do other bookkeeping
unassign(var, a) Do del a[var], plus other bookkeeping
nconflicts(var, val, a) Return the number of other variables that
conflict with var=val
curr_domains[var] Slot: remaining consistent values for var
Used by constraint propagation routines.
The following methods are used only by graph_search and tree_search:
actions(state) Return a list of actions
result(state, action) Return a successor of state
goal_test(state) Return true if all constraints satisfied
The following are just for debugging purposes:
nassigns Slot: tracks the number of assignments made
display(a) Print a human-readable representation
>>> search.depth_first_graph_search(australia)
<Node (('WA', 'B'), ('Q', 'B'), ('T', 'B'), ('V', 'B'), ('SA', 'G'), ('NT', 'R'), ('NSW', 'R'))>
"""
def __init__(self, vars, domains, neighbors, constraints):
"Construct a CSP problem. If vars is empty, it becomes domains.keys()."
vars = vars or list(domains.keys())
update(self, vars=vars, domains=domains,
neighbors=neighbors, constraints=constraints,
initial=(), curr_domains=None, nassigns=0)
def assign(self, var, val, assignment):
"Add {var: val} to assignment; Discard the old value if any."
assignment[var] = val
self.nassigns += 1
def unassign(self, var, assignment):
"""Remove {var: val} from assignment.
DO NOT call this if you are changing a variable to a new value;
just call assign for that."""
if var in assignment:
del assignment[var]
def nconflicts(self, var, val, assignment):
"Return the number of conflicts var=val has with other variables."
# Subclasses may implement this more efficiently
def conflict(var2):
return (var2 in assignment
and not self.constraints(var, val, var2, assignment[var2]))
return count_if(conflict, self.neighbors[var])
def display(self, assignment):
"Show a human-readable representation of the CSP."
# Subclasses can print in a prettier way, or display with a GUI
print('CSP:', self, 'with assignment:', assignment)
# These methods are for the tree- and graph-search interface:
def actions(self, state):
"""Return a list of applicable actions: nonconflicting
assignments to an unassigned variable."""
if len(state) == len(self.vars):
return []
else:
assignment = dict(state)
var = find_if(lambda v: v not in assignment, self.vars)
return [(var, val) for val in self.domains[var]
if self.nconflicts(var, val, assignment) == 0]
def result(self, state, action):
"Perform an action and return the new state."
(var, val) = action
return state + ((var, val),)
def goal_test(self, state):
"The goal is to assign all vars, with all constraints satisfied."
assignment = dict(state)
return (len(assignment) == len(self.vars) and
every(lambda var: self.nconflicts(var, assignment[var],
assignment) == 0,
self.vars))
# These are for constraint propagation
def support_pruning(self):
"""Make sure we can prune values from domains. (We want to pay
for this only if we use it.)"""
if self.curr_domains is None:
self.curr_domains = dict((v, list(self.domains[v]))
for v in self.vars)
def suppose(self, var, value):
"Start accumulating inferences from assuming var=value."
self.support_pruning()
removals = [(var, a) for a in self.curr_domains[var] if a != value]
self.curr_domains[var] = [value]
return removals
def prune(self, var, value, removals):
"Rule out var=value."
self.curr_domains[var].remove(value)
if removals is not None:
removals.append((var, value))
def choices(self, var):
"Return all values for var that aren't currently ruled out."
return (self.curr_domains or self.domains)[var]
def infer_assignment(self):
"Return the partial assignment implied by the current inferences."
self.support_pruning()
return dict((v, self.curr_domains[v][0])
for v in self.vars if 1 == len(self.curr_domains[v]))
def restore(self, removals):
"Undo a supposition and all inferences from it."
for B, b in removals:
self.curr_domains[B].append(b)
# This is for min_conflicts search
def conflicted_vars(self, current):
"Return a list of variables in current assignment that are in conflict"
return [var for var in self.vars
if self.nconflicts(var, current[var], current) > 0]
#______________________________________________________________________________
# Constraint Propagation with AC-3
def AC3(csp, queue=None, removals=None):
"""[Fig. 6.3]"""
if queue is None:
queue = [(Xi, Xk) for Xi in csp.vars for Xk in csp.neighbors[Xi]]
csp.support_pruning()
while queue:
(Xi, Xj) = queue.pop()
if revise(csp, Xi, Xj, removals):
if not csp.curr_domains[Xi]:
return False
for Xk in csp.neighbors[Xi]:
if Xk != Xi:
queue.append((Xk, Xi))
return True
def revise(csp, Xi, Xj, removals):
"Return true if we remove a value."
revised = False
for x in csp.curr_domains[Xi][:]:
# If Xi=x conflicts with Xj=y for every possible y, eliminate Xi=x
if every(lambda y: not csp.constraints(Xi, x, Xj, y),
csp.curr_domains[Xj]):
csp.prune(Xi, x, removals)
revised = True
return revised
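# A short usage sketch for AC3/revise above, mirroring the Sudoku doctest later
# in this module: arc consistency alone solves the easy puzzle, while on the
# map-colouring CSPs it returns True without pruning anything (every value
# still has a support in its neighbours' domains).
#
#     e = Sudoku(easy1)          # Sudoku and easy1 are defined further down
#     AC3(e)                     # -> True; curr_domains collapse to singletons
#     e.infer_assignment()       # complete assignment for the easy grid
#
#     AC3(australia)             # -> True, but the domains are left unpruned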
#______________________________________________________________________________
# CSP Backtracking Search
# Variable ordering
def first_unassigned_variable(assignment, csp):
"The default variable order."
return find_if(lambda var: var not in assignment, csp.vars)
def mrv(assignment, csp):
"Minimum-remaining-values heuristic."
return argmin_random_tie(
[v for v in csp.vars if v not in assignment],
lambda var: num_legal_values(csp, var, assignment))
def num_legal_values(csp, var, assignment):
if csp.curr_domains:
return len(csp.curr_domains[var])
else:
return count_if(lambda val: csp.nconflicts(var, val, assignment) == 0,
csp.domains[var])
# Value ordering
def unordered_domain_values(var, assignment, csp):
"The default value order."
return csp.choices(var)
def lcv(var, assignment, csp):
"Least-constraining-values heuristic."
return sorted(csp.choices(var),
key=lambda val: csp.nconflicts(var, val, assignment))
# Inference
def no_inference(csp, var, value, assignment, removals):
return True
def forward_checking(csp, var, value, assignment, removals):
"Prune neighbor values inconsistent with var=value."
for B in csp.neighbors[var]:
if B not in assignment:
for b in csp.curr_domains[B][:]:
if not csp.constraints(var, value, B, b):
csp.prune(B, b, removals)
if not csp.curr_domains[B]:
return False
return True
def mac(csp, var, value, assignment, removals):
"Maintain arc consistency."
return AC3(csp, [(X, var) for X in csp.neighbors[var]], removals)
# The search, proper
def backtracking_search(csp,
select_unassigned_variable=first_unassigned_variable,
order_domain_values=unordered_domain_values,
inference=no_inference):
"""[Fig. 6.5]
>>> backtracking_search(australia) is not None
True
>>> backtracking_search(australia, select_unassigned_variable=mrv) is not None
True
>>> backtracking_search(australia, order_domain_values=lcv) is not None
True
>>> backtracking_search(australia, select_unassigned_variable=mrv, order_domain_values=lcv) is not None
True
>>> backtracking_search(australia, inference=forward_checking) is not None
True
>>> backtracking_search(australia, inference=mac) is not None
True
>>> backtracking_search(usa, select_unassigned_variable=mrv, order_domain_values=lcv, inference=mac) is not None
True
"""
def backtrack(assignment):
if len(assignment) == len(csp.vars):
return assignment
var = select_unassigned_variable(assignment, csp)
for value in order_domain_values(var, assignment, csp):
if 0 == csp.nconflicts(var, value, assignment):
csp.assign(var, value, assignment)
removals = csp.suppose(var, value)
if inference(csp, var, value, assignment, removals):
result = backtrack(assignment)
if result is not None:
return result
csp.restore(removals)
csp.unassign(var, assignment)
return None
result = backtrack({})
assert result is None or csp.goal_test(result)
return result
#______________________________________________________________________________
# Min-conflicts hillclimbing search for CSPs
def min_conflicts(csp, max_steps=100000):
"""Solve a CSP by stochastic hillclimbing on the number of conflicts."""
# Generate a complete assignment for all vars (probably with conflicts)
csp.current = current = {}
for var in csp.vars:
val = min_conflicts_value(csp, var, current)
csp.assign(var, val, current)
# Now repeatedly choose a random conflicted variable and change it
for i in range(max_steps):
conflicted = csp.conflicted_vars(current)
if not conflicted:
return current
var = random.choice(conflicted)
val = min_conflicts_value(csp, var, current)
csp.assign(var, val, current)
return None
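# A short usage sketch for min_conflicts, grounded in the NQueensCSP docstring
# below (that class is designed for exactly this search): assignment and
# nconflicts are O(1) there, so local search handles large boards quickly.
#
#     solution = min_conflicts(NQueensCSP(64))   # NQueensCSP is defined below
#     # ``solution`` maps each column (variable) to a row (value), or is None
#     # if max_steps was exhausted without removing every conflict.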
def min_conflicts_value(csp, var, current):
"""Return the value that will give var the least number of conflicts.
If there is a tie, choose at random."""
return argmin_random_tie(csp.domains[var],
lambda val: csp.nconflicts(var, val, current))
#______________________________________________________________________________
def tree_csp_solver(csp):
"[Fig. 6.11]"
n = len(csp.vars)
assignment = {}
root = csp.vars[0]
X, parent = topological_sort(csp.vars, root)
for Xj in reversed(X):
if not make_arc_consistent(parent[Xj], Xj, csp):
return None
for Xi in X:
if not csp.curr_domains[Xi]:
return None
assignment[Xi] = csp.curr_domains[Xi][0]
return assignment
def topological_sort(xs, x):
unimplemented()
def make_arc_consistent(Xj, Xk, csp):
unimplemented()
#______________________________________________________________________________
# Map-Coloring Problems
class UniversalDict:
"""A universal dict maps any key to the same value. We use it here
as the domains dict for CSPs in which all vars have the same domain.
>>> d = UniversalDict(42)
>>> d['life']
42
"""
def __init__(self, value): self.value = value
def __getitem__(self, key): return self.value
def __repr__(self): return '{Any: %r}' % self.value
def different_values_constraint(A, a, B, b):
"A constraint saying two neighboring variables must differ in value."
return a != b
def MapColoringCSP(colors, neighbors):
"""Make a CSP for the problem of coloring a map with different colors
for any two adjacent regions. Arguments are a list of colors, and a
dict of {region: [neighbor,...]} entries. This dict may also be
specified as a string of the form defined by parse_neighbors."""
if isinstance(neighbors, str):
neighbors = parse_neighbors(neighbors)
return CSP(list(neighbors.keys()), UniversalDict(colors), neighbors,
different_values_constraint)
def parse_neighbors(neighbors, vars=[]):
"""Convert a string of the form 'X: Y Z; Y: Z' into a dict mapping
regions to neighbors. The syntax is a region name followed by a ':'
followed by zero or more region names, followed by ';', repeated for
each region name. If you say 'X: Y' you don't need 'Y: X'.
>>> parse_neighbors('X: Y Z; Y: Z')
{'Y': ['X', 'Z'], 'X': ['Y', 'Z'], 'Z': ['X', 'Y']}
"""
dict = defaultdict(list)
for var in vars:
dict[var] = []
specs = [spec.split(':') for spec in neighbors.split(';')]
for (A, Aneighbors) in specs:
A = A.strip()
dict.setdefault(A, [])
for B in Aneighbors.split():
dict[A].append(B)
dict[B].append(A)
return dict
australia = MapColoringCSP(list('RGB'),
'SA: WA NT Q NSW V; NT: WA Q; NSW: Q V; T: ')
usa = MapColoringCSP(list('RGBY'),
"""WA: OR ID; OR: ID NV CA; CA: NV AZ; NV: ID UT AZ; ID: MT WY UT;
UT: WY CO AZ; MT: ND SD WY; WY: SD NE CO; CO: NE KA OK NM; NM: OK TX;
ND: MN SD; SD: MN IA NE; NE: IA MO KA; KA: MO OK; OK: MO AR TX;
TX: AR LA; MN: WI IA; IA: WI IL MO; MO: IL KY TN AR; AR: MS TN LA;
LA: MS; WI: MI IL; IL: IN KY; IN: OH KY; MS: TN AL; AL: TN GA FL;
MI: OH IN; OH: PA WV KY; KY: WV VA TN; TN: VA NC GA; GA: NC SC FL;
PA: NY NJ DE MD WV; WV: MD VA; VA: MD DC NC; NC: SC; NY: VT MA CT NJ;
NJ: DE; DE: MD; MD: DC; VT: NH MA; MA: NH RI CT; CT: RI; ME: NH;
HI: ; AK: """)
france = MapColoringCSP(list('RGBY'),
"""AL: LO FC; AQ: MP LI PC; AU: LI CE BO RA LR MP; BO: CE IF CA FC RA
AU; BR: NB PL; CA: IF PI LO FC BO; CE: PL NB NH IF BO AU LI PC; FC: BO
CA LO AL RA; IF: NH PI CA BO CE; LI: PC CE AU MP AQ; LO: CA AL FC; LR:
MP AU RA PA; MP: AQ LI AU LR; NB: NH CE PL BR; NH: PI IF CE NB; NO:
PI; PA: LR RA; PC: PL CE LI AQ; PI: NH NO CA IF; PL: BR NB CE PC; RA:
AU BO FC PA LR""")
#______________________________________________________________________________
# n-Queens Problem
def queen_constraint(A, a, B, b):
"""Constraint is satisfied (true) if A, B are really the same variable,
or if they are not in the same row, down diagonal, or up diagonal."""
return A == B or (a != b and A + a != B + b and A - a != B - b)
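# A small worked check of the arithmetic above: for queens at (A, a) = (0, 0)
# and (B, b) = (2, 2) we have A - a == B - b (0 == 0), so they share a diagonal
# and queen_constraint(0, 0, 2, 2) is False; queen_constraint(0, 0, 2, 1) is
# True because the rows and both diagonal sums/differences all differ.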
class NQueensCSP(CSP):
"""Make a CSP for the nQueens problem for search with min_conflicts.
Suitable for large n, it uses only data structures of size O(n).
Think of placing queens one per column, from left to right.
That means position (x, y) represents (var, val) in the CSP.
The main structures are three arrays to count queens that could conflict:
rows[i] Number of queens in the ith row (i.e val == i)
downs[i] Number of queens in the \ diagonal
such that their (x, y) coordinates sum to i
ups[i] Number of queens in the / diagonal
such that their (x, y) coordinates have x-y+n-1 = i
We increment/decrement these counts each time a queen is placed/moved from
a row/diagonal. So moving is O(1), as is nconflicts. But choosing
a variable, and a best value for the variable, are each O(n).
If you want, you can keep track of conflicted vars, then variable
selection will also be O(1).
>>> len(backtracking_search(NQueensCSP(8)))
8
"""
def __init__(self, n):
"""Initialize data structures for n Queens."""
CSP.__init__(self, list(range(n)), UniversalDict(list(range(n))),
UniversalDict(list(range(n))), queen_constraint)
update(self, rows=[0]*n, ups=[0]*(2*n - 1), downs=[0]*(2*n - 1))
def nconflicts(self, var, val, assignment):
"""The number of conflicts, as recorded with each assignment.
Count conflicts in row and in up, down diagonals. If there
is a queen there, it can't conflict with itself, so subtract 3."""
n = len(self.vars)
c = self.rows[val] + self.downs[var+val] + self.ups[var-val+n-1]
if assignment.get(var, None) == val:
c -= 3
return c
def assign(self, var, val, assignment):
"Assign var, and keep track of conflicts."
oldval = assignment.get(var, None)
if val != oldval:
if oldval is not None: # Remove old val if there was one
self.record_conflict(assignment, var, oldval, -1)
self.record_conflict(assignment, var, val, +1)
CSP.assign(self, var, val, assignment)
def unassign(self, var, assignment):
"Remove var from assignment (if it is there) and track conflicts."
if var in assignment:
self.record_conflict(assignment, var, assignment[var], -1)
CSP.unassign(self, var, assignment)
def record_conflict(self, assignment, var, val, delta):
"Record conflicts caused by addition or deletion of a Queen."
n = len(self.vars)
self.rows[val] += delta
self.downs[var + val] += delta
self.ups[var - val + n - 1] += delta
def display(self, assignment):
"Print the queens and the nconflicts values (for debugging)."
n = len(self.vars)
for val in range(n):
for var in range(n):
if assignment.get(var, '') == val:
ch = 'Q'
elif (var+val) % 2 == 0:
ch = '.'
else:
ch = '-'
print(ch, end=' ')
print(' ', end=' ')
for var in range(n):
if assignment.get(var, '') == val:
ch = '*'
else:
ch = ' '
print(str(self.nconflicts(var, val, assignment))+ch, end=' ')
print()
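# Illustrative helper (added, not part of the original module): a minimal
# sketch of how NQueensCSP is typically solved with the min_conflicts local
# search defined earlier in this file; the result maps columns to rows.
def _nqueens_demo(n=8, max_steps=10000):
    """Solve n-queens with min_conflicts and return the column -> row map."""
    return min_conflicts(NQueensCSP(n), max_steps=max_steps)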
#______________________________________________________________________________
# Sudoku
import itertools
import re
def flatten(seqs): return sum(seqs, [])
easy1 = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..'
harder1 = '4173698.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......'
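# (Added note) The module-level helpers below number the 81 cells 0..80:
# _BGRID arranges them as a 3x3 grid of 3x3 boxes, _BOXES/_ROWS/_COLS flatten
# that structure into the nine boxes, rows and columns, and _NEIGHBORS maps
# every cell to the set of cells sharing a box, row or column with it.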
_R3 = list(range(3))
_CELL = itertools.count().__next__
_BGRID = [[[[_CELL() for x in _R3] for y in _R3] for bx in _R3] for by in _R3]
_BOXES = flatten([list(map(flatten, brow)) for brow in _BGRID])
_ROWS = flatten([list(map(flatten, list(zip(*brow)))) for brow in _BGRID])
_COLS = list(zip(*_ROWS))
_NEIGHBORS = dict([(v, set()) for v in flatten(_ROWS)])
for unit in map(set, _BOXES + _ROWS + _COLS):
for v in unit:
_NEIGHBORS[v].update(unit - set([v]))
class Sudoku(CSP):
"""A Sudoku problem.
The box grid is a 3x3 array of boxes, each a 3x3 array of cells.
Each cell holds a digit in 1..9. In each box, all digits are
different; the same for each row and column as a 9x9 grid.
>>> e = Sudoku(easy1)
>>> e.display(e.infer_assignment())
. . 3 | . 2 . | 6 . .
9 . . | 3 . 5 | . . 1
. . 1 | 8 . 6 | 4 . .
------+-------+------
. . 8 | 1 . 2 | 9 . .
7 . . | . . . | . . 8
. . 6 | 7 . 8 | 2 . .
------+-------+------
. . 2 | 6 . 9 | 5 . .
8 . . | 2 . 3 | . . 9
. . 5 | . 1 . | 3 . .
>>> AC3(e); e.display(e.infer_assignment())
True
4 8 3 | 9 2 1 | 6 5 7
9 6 7 | 3 4 5 | 8 2 1
2 5 1 | 8 7 6 | 4 9 3
------+-------+------
5 4 8 | 1 3 2 | 9 7 6
7 2 9 | 5 6 4 | 1 3 8
1 3 6 | 7 9 8 | 2 4 5
------+-------+------
3 7 2 | 6 8 9 | 5 1 4
8 1 4 | 2 5 3 | 7 6 9
6 9 5 | 4 1 7 | 3 8 2
>>> h = Sudoku(harder1)
>>> None != backtracking_search(h, select_unassigned_variable=mrv, inference=forward_checking)
True
"""
R3 = _R3
Cell = _CELL
bgrid = _BGRID
boxes = _BOXES
rows = _ROWS
cols = _COLS
neighbors = _NEIGHBORS
def __init__(self, grid):
"""Build a Sudoku problem from a string representing the grid:
the digits 1-9 denote a filled cell, '.' or '0' an empty one;
other characters are ignored."""
squares = iter(re.findall(r'\d|\.', grid))
domains = dict((var, ([ch] if ch in '123456789' else '123456789'))
for var, ch in zip(flatten(self.rows), squares))
for _ in squares:
raise ValueError("Not a Sudoku grid", grid) # Too many squares
CSP.__init__(self, None, domains, self.neighbors,
different_values_constraint)
def display(self, assignment):
def show_box(box): return [
' '.join(map(show_cell, row)) for row in box]
def show_cell(cell): return str(assignment.get(cell, '.'))
def abut(lines1, lines2): return list(
map(' | '.join, list(zip(lines1, lines2))))
print('\n------+-------+------\n'.join(
'\n'.join(reduce(abut, list(map(show_box, brow)))) for brow in self.bgrid))
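# Illustrative helper (added, not part of the original module): the usual
# Sudoku workflow sketched as a function, combining constraint propagation
# (AC3) with backtracking search; AC3, backtracking_search, mrv and
# forward_checking are all defined earlier in this file.
def _solve_sudoku(grid=harder1):
    """Return a solved assignment for the given grid string, or None."""
    puzzle = Sudoku(grid)
    AC3(puzzle)  # prune domains first; easy grids are solved outright
    return backtracking_search(puzzle, select_unassigned_variable=mrv,
                               inference=forward_checking)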
#______________________________________________________________________________
# The Zebra Puzzle
def Zebra():
"Return an instance of the Zebra Puzzle."
Colors = 'Red Yellow Blue Green Ivory'.split()
Pets = 'Dog Fox Snails Horse Zebra'.split()
Drinks = 'OJ Tea Coffee Milk Water'.split()
    Countries = 'Englishman Spaniard Norwegian Ukrainian Japanese'.split()
Smokes = 'Kools Chesterfields Winston LuckyStrike Parliaments'.split()
vars = Colors + Pets + Drinks + Countries + Smokes
domains = {}
for var in vars:
domains[var] = list(range(1, 6))
domains['Norwegian'] = [1]
domains['Milk'] = [3]
neighbors = parse_neighbors("""Englishman: Red;
Spaniard: Dog; Kools: Yellow; Chesterfields: Fox;
Norwegian: Blue; Winston: Snails; LuckyStrike: OJ;
                Ukrainian: Tea; Japanese: Parliaments; Kools: Horse;
Coffee: Green; Green: Ivory""", vars)
for type in [Colors, Pets, Drinks, Countries, Smokes]:
for A in type:
for B in type:
if A != B:
if B not in neighbors[A]:
neighbors[A].append(B)
if A not in neighbors[B]:
neighbors[B].append(A)
def zebra_constraint(A, a, B, b, recurse=0):
same = (a == b)
next_to = abs(a - b) == 1
if A == 'Englishman' and B == 'Red':
return same
if A == 'Spaniard' and B == 'Dog':
return same
if A == 'Chesterfields' and B == 'Fox':
return next_to
if A == 'Norwegian' and B == 'Blue':
return next_to
if A == 'Kools' and B == 'Yellow':
return same
if A == 'Winston' and B == 'Snails':
return same
if A == 'LuckyStrike' and B == 'OJ':
return same
        if A == 'Ukrainian' and B == 'Tea':
return same
if A == 'Japanese' and B == 'Parliaments':
return same
if A == 'Kools' and B == 'Horse':
return next_to
if A == 'Coffee' and B == 'Green':
return same
if A == 'Green' and B == 'Ivory':
return (a - 1) == b
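        # (Added note) The constraint is symmetric: if none of the directed
        # checks above matched, swap the arguments and try once more;
        # recurse=1 prevents infinite mutual recursion.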
if recurse == 0:
return zebra_constraint(B, b, A, a, 1)
if ((A in Colors and B in Colors) or
(A in Pets and B in Pets) or
(A in Drinks and B in Drinks) or
(A in Countries and B in Countries) or
(A in Smokes and B in Smokes)):
return not same
        raise Exception('Unexpected constraint check: %r vs %r' % (A, B))
return CSP(vars, domains, neighbors, zebra_constraint)
def solve_zebra(algorithm=min_conflicts, **args):
z = Zebra()
ans = algorithm(z, **args)
for h in range(1, 6):
print('House', h, end=' ')
for (var, val) in list(ans.items()):
if val == h:
print(var, end=' ')
print()
return ans['Zebra'], ans['Water'], z.nassigns, ans
__doc__ += """
Random tests:
>>> min_conflicts(australia)
{'WA': 'B', 'Q': 'B', 'T': 'G', 'V': 'B', 'SA': 'R', 'NT': 'G', 'NSW': 'G'}
>>> min_conflicts(NQueensCSP(8), max_steps=10000)
{0: 5, 1: 0, 2: 4, 3: 1, 4: 7, 5: 2, 6: 6, 7: 3}
"""
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import copy
import datetime
import os
import pprint
import fixtures
import mock
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
from oslo_versionedobjects import fixture
import six
from nova import context
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import virt_device_metadata
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_notifier
from nova import utils
class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
VERSION = '1.0'
fields = {'baz': fields.IntegerField()}
class MyObj(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
VERSION = '1.6'
fields = {'foo': fields.IntegerField(default=1),
'bar': fields.StringField(),
'missing': fields.StringField(),
'readonly': fields.IntegerField(read_only=True),
'rel_object': fields.ObjectField('MyOwnedObject', nullable=True),
'rel_objects': fields.ListOfObjectsField('MyOwnedObject',
nullable=True),
'mutable_default': fields.ListOfStringsField(default=[]),
}
@staticmethod
def _from_db_object(context, obj, db_obj):
self = MyObj()
self.foo = db_obj['foo']
self.bar = db_obj['bar']
self.missing = db_obj['missing']
self.readonly = 1
self._context = context
return self
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls(context=context, foo=1, bar='bar')
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self):
return 'polo'
@base.remotable
def _update_test(self):
self.bar = 'updated'
@base.remotable
def save(self):
self.obj_reset_changes()
@base.remotable
def refresh(self):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self):
self.bar = 'meow'
self.save()
self.foo = 42
self.rel_object = MyOwnedObject(baz=42)
def obj_make_compatible(self, primitive, target_version):
super(MyObj, self).obj_make_compatible(primitive, target_version)
# NOTE(danms): Simulate an older version that had a different
# format for the 'bar' attribute
if target_version == '1.1' and 'bar' in primitive:
primitive['bar'] = 'old%s' % primitive['bar']
class RandomMixInWithNoFields(object):
"""Used to test object inheritance using a mixin that has no fields."""
pass
@base.NovaObjectRegistry.register_if(False)
class SubclassedObject(RandomMixInWithNoFields, MyObj):
fields = {'new_field': fields.StringField()}
class TestObjToPrimitive(test.NoDBTestCase):
def test_obj_to_primitive_list(self):
@base.NovaObjectRegistry.register_if(False)
class MyObjElement(base.NovaObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyObjElement, self).__init__()
self.foo = foo
@base.NovaObjectRegistry.register_if(False)
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList()
mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
self.assertEqual([1, 2, 3],
[x['foo'] for x in base.obj_to_primitive(mylist)])
def test_obj_to_primitive_dict(self):
base.NovaObjectRegistry.register(MyObj)
myobj = MyObj(foo=1, bar='foo')
self.assertEqual({'foo': 1, 'bar': 'foo'},
base.obj_to_primitive(myobj))
def test_obj_to_primitive_recursive(self):
base.NovaObjectRegistry.register(MyObj)
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObj')}
mylist = MyList(objects=[MyObj(), MyObj()])
for i, value in enumerate(mylist):
value.foo = i
self.assertEqual([{'foo': 0}, {'foo': 1}],
base.obj_to_primitive(mylist))
def test_obj_to_primitive_with_ip_addr(self):
@base.NovaObjectRegistry.register_if(False)
class TestObject(base.NovaObject):
fields = {'addr': fields.IPAddressField(),
'cidr': fields.IPNetworkField()}
obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
base.obj_to_primitive(obj))
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
"""Compare a NovaObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterates over
the fields of the object.
    :param test: The TestCase doing the comparison
    :param obj: The NovaObject to examine
    :param db_obj: The dict-like database object to use as reference
    :param subs: A dict of objkey=dbkey field substitutions
    :param allow_missing: A list of fields that may be missing from db_obj
    :param comparators: Map of comparator functions to use for certain fields
"""
if subs is None:
subs = {}
if allow_missing is None:
allow_missing = []
if comparators is None:
comparators = {}
for key in obj.fields:
if key in allow_missing and not obj.obj_attr_is_set(key):
continue
obj_val = getattr(obj, key)
db_key = subs.get(key, key)
db_val = db_obj[db_key]
if isinstance(obj_val, datetime.datetime):
obj_val = obj_val.replace(tzinfo=None)
if key in comparators:
comparator = comparators[key]
comparator(db_val, obj_val)
else:
test.assertEqual(db_val, obj_val)
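# Illustrative usage (added, not part of the original test suite): a typical
# call from a test case, mapping the object's 'foo' field to a hypothetical
# 'db_foo' column and comparing datetimes via str():
#
#     compare_obj(self, obj, db_obj,
#                 subs={'foo': 'db_foo'},
#                 allow_missing=['missing'],
#                 comparators={'created_at': self.str_comparator})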
class _BaseTestCase(test.TestCase):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.user_id = 'fake-user'
self.project_id = 'fake-project'
self.context = context.RequestContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
# NOTE(danms): register these here instead of at import time
# so that they're not always present
base.NovaObjectRegistry.register(MyObj)
base.NovaObjectRegistry.register(MyOwnedObject)
def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
comparators=comparators)
def str_comparator(self, expected, obj_val):
"""Compare an object field to a string in the db by performing
a simple coercion on the object field value.
"""
self.assertEqual(expected, str(obj_val))
class _LocalTest(_BaseTestCase):
def setUp(self):
super(_LocalTest, self).setUp()
# Just in case
self.useFixture(nova_fixtures.IndirectionAPIFixture(None))
@contextlib.contextmanager
def things_temporarily_local():
# Temporarily go non-remote so the conductor handles
# this request directly
_api = base.NovaObject.indirection_api
base.NovaObject.indirection_api = None
yield
base.NovaObject.indirection_api = _api
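# Illustrative usage (added): run a block with object calls handled locally
# rather than through the indirection API, e.g.
#
#     with things_temporarily_local():
#         obj.save()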
# FIXME(danms): We shouldn't be overriding any of this, but need to
# for the moment because of the mocks in the base fixture that don't
# hit our registry subclass.
class FakeIndirectionHack(fixture.FakeIndirectionAPI):
def object_action(self, context, objinst, objmethod, args, kwargs):
objinst = self._ser.deserialize_entity(
context, self._ser.serialize_entity(
context, objinst))
objmethod = six.text_type(objmethod)
args = self._ser.deserialize_entity(
None, self._ser.serialize_entity(None, args))
kwargs = self._ser.deserialize_entity(
None, self._ser.serialize_entity(None, kwargs))
original = objinst.obj_clone()
with mock.patch('nova.objects.base.NovaObject.'
'indirection_api', new=None):
result = getattr(objinst, objmethod)(*args, **kwargs)
updates = self._get_changes(original, objinst)
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
objname = six.text_type(objname)
objmethod = six.text_type(objmethod)
objver = six.text_type(objver)
args = self._ser.deserialize_entity(
None, self._ser.serialize_entity(None, args))
kwargs = self._ser.deserialize_entity(
None, self._ser.serialize_entity(None, kwargs))
cls = base.NovaObject.obj_class_from_name(objname, objver)
with mock.patch('nova.objects.base.NovaObject.'
'indirection_api', new=None):
result = getattr(cls, objmethod)(context, *args, **kwargs)
manifest = ovo_base.obj_tree_get_versions(objname)
return (base.NovaObject.obj_from_primitive(
result.obj_to_primitive(target_version=objver,
version_manifest=manifest),
context=context)
if isinstance(result, base.NovaObject) else result)
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
objname = six.text_type(objname)
objmethod = six.text_type(objmethod)
object_versions = {six.text_type(o): six.text_type(v)
for o, v in object_versions.items()}
args, kwargs = self._canonicalize_args(context, args, kwargs)
objver = object_versions[objname]
cls = base.NovaObject.obj_class_from_name(objname, objver)
with mock.patch('nova.objects.base.NovaObject.'
'indirection_api', new=None):
result = getattr(cls, objmethod)(context, *args, **kwargs)
return (base.NovaObject.obj_from_primitive(
result.obj_to_primitive(target_version=objver),
context=context)
if isinstance(result, base.NovaObject) else result)
class IndirectionFixture(fixtures.Fixture):
def setUp(self):
super(IndirectionFixture, self).setUp()
ser = base.NovaObjectSerializer()
self.indirection_api = FakeIndirectionHack(serializer=ser)
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.base.NovaObject.indirection_api',
self.indirection_api))
class _RemoteTest(_BaseTestCase):
def setUp(self):
super(_RemoteTest, self).setUp()
self.useFixture(IndirectionFixture())
class _TestObject(object):
def test_object_attrs_in_init(self):
# Spot check a few
objects.Instance
objects.InstanceInfoCache
objects.SecurityGroup
# Now check the test one in this file. Should be newest version
self.assertEqual('1.6', objects.MyObj.VERSION)
def test_hydration_type_error(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
real_method = MyObj._obj_from_primitive
def _obj_from_primitive(*args):
return real_method(*args)
with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
ofp.side_effect = _obj_from_primitive
obj = MyObj.obj_from_primitive(primitive)
ofp.assert_called_once_with(None, '1.5', primitive)
self.assertEqual(obj.foo, 1)
def test_hydration_version_different(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.2',
'nova_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj.foo, 1)
self.assertEqual('1.2', obj.VERSION)
def test_hydration_bad_ns(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'foo',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
self.assertRaises(ovo_exc.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_hydration_additional_unexpected_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): If we call obj_from_primitive() directly
# with a version containing .z, we'll get that version
# in the resulting object. In reality, when using the
# serializer, we'll get that snipped off (tested
# elsewhere)
self.assertEqual('1.5.1', obj.VERSION)
def test_dehydration(self):
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.data': {'foo': 1}}
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.obj_to_primitive(), expected)
def test_object_property(self):
obj = MyObj(foo=1)
self.assertEqual(obj.foo, 1)
def test_object_property_type_error(self):
obj = MyObj()
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_load(self):
obj = MyObj()
self.assertEqual(obj.bar, 'loaded!')
def test_load_in_base(self):
@base.NovaObjectRegistry.register_if(False)
class Foo(base.NovaObject):
fields = {'foobar': fields.IntegerField()}
obj = Foo()
with self.assertRaisesRegex(NotImplementedError, ".*foobar.*"):
obj.foobar
def test_loaded_in_primitive(self):
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.bar, 'loaded!')
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes': ['bar'],
'nova_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_changes_in_primitive(self):
obj = MyObj(foo=123)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
primitive = obj.obj_to_primitive()
self.assertIn('nova_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj2.obj_what_changed(), set(['foo']))
obj2.obj_reset_changes()
self.assertEqual(obj2.obj_what_changed(), set())
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(ovo_exc.OrphanedObjectError,
obj._update_test)
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj._update_test()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
self.assertEqual(obj.foo, 123)
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.save()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 123)
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.refresh()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 321)
self.assertEqual(obj.bar, 'refreshed')
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
self.assertIsInstance(obj.rel_object, MyOwnedObject)
def test_changed_with_sub_object(self):
@base.NovaObjectRegistry.register_if(False)
class ParentObject(base.NovaObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.ObjectField('MyObj'),
}
obj = ParentObject()
self.assertEqual(set(), obj.obj_what_changed())
obj.foo = 1
self.assertEqual(set(['foo']), obj.obj_what_changed())
bar = MyObj()
obj.bar = bar
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(), obj.obj_what_changed())
bar.foo = 1
self.assertEqual(set(['bar']), obj.obj_what_changed())
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.bar, 'bar')
result = obj.marco()
self.assertEqual(result, 'polo')
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.foo, 1)
obj._update_test()
self.assertEqual(obj.bar, 'updated')
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5)
obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
deleted=False)
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes':
['deleted', 'created_at', 'deleted_at', 'updated_at'],
'nova_object.data':
{'created_at': utils.isotime(dt),
'updated_at': utils.isotime(dt),
'deleted_at': None,
'deleted': False,
}
}
actual = obj.obj_to_primitive()
self.assertJsonEqual(actual, expected)
def test_contains(self):
obj = MyObj()
self.assertNotIn('foo', obj)
obj.foo = 1
self.assertIn('foo', obj)
self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_get(self):
obj = MyObj(foo=1)
# Foo has value, should not get the default
self.assertEqual(obj.get('foo', 2), 1)
# Foo has value, should return the value without error
self.assertEqual(obj.get('foo'), 1)
# Bar is not loaded, so we should get the default
self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
# Bar without a default should lazy-load
self.assertEqual(obj.get('bar'), 'loaded!')
# Bar now has a default, but loaded value should be returned
self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
# Invalid attribute should raise AttributeError
self.assertRaises(AttributeError, obj.get, 'nothing')
# ...even with a default
self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
base_fields = base.NovaPersistentObject.fields.keys()
myobj_fields = (['foo', 'bar', 'missing',
'readonly', 'rel_object',
'rel_objects', 'mutable_default'] +
list(base_fields))
myobj3_fields = ['new_field']
self.assertTrue(issubclass(SubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(SubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(SubclassedObject.fields.keys()))
def test_obj_alternate_context(self):
obj = MyObj(context=self.context)
with obj.obj_alternate_context(mock.sentinel.alt_ctx):
self.assertEqual(mock.sentinel.alt_ctx,
obj._context)
self.assertEqual(self.context, obj._context)
def test_get_changes(self):
obj = MyObj()
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
@base.NovaObjectRegistry.register_if(False)
class TestObj(base.NovaObject):
fields = {'foo': fields.IntegerField()}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj()
self.assertEqual(['foo', 'bar'], obj.obj_fields)
def test_obj_constructor(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
def test_obj_read_only(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.readonly = 1
self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr,
obj, 'readonly', 2)
def test_obj_mutable_default(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.mutable_default = None
obj.mutable_default.append('s1')
self.assertEqual(obj.mutable_default, ['s1'])
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.mutable_default = None
obj1.mutable_default.append('s2')
self.assertEqual(obj1.mutable_default, ['s2'])
def test_obj_mutable_default_set_default(self):
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.obj_set_defaults('mutable_default')
self.assertEqual(obj1.mutable_default, [])
obj1.mutable_default.append('s1')
self.assertEqual(obj1.mutable_default, ['s1'])
obj2 = MyObj(context=self.context, foo=123, bar='abc')
obj2.obj_set_defaults('mutable_default')
self.assertEqual(obj2.mutable_default, [])
obj2.mutable_default.append('s2')
self.assertEqual(obj2.mutable_default, ['s2'])
def test_obj_repr(self):
obj = MyObj(foo=123)
self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
'deleted_at=<?>,foo=123,missing=<?>,'
'mutable_default=<?>,readonly=<?>,rel_object=<?>,'
'rel_objects=<?>,updated_at=<?>)',
repr(obj))
def test_obj_make_obj_compatible(self):
subobj = MyOwnedObject(baz=1)
subobj.VERSION = '1.2'
obj = MyObj(rel_object=subobj)
obj.obj_relationships = {
'rel_object': [('1.5', '1.1'), ('1.7', '1.2')],
}
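        # (Added note) obj_relationships maps a child field to a list of
        # (parent_version, child_version) pairs: backporting the parent to a
        # given version backports the child to the paired version, and the
        # field is dropped entirely for parent versions older than the first
        # pair, as the assertions below verify.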
orig_primitive = obj.obj_to_primitive()['nova_object.data']
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.8', 'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.7', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.2')
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.6', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.5', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.4', 'rel_object')
self.assertFalse(mock_compat.called)
self.assertNotIn('rel_object', primitive)
def test_obj_make_compatible_hits_sub_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10',
'rel_object')
def test_obj_make_compatible_skips_unset_sub_objects(self):
obj = MyObj(foo=123)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
self.assertFalse(mock_compat.called)
def test_obj_make_compatible_doesnt_skip_falsey_sub_objects(self):
@base.NovaObjectRegistry.register_if(False)
class MyList(base.ObjectListBase, base.NovaObject):
VERSION = '1.2'
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
obj_relationships = {
'objects': [('1.1', '1.1'), ('1.2', '1.2')],
}
mylist = MyList(objects=[])
@base.NovaObjectRegistry.register_if(False)
class MyOwner(base.NovaObject):
VERSION = '1.2'
fields = {'mylist': fields.ObjectField('MyList')}
obj_relationships = {
'mylist': [('1.1', '1.1')],
}
myowner = MyOwner(mylist=mylist)
primitive = myowner.obj_to_primitive('1.1')
self.assertIn('mylist', primitive['nova_object.data'])
def test_obj_make_compatible_handles_list_of_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(rel_objects=[subobj])
obj.obj_relationships = {'rel_objects': [('1.0', '1.123')]}
def fake_make_compat(primitive, version):
self.assertEqual('1.123', version)
self.assertIn('baz', primitive)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_mc:
mock_mc.side_effect = fake_make_compat
obj.obj_to_primitive('1.0')
self.assertTrue(mock_mc.called)
def test_delattr(self):
obj = MyObj(bar='foo')
del obj.bar
# Should appear unset now
self.assertFalse(obj.obj_attr_is_set('bar'))
# Make sure post-delete, references trigger lazy loads
self.assertEqual('loaded!', getattr(obj, 'bar'))
def test_delattr_unset(self):
obj = MyObj()
self.assertRaises(AttributeError, delattr, obj, 'bar')
class TestObject(_LocalTest, _TestObject):
def test_set_defaults(self):
obj = MyObj()
obj.obj_set_defaults('foo')
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertEqual(1, obj.foo)
def test_set_defaults_no_default(self):
obj = MyObj()
self.assertRaises(ovo_exc.ObjectActionError,
obj.obj_set_defaults, 'bar')
def test_set_all_defaults(self):
obj = MyObj()
obj.obj_set_defaults()
self.assertEqual(set(['deleted', 'foo', 'mutable_default']),
obj.obj_what_changed())
self.assertEqual(1, obj.foo)
def test_set_defaults_not_overwrite(self):
# NOTE(danms): deleted defaults to False, so verify that it does
# not get reset by obj_set_defaults()
obj = MyObj(deleted=True)
obj.obj_set_defaults()
self.assertEqual(1, obj.foo)
self.assertTrue(obj.deleted)
class TestObjectSerializer(_BaseTestCase):
def test_serialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.serialize_entity(None, thing))
def test_deserialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.deserialize_entity(None, thing))
def test_serialize_set_to_list(self):
ser = base.NovaObjectSerializer()
self.assertEqual([1, 2], ser.serialize_entity(None, set([1, 2])))
def _test_deserialize_entity_newer(self, obj_version, backported_to,
my_version='1.6'):
ser = base.NovaObjectSerializer()
ser._conductor = mock.Mock()
ser._conductor.object_backport_versions.return_value = 'backported'
class MyTestObj(MyObj):
VERSION = my_version
base.NovaObjectRegistry.register(MyTestObj)
obj = MyTestObj()
obj.VERSION = obj_version
primitive = obj.obj_to_primitive()
result = ser.deserialize_entity(self.context, primitive)
if backported_to is None:
self.assertFalse(ser._conductor.object_backport_versions.called)
else:
self.assertEqual('backported', result)
versions = ovo_base.obj_tree_get_versions('MyTestObj')
ser._conductor.object_backport_versions.assert_called_with(
self.context, primitive, versions)
def test_deserialize_entity_newer_version_backports(self):
self._test_deserialize_entity_newer('1.25', '1.6')
def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
self._test_deserialize_entity_newer('1.6.0', None)
def test_deserialize_entity_newer_revision_does_not_backport(self):
self._test_deserialize_entity_newer('1.6.1', None)
def test_deserialize_entity_newer_version_passes_revision(self):
self._test_deserialize_entity_newer('1.7', '1.6.1', '1.6.1')
def test_deserialize_dot_z_with_extra_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
ser = base.NovaObjectSerializer()
obj = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): The serializer is where the logic lives that
# avoids backports for cases where only a .z difference in
# the received object version is detected. As a result, we
# end up with a version of what we expected, effectively the
# .0 of the object.
self.assertEqual('1.6', obj.VERSION)
@mock.patch('oslo_versionedobjects.base.obj_tree_get_versions')
def test_object_tree_backport(self, mock_get_versions):
# Test the full client backport path all the way from the serializer
# to the conductor and back.
self.start_service('conductor',
manager='nova.conductor.manager.ConductorManager')
# NOTE(danms): Actually register a complex set of objects,
# two versions of the same parent object which contain a
# child sub object.
@base.NovaObjectRegistry.register
class Child(base.NovaObject):
VERSION = '1.10'
@base.NovaObjectRegistry.register
class Parent(base.NovaObject):
VERSION = '1.0'
fields = {
'child': fields.ObjectField('Child'),
}
@base.NovaObjectRegistry.register # noqa
class Parent(base.NovaObject):
VERSION = '1.1'
fields = {
'child': fields.ObjectField('Child'),
}
# NOTE(danms): Since we're on the same node as conductor,
# return a fake version manifest so that we confirm that it
# actually honors what the client asked for and not just what
# it sees in the local machine state.
mock_get_versions.return_value = {
'Parent': '1.0',
'Child': '1.5',
}
call_context = {}
real_ofp = base.NovaObject.obj_from_primitive
def fake_obj_from_primitive(*a, **k):
# NOTE(danms): We need the first call to this to report an
# incompatible object version, but subsequent calls must
# succeed. Since we're testing the backport path all the
# way through conductor and RPC, we can't fully break this
# method, we just need it to fail once to trigger the
# backport.
if 'run' in call_context:
return real_ofp(*a, **k)
else:
call_context['run'] = True
raise ovo_exc.IncompatibleObjectVersion('foo')
child = Child()
parent = Parent(child=child)
prim = parent.obj_to_primitive()
ser = base.NovaObjectSerializer()
with mock.patch('nova.objects.base.NovaObject.'
'obj_from_primitive') as mock_ofp:
mock_ofp.side_effect = fake_obj_from_primitive
result = ser.deserialize_entity(self.context, prim)
# Our newest version (and what we passed back) of Parent
# is 1.1, make sure that the manifest version is honored
self.assertEqual('1.0', result.VERSION)
# Our newest version (and what we passed back) of Child
# is 1.10, make sure that the manifest version is honored
self.assertEqual('1.5', result.child.VERSION)
def test_object_serialization(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
primitive = ser.serialize_entity(self.context, obj)
self.assertIn('nova_object.name', primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertIsInstance(obj2, MyObj)
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
# dict case
thing = {'key': obj}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in six.itervalues(primitive):
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in six.itervalues(thing2):
self.assertIsInstance(item, MyObj)
# object-action updates dict case
thing = {'foo': obj.obj_to_primitive()}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(thing, primitive)
thing2 = ser.deserialize_entity(self.context, thing)
self.assertIsInstance(thing2['foo'], base.NovaObject)
class TestArgsSerializer(test.NoDBTestCase):
def setUp(self):
super(TestArgsSerializer, self).setUp()
self.now = timeutils.utcnow()
self.str_now = utils.strtime(self.now)
self.unicode_str = u'\xF0\x9F\x92\xA9'
@base.serialize_args
def _test_serialize_args(self, *args, **kwargs):
expected_args = ('untouched', self.str_now, self.str_now)
for index, val in enumerate(args):
self.assertEqual(expected_args[index], val)
expected_kwargs = {'a': 'untouched', 'b': self.str_now,
'c': self.str_now, 'exc_val': self.unicode_str}
for key, val in kwargs.items():
self.assertEqual(expected_kwargs[key], val)
def test_serialize_args(self):
self._test_serialize_args('untouched', self.now, self.now,
a='untouched', b=self.now, c=self.now,
exc_val=self.unicode_str)
class TestRegistry(test.NoDBTestCase):
@mock.patch('nova.objects.base.objects')
def test_hook_chooses_newer_properly(self, mock_objects):
reg = base.NovaObjectRegistry()
reg.registration_hook(MyObj, 0)
class MyNewerObj(object):
VERSION = '1.123'
@classmethod
def obj_name(cls):
return 'MyObj'
self.assertEqual(MyObj, mock_objects.MyObj)
reg.registration_hook(MyNewerObj, 0)
self.assertEqual(MyNewerObj, mock_objects.MyObj)
@mock.patch('nova.objects.base.objects')
def test_hook_keeps_newer_properly(self, mock_objects):
reg = base.NovaObjectRegistry()
reg.registration_hook(MyObj, 0)
class MyOlderObj(object):
VERSION = '1.1'
@classmethod
def obj_name(cls):
return 'MyObj'
self.assertEqual(MyObj, mock_objects.MyObj)
reg.registration_hook(MyOlderObj, 0)
self.assertEqual(MyObj, mock_objects.MyObj)
# NOTE(danms): The hashes in this list should only be changed if
# they come with a corresponding version bump in the affected
# objects
object_data = {
'Agent': '1.0-c0c092abaceb6f51efe5d82175f15eba',
'AgentList': '1.0-5a7380d02c3aaf2a32fc8115ae7ca98c',
'Aggregate': '1.3-f315cb68906307ca2d1cca84d4753585',
'AggregateList': '1.3-3ea55a050354e72ef3306adefa553957',
'BandwidthUsage': '1.2-c6e4c779c7f40f2407e3d70022e3cd1c',
'BandwidthUsageList': '1.2-5fe7475ada6fe62413cbfcc06ec70746',
'BlockDeviceMapping': '1.20-45a6ad666ddf14bbbedece2293af77e2',
'BlockDeviceMappingList': '1.17-1e568eecb91d06d4112db9fd656de235',
'BuildRequest': '1.3-077dee42bed93f8a5b62be77657b7152',
'BuildRequestList': '1.0-cd95608eccb89fbc702c8b52f38ec738',
'CellMapping': '1.1-5d652928000a5bc369d79d5bde7e497d',
'CellMappingList': '1.1-496ef79bb2ab41041fff8bcb57996352',
'ComputeNode': '1.19-af6bd29a6c3b225da436a0d8487096f2',
'ComputeNodeList': '1.17-52f3b0962b1c86b98590144463ebb192',
'ConsoleAuthToken': '1.1-8da320fb065080eb4d3c2e5c59f8bf52',
'CpuDiagnostics': '1.0-d256f2e442d1b837735fd17dfe8e3d47',
'Destination': '1.4-3b440d29459e2c98987ad5b25ad1cb2c',
'DeviceBus': '1.0-77509ea1ea0dd750d5864b9bd87d3f9d',
'DeviceMetadata': '1.0-04eb8fd218a49cbc3b1e54b774d179f7',
'Diagnostics': '1.0-38ad3e9b1a59306253fc03f97936db95',
'DiskDiagnostics': '1.0-dfd0892b5924af1a585f3fed8c9899ca',
'DiskMetadata': '1.0-e7a0f1ccccf10d26a76b28e7492f3788',
'EC2Ids': '1.0-474ee1094c7ec16f8ce657595d8c49d9',
'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f',
'FixedIP': '1.14-53e1c10b539f1a82fe83b1af4720efae',
'FixedIPList': '1.15-07b6261cef836cb09d2d8673f68ece15',
'Flavor': '1.2-4ce99b41327bb230262e5a8f45ff0ce3',
'FlavorList': '1.1-52b5928600e7ca973aa4fc1e46f3934c',
'FloatingIP': '1.10-52a67d52d85eb8b3f324a5b7935a335b',
'FloatingIPList': '1.12-e4debd21fddb12cf40d36f737225fa9d',
'HVSpec': '1.2-de06bcec472a2f04966b855a49c46b41',
'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac',
'HostMappingList': '1.1-18ac2bfb8c1eb5545bed856da58a79bc',
'HyperVLiveMigrateData': '1.4-e265780e6acfa631476c8170e8d6fce0',
'IDEDeviceBus': '1.0-29d4c9f27ac44197f01b6ac1b7e16502',
'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
'ImageMetaProps': '1.25-66fc973af215eb5701ed4034bb6f0685',
'Instance': '2.7-d187aec68cad2e4d8b8a03a68e4739ce',
'InstanceAction': '1.2-9a5abc87fdd3af46f45731960651efb5',
'InstanceActionEvent': '1.3-c749e1b3589e7117c81cb2aa6ac438d5',
'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be',
'InstanceActionList': '1.1-a2b2fb6006b47c27076d3a1d48baa759',
'InstanceDeviceMetadata': '1.0-74d78dd36aa32d26d2769a1b57caf186',
'InstanceExternalEvent': '1.3-e47782874cca95bb96e566286e9d1e23',
'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
'InstanceFaultList': '1.2-6bb72de2872fe49ded5eb937a93f2451',
'InstanceGroup': '1.11-852ac511d30913ee88f3c3a869a8f30a',
'InstanceGroupList': '1.8-90f8f1a445552bb3bbc9fa1ae7da27d4',
'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e',
'InstanceList': '2.6-238f125650c25d6d12722340d726f723',
'InstanceMapping': '1.2-3bd375e65c8eb9c45498d2f87b882e03',
'InstanceMappingList': '1.3-d34b6ebb076d542ae0f8b440534118da',
'InstanceNUMACell': '1.4-b68e13eacba363ae8f196abf0ffffb5b',
'InstanceNUMATopology': '1.3-ec0030cb0402a49c96da7051c037082a',
'InstancePCIRequest': '1.3-f6d324f1c337fad4f34892ed5f484c9a',
'InstancePCIRequests': '1.1-65e38083177726d806684cb1cc0136d2',
'KeyPair': '1.4-1244e8d1b103cc69d038ed78ab3a8cc6',
'KeyPairList': '1.3-94aad3ac5c938eef4b5e83da0212f506',
'LibvirtLiveMigrateBDMInfo': '1.1-5f4a68873560b6f834b74e7861d71aaf',
'LibvirtLiveMigrateData': '1.10-348cf70ea44d3b985f45f64725d6f6a7',
'LibvirtLiveMigrateNUMAInfo': '1.0-0e777677f3459d0ed1634eabbdb6c22f',
'MemoryDiagnostics': '1.0-2c995ae0f2223bb0f8e523c5cc0b83da',
'Migration': '1.7-b77066a88d08bdb0b05d7bc18780c55a',
'MigrationContext': '1.2-89f10a83999f852a489962ae37d8a026',
'MigrationList': '1.4-983a9c29d4f1e747ce719dc9063b729b',
'MonitorMetric': '1.1-53b1db7c4ae2c531db79761e7acc52ba',
'MonitorMetricList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'NUMACell': '1.4-7695303e820fa855d76954be2eb2680e',
'NUMAPagesTopology': '1.1-edab9fa2dc43c117a38d600be54b4542',
'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
'NUMATopologyLimits': '1.1-4235c5da7a76c7e36075f0cd2f5cf922',
'Network': '1.2-a977ab383aa462a479b2fae8211a5dde',
'NetworkInterfaceMetadata': '1.2-6f3d480b40fe339067b1c0dd4d656716',
'NetworkList': '1.2-69eca910d8fa035dfecd8ba10877ee59',
'NetworkMetadata': '1.0-2cb8d21b34f87b0261d3e1d1ae5cf218',
'NetworkRequest': '1.2-af1ff2d986999fbb79377712794d82aa',
'NetworkRequestList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'NicDiagnostics': '1.0-895e9ad50e0f56d5258585e3e066aea5',
'PCIDeviceBus': '1.0-2b891cb77e42961044689f3dc2718995',
'PciDevice': '1.6-25ca0542a22bc25386a72c0065a79c01',
'PciDeviceList': '1.3-52ff14355491c8c580bdc0ba34c26210',
'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
'PciDevicePoolList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'PowerVMLiveMigrateData': '1.4-a745f4eda16b45e1bc5686a0c498f27e',
'Quotas': '1.3-3b2b91371f60e788035778fc5f87797d',
'QuotasNoOp': '1.3-d1593cf969c81846bc8192255ea95cce',
'RequestGroup': '1.3-0458d350a8ec9d0673f9be5640a990ce',
'RequestLevelParams': '1.0-1e5c8c18bd44cd233c8b32509c99d06f',
'RequestSpec': '1.13-e1aa38b2bf3f8547474ee9e4c0aa2745',
'Resource': '1.0-d8a2abbb380da583b995fd118f6a8953',
'ResourceList': '1.0-4a53826625cc280e15fae64a575e0879',
'ResourceMetadata': '1.0-77509ea1ea0dd750d5864b9bd87d3f9d',
'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e',
'SCSIDeviceBus': '1.0-61c1e89a00901069ab1cf2991681533b',
'SchedulerLimits': '1.0-249c4bd8e62a9b327b7026b7f19cc641',
'SchedulerRetries': '1.1-3c9c8b16143ebbb6ad7030e999d14cc0',
'SecurityGroup': '1.2-86d67d8d3ab0c971e1dc86e02f9524a8',
'SecurityGroupList': '1.1-c655ed13298e630f4d398152f7d08d71',
'SecurityGroupRule': '1.1-ae1da17b79970012e8536f88cb3c6b29',
'SecurityGroupRuleList': '1.2-0005c47fcd0fb78dd6d7fd32a1409f5b',
'Selection': '1.1-548e3c2f04da2a61ceaf9c4e1589f264',
'Service': '1.22-8a740459ab9bf258a19c8fcb875c2d9a',
'ServiceList': '1.19-5325bce13eebcbf22edc9678285270cc',
'Tag': '1.1-8b8d7d5b48887651a0e01241672e2963',
'TagList': '1.1-55231bdb671ecf7641d6a2e9109b5d8e',
'TaskLog': '1.0-78b0534366f29aa3eebb01860fbe18fe',
'TaskLogList': '1.0-cc8cce1af8a283b9d28b55fcd682e777',
'TrustedCerts': '1.0-dcf528851e0f868c77ee47e90563cda7',
'USBDeviceBus': '1.0-e4c7dd6032e46cd74b027df5eb2d4750',
'VIFMigrateData': '1.0-cb15282b25a039ab35046ed705eb931d',
'VMwareLiveMigrateData': '1.0-a3cc858a2bf1d3806d6f57cfaa1fb98a',
'VirtCPUFeature': '1.0-ea2464bdd09084bd388e5f61d5d4fc86',
'VirtCPUModel': '1.0-5e1864af9227f698326203d7249796b5',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
'VirtualInterface': '1.3-efd3ca8ebcc5ce65fff5a25f31754c54',
'VirtualInterfaceList': '1.0-9750e2074437b3077e46359102779fc6',
'VolumeUsage': '1.0-6c8190c46ce1469bb3286a1f21c2e475',
'XenDeviceBus': '1.0-272a4f899b24e31e42b2b9a7ed7e9194',
'XenapiLiveMigrateData': '1.4-7dc9417e921b2953faa6751f18785f3f',
# TODO(efried): re-alphabetize this
'LibvirtVPMEMDevice': '1.0-17ffaf47585199eeb9a2b83d6bde069f',
}
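# (Added note) To regenerate the fingerprints above after an intentional
# object change, run TestObjectVersions.test_versions below with the
# GENERATE_HASHES environment variable set; it writes the new hashes to
# object_hashes.txt.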
def get_nova_objects():
"""Get Nova versioned objects
This returns a dict of versioned objects which are
    in the Nova project namespace only, i.e. it excludes
    objects from os-vif and other third-party modules.
:return: a dict mapping class names to lists of versioned objects
"""
all_classes = base.NovaObjectRegistry.obj_classes()
nova_classes = {}
for name in all_classes:
objclasses = all_classes[name]
# NOTE(danms): All object registries that inherit from the
# base VersionedObjectRegistry share a common list of classes.
# That means even things like os_vif objects will be in our
# registry, and for any of them that share the same name
# (i.e. Network), we need to keep ours and exclude theirs.
our_ns = [cls for cls in objclasses
if (cls.OBJ_PROJECT_NAMESPACE ==
base.NovaObject.OBJ_PROJECT_NAMESPACE)]
if our_ns:
nova_classes[name] = our_ns
return nova_classes
class TestObjectVersions(test.NoDBTestCase):
def test_versions(self):
checker = fixture.ObjectVersionChecker(
get_nova_objects())
fingerprints = checker.get_hashes()
if os.getenv('GENERATE_HASHES'):
open('object_hashes.txt', 'w').write(
pprint.pformat(fingerprints))
raise test.TestingException(
'Generated hashes in object_hashes.txt')
expected, actual = checker.test_hashes(object_data)
self.assertEqual(expected, actual,
'Some objects have changed; please make sure the '
'versions have been bumped, and then update their '
'hashes here.')
def test_obj_make_compatible(self):
# NOTE(danms): This is normally not registered because it is just a
# base class. However, the test fixture below requires it to be
# in the registry so that it can verify backports based on its
# children. So, register it here, which will be reverted after the
# cleanUp for this (and all) tests is run.
base.NovaObjectRegistry.register(virt_device_metadata.DeviceBus)
# Iterate all object classes and verify that we can run
# obj_make_compatible with every older version than current.
# This doesn't actually test the data conversions, but it at least
# makes sure the method doesn't blow up on something basic like
# expecting the wrong version format.
# Hold a dictionary of args/kwargs that need to get passed into
# __init__() for specific classes. The key in the dictionary is
# the obj_class that needs the init args/kwargs.
init_args = {}
init_kwargs = {}
checker = fixture.ObjectVersionChecker(
base.NovaObjectRegistry.obj_classes())
checker.test_compatibility_routines(use_manifest=True,
init_args=init_args,
init_kwargs=init_kwargs)
def test_list_obj_make_compatible(self):
@base.NovaObjectRegistry.register_if(False)
class TestObj(base.NovaObject):
VERSION = '1.4'
fields = {'foo': fields.IntegerField()}
@base.NovaObjectRegistry.register_if(False)
class TestListObj(base.ObjectListBase, base.NovaObject):
VERSION = '1.5'
fields = {'objects': fields.ListOfObjectsField('TestObj')}
obj_relationships = {
'objects': [('1.0', '1.1'), ('1.1', '1.2'),
('1.3', '1.3'), ('1.5', '1.4')]
}
my_list = TestListObj()
my_obj = TestObj(foo=1)
my_list.objects = [my_obj]
primitive = my_list.obj_to_primitive(target_version='1.5')
primitive_data = primitive['nova_object.data']
obj_primitive = my_obj.obj_to_primitive(target_version='1.4')
obj_primitive_data = obj_primitive['nova_object.data']
with mock.patch.object(TestObj, 'obj_make_compatible') as comp:
my_list.obj_make_compatible(primitive_data, '1.1')
comp.assert_called_with(obj_primitive_data,
'1.2')
def test_list_obj_make_compatible_when_no_objects(self):
# Test to make sure obj_make_compatible works with no 'objects'
# If a List object ever has a version that did not contain the
# 'objects' key, we need to make sure converting back to that version
# doesn't cause backporting problems.
@base.NovaObjectRegistry.register_if(False)
class TestObj(base.NovaObject):
VERSION = '1.1'
fields = {'foo': fields.IntegerField()}
@base.NovaObjectRegistry.register_if(False)
class TestListObj(base.ObjectListBase, base.NovaObject):
VERSION = '1.1'
fields = {'objects': fields.ListOfObjectsField('TestObj')}
# pretend that version 1.0 didn't have 'objects'
obj_relationships = {
'objects': [('1.1', '1.1')]
}
my_list = TestListObj()
my_list.objects = [TestObj(foo=1)]
primitive = my_list.obj_to_primitive(target_version='1.1')
primitive_data = primitive['nova_object.data']
my_list.obj_make_compatible(primitive_data,
target_version='1.0')
self.assertNotIn('objects', primitive_data,
"List was backported to before 'objects' existed."
" 'objects' should not be in the primitive.")
class TestObjEqualPrims(_BaseTestCase):
def test_object_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='goodbye')
obj2.obj_reset_changes()
obj2.bar = 'goodbye'
        # obj2 will be marked with field 'bar' updated
        self.assertTrue(base.obj_equal_prims(obj1, obj2),
                        "Objects that differ only because one has a field "
                        "marked as updated should be equal")
def test_object_not_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='hello')
obj2.obj_reset_changes()
self.assertFalse(base.obj_equal_prims(obj1, obj2),
"Objects that differ in any field "
"should not be equal")
def test_object_ignore_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='hello')
obj2.obj_reset_changes()
self.assertTrue(base.obj_equal_prims(obj1, obj2, ['bar']),
"Objects that only differ in an ignored field "
"should be equal")
class TestObjMethodOverrides(test.NoDBTestCase):
def test_obj_reset_changes(self):
args = utils.getargspec(base.NovaObject.obj_reset_changes)
obj_classes = base.NovaObjectRegistry.obj_classes()
for obj_name in obj_classes:
obj_class = obj_classes[obj_name][0]
self.assertEqual(args,
utils.getargspec(obj_class.obj_reset_changes))
class TestObjectsDefaultingOnInit(test.NoDBTestCase):
def test_init_behavior_policy(self):
all_objects = get_nova_objects()
violations = collections.defaultdict(list)
# NOTE(danms): Do not add things to this list!
#
# There is one known exception to this init policy, and that
# is the Service object because of the special behavior of the
# version field. We *want* to counteract the usual non-clobber
# behavior of that field specifically. See the comments in
# Service.__init__ for more details. This will likely never
# apply to any other non-ephemeral object, so this list should
# never grow.
exceptions = [objects.Service]
for name, objclasses in all_objects.items():
for objcls in objclasses:
if objcls in exceptions:
continue
key = '%s-%s' % (name, objcls.VERSION)
obj = objcls()
if isinstance(obj, base.NovaEphemeralObject):
# Skip ephemeral objects, which are allowed to
# set fields at init time
continue
for field in objcls.fields:
if field in obj:
violations[key].append(field)
self.assertEqual({}, violations,
'Some non-ephemeral objects set fields during '
'initialization; This is not allowed.')
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_map_ops.map_fn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops as mo
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedMapOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
      # The following test cases map over a RaggedTensor and apply a
      # transformation whose result has shape:
# [d1, (d2)] -> [d1]
dict(
fn=mo.reduce_mean,
elems=[[1, 2, 3], [4, 5], [6, 7]],
elems_dtype=dtypes.int32,
expected_output=[2, 4, 6],
result_dtype=dtypes.int32,
),
dict(
fn=string_ops.reduce_join,
elems=[['foo', 'bar', 'baz'], ['a'], ['b', 'c']],
expected_output=[b'foobarbaz', b'a', b'bc'],
elems_dtype=dtypes.string,
result_dtype=dtypes.string,
),
# [d1, (d2)] -> [d1, 2]
dict(
fn=lambda x: array_ops.stack([mo.reduce_mean(x), mo.reduce_sum(x)]),
# fn=self.stack_mean_and_sum,
elems=[[1, 2, 3], [4, 5], [6, 7]],
expected_output=[[2, 6], [4.5, 9], [6.5, 13]],
elems_dtype=dtypes.float32,
result_dtype=dtypes.float32,
expected_ragged_rank=0,
),
# [d1, (d2)] -> [d1, (d2)]
dict(
fn=lambda x: x + np.int64(1),
elems=[[1, 2, 3], [4, 5], [6, 7]],
expected_output=[[2, 3, 4], [5, 6], [7, 8]],
elems_dtype=dtypes.int64,
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1, (d2), d3] -> [d1, (d2), d3]
dict(
fn=lambda x: x + np.int64(1),
elems=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
elems_ragged_rank=1,
expected_ragged_rank=1,
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
expected_output=[[[2, 3], [4, 5]], [], [[6, 7], [8, 9], [10, 1]]],
),
# [d1, (d2)] -> [d1, (d2), (d3)]
dict(
fn=lambda x: ragged_tensor.RaggedTensor.from_row_starts(x, [0]),
elems=[[1, 2, 3], [4, 5], [6, 7]],
expected_output=[[[1, 2, 3]], [[4, 5]], [[6, 7]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=2),
),
# [d1, (d2), (d3)] -> [d1, (d2), (d3)]
dict(
fn=lambda x: ragged_functional_ops.map_flat_values(mo.add, x, 1),
elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
expected_output=[[[2, 3, 4]], [[5, 6], [7, 8]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=2),
),
# [d1, (d2), (d3)] -> [d1, (d2)]
dict(
fn=lambda x: ragged_math_ops.reduce_sum(x, axis=1),
elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
expected_output=[[6], [9, 13]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1, (d2), (d3)] -> [d1, (d3)]
dict(
fn=lambda x: ragged_math_ops.reduce_sum(x, axis=0),
elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
expected_output=[[1, 2, 3], [10, 12]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1, (d2), (d3)] -> [d1]
dict(
fn=ragged_math_ops.reduce_sum,
elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
expected_output=[6, 22],
result_dtype=dtypes.int64,
),
# [d1] -> [d1, (d2)]
dict(
fn=mo.range,
elems=[4, 0, 2],
expected_output=[[0, 1, 2, 3], [], [0, 1]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1] -> [d1, (d2), (d3)]
dict(
fn=lambda x: ragged_math_ops.range(mo.range(x)),
elems=[5, 0, 3],
expected_output=[[[], [0], [0, 1], [0, 1, 2], [0, 1, 2, 3]], [],
[[], [0], [0, 1]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=2),
),
# [d1, (d2), (d3), (d4a), (d5)] -> [d1, (d2), (d3), (d4b), (d5)]
dict(
fn=lambda x: x + np.int64(1),
elems=[[[[[1, 2, 3]], [[4], [5]]]], [[[[6, 7]]], [[[8], []]]]],
expected_output=[[[[[2, 3, 4]], [[5], [6]]]], [[[[7, 8]]], [[[9],
[]]]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=4),
),
])
def testRaggedMap(
self,
fn,
elems,
expected_output,
expected_ragged_rank=None,
result_ragged_rank=None,
elems_ragged_rank=None,
elems_dtype=dtypes.int64,
result_dtype=None,
infer_shape=True,
):
elems = ragged_factory_ops.constant(elems, elems_dtype, elems_ragged_rank)
output = ragged_map_ops.map_fn(
fn=fn, elems=elems, dtype=result_dtype, infer_shape=infer_shape)
expected_rt = ragged_factory_ops.constant(
expected_output, ragged_rank=expected_ragged_rank)
self.assertAllEqual(expected_rt, output)
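  # Illustrative sketch of the pattern exercised above (values are examples,
  # not an additional test case): a per-row reduction maps [d1, (d2)] -> [d1].
  #
  #   rt = ragged_factory_ops.constant([[1, 2, 3], [4, 5]])
  #   ragged_map_ops.map_fn(mo.reduce_sum, rt, dtype=dtypes.int32)
  #   # -> [6, 9]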
def testRaggedMapOnStructure(self):
batman = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6, 7]])
# [[10, 20, 30], [40], [50, 60, 70]]
robin = ragged_functional_ops.map_flat_values(mo.multiply, batman, 10)
features = {'batman': batman, 'robin': robin}
def _reduce_sum_from_all(f):
return mo.reduce_sum(f['batman']) + mo.reduce_sum(f['robin'])
output = ragged_map_ops.map_fn(
fn=_reduce_sum_from_all,
elems=features,
dtype=dtypes.int32,
)
self.assertAllEqual(output, [66, 44, 198])
# Test mapping over a dict of RTs can produce a dict of RTs.
def testRaggedMapOnStructure_RaggedOutputs(self):
batman = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6, 7]])
# [[10, 20, 30], [40], [50, 60, 70]]
robin = ragged_functional_ops.map_flat_values(mo.multiply, batman, 10)
features = {'batman': batman, 'robin': robin}
def _increment(f):
return {
'batman': f['batman'] + 1,
'robin': f['robin'] + 1,
}
output = ragged_map_ops.map_fn(
fn=_increment,
elems=features,
infer_shape=False,
dtype={
'batman':
ragged_tensor.RaggedTensorType(
dtype=dtypes.int32, ragged_rank=1),
'robin':
ragged_tensor.RaggedTensorType(
dtype=dtypes.int32, ragged_rank=1)
},
)
self.assertAllEqual(output['batman'], [[2, 3, 4], [5], [6, 7, 8]])
self.assertAllEqual(output['robin'], [[11, 21, 31], [41], [51, 61, 71]])
def testZip(self):
x = ragged_factory_ops.constant(
[[10, 20], [30, 40], [50, 60], [70], [80, 90, 100]], dtypes.int64)
y = array_ops.expand_dims(mo.range(x.nrows(out_type=dtypes.int64)), axis=1)
def _zip(foo):
y_val, x_val = foo
bar = array_ops.tile(y_val, array_ops.shape(x_val))
return array_ops.stack([bar, x_val], axis=1)
output = ragged_map_ops.map_fn(
_zip, (y, x),
dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.int64, ragged_rank=1),
infer_shape=False)
self.assertAllEqual(
output, [[[0, 10], [0, 20]], [[1, 30], [1, 40]], [[2, 50], [2, 60]],
[[3, 70]], [[4, 80], [4, 90], [4, 100]]])
def testBatchGather(self):
tokens = ragged_factory_ops.constant([['hello', '.', 'there'], ['merhaba'],
['bonjour', '.', 'ca va', '?']])
indices = ragged_factory_ops.constant([[0, 2], [0], [0, 2]])
def gather(x):
tokens_val, indices_val = x
return array_ops.gather(tokens_val, indices_val)
data = tokens, indices
out = ragged_map_ops.map_fn(
gather,
data,
dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.string, ragged_rank=1),
infer_shape=False)
self.assertAllEqual(
out, [[b'hello', b'there'], [b'merhaba'], [b'bonjour', b'ca va']])
def testMismatchRaggedRank(self):
elems = ragged_factory_ops.constant([[[1, 2, 3]], [[4, 5], [6, 7]]])
fn = lambda x: ragged_math_ops.reduce_sum(x, axis=0)
with self.assertRaisesRegexp(
ValueError, r'(?s)Expected `fn` to return.*But it returned.*'):
_ = ragged_map_ops.map_fn(
fn,
elems,
dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=23))
def testMismatchRaggedRank2(self):
elems = ragged_factory_ops.constant([[1, 2, 3], [4, 5], [6, 7]])
fn = lambda x: ragged_tensor.RaggedTensor.from_row_starts(x, [0])
with self.assertRaisesRegexp(
ValueError, r'(?s)Expected `fn` to return.*But it returned.*'):
_ = ragged_map_ops.map_fn(
fn,
elems,
dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=10))
def testMapOnSparseTensor(self):
s = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
values=[0, 5, 0, 4],
dense_shape=[2, 2],
)
t2 = ragged_tensor.RaggedTensor.from_sparse(s)
id_t2 = ragged_map_ops.map_fn(
lambda x: x, t2,
)
self.assertAllEqual(id_t2, [[0, 5], [0, 4]])
if __name__ == '__main__':
googletest.main()
|
|
#!/usr/bin/env python
# Copyright (c) 2012, Daniel Zerbino
# All rights reserved.
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3)The name of the author may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import os.path
import argparse
import cPickle as pickle
import random
import glob
import gzip
import copy
import cnavg.preprocess.vcf as vcf
import cnavg.preprocess.bambam as bambam
import cnavg.avg.balanced as balancedAVG
import cnavg.cactus.graph as cactus
import cnavg.cactusSampling.sampling as normalized
import cnavg.cactus.oriented as oriented
import cnavg.cactus.balanced as balancedCactus
import cnavg.historySampling.cycleCover as cycleCover
import cnavg.historySampling.sampleGraphCycles as sampleGraphCycles
import cnavg.history.flattened as flattened
import cnavg.history.ordered as ordered
import cnavg.history.debug as debug
from cnavg.history.ordered import prettify
def _parseOptions():
print "Parsing options"
	parser = argparse.ArgumentParser(description="Process a VCF file to sample possible historical explanations")
parser.add_argument('--vcf', '-v', dest='vcffile', type=file, help='A VCF (ver >= 4.1) file')
parser.add_argument('--bambam', '-b', dest='bambam', nargs='*', help='BamBam files')
parser.add_argument('--snps', '-p', dest='snpsfiles', nargs='*', help='SNPs files (optional)')
parser.add_argument('--index', '-i', dest='index', type=int, help='ID of sampling run')
parser.add_argument('--breaks', '-k', dest='breaks', type=file, help='A BamBam breaks file')
parser.add_argument('--lengths', '-l', dest='chromLengths', type=file, help='Chromosome lengths')
parser.add_argument('--dir', '-d', dest='dir', help='Working directory')
parser.add_argument('--debug', '-g', dest='debug', action='store_true', help='Debug switch for whatever')
parser.add_argument('--continue', '-c', dest='cont', action='store_true', help='Continue sampling for 24 hours')
parser.add_argument('--integer', '-n', dest='integer', action='store_true', help='Integer switch for idealized integer histories')
parser.add_argument('--size', '-s', dest='size', type=int, default=100, help='Number of sampled histories')
parser.add_argument('--temp', '-t', dest='temp', type=float, default=1, help='Starting temperature of MCMC sampling')
return parser.parse_args()
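# Illustrative usage sketch (the script and file names below are hypothetical;
# the flags come from _parseOptions above). A first run without --index builds
# and pickles the cactus graph; later runs pass --index to sample histories:
#
#   python cn-avg.py --vcf calls.vcf --lengths chrom.lengths --dir work
#   python cn-avg.py --dir work --index 0 --size 100 --temp 1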
def _parseGraph(options):
print "Parsing input files"
if options.bambam is not None and options.breaks is not None and options.chromLengths is not None:
options.bambam = sum(map(glob.glob, options.bambam), [])
assert len(options.bambam) > 0, options.bambam
breakends = bambam.parse(options.bambam, options.breaks, options.chromLengths, options.snpsfiles)
elif options.vcffile is not None and options.chromLengths is not None:
breakends = vcf.parse(options.vcffile, options.chromLengths)
else:
if options.vcffile is None:
print "No VCF"
else:
print "VCF: %s" % options.vcffile
if options.chromLengths is None:
print "No chromosome lengths"
else:
print "Chromosome lengths: %s" % options.chromLengths
if options.bambam is None:
print "No BamBam files"
else:
print "BamBam files: %s" % options.bambam
if options.breaks is None:
print "No BamBam break file"
else:
print "Breaks lengths: %s" % options.breaks
sys.exit("Not enough files")
breakends.validate()
return breakends.avg()
def main():
options = _parseOptions()
sampleGraphCycles.TEMPERATURE = options.temp
if options.dir is not None:
if not os.path.exists(options.dir):
os.mkdir(options.dir)
os.chdir(options.dir)
if options.index is None:
## Initial graph construction
G = _parseGraph(options)
B = balancedAVG.BalancedAVG(G)
C = cactus.Cactus(B)
pickle.dump(C, open('CACTUS', "wb"))
else:
H = None
if options.debug:
## Picking up from where we started
OC = pickle.load(open('CACTUS_%i' % options.index))
random.setstate(pickle.load(open("STATE_%i" % options.index)))
elif options.cont:
## Picking up from where we stopped
OC = pickle.load(open('CACTUS_%i' % options.index))
			## Going through all the histories to the last one in the file
			histories_file = open('HISTORIES_%i' % options.index)
			while True:
				try:
					H = pickle.load(histories_file)
				except Exception:
					break
			histories_file.close()
else:
## Just moving from there
pickle.dump(random.getstate(), open("STATE_%i" % options.index, "wb"))
C = pickle.load(open('CACTUS'))
## Sampling possible cactus
NC = normalized.NormalizedCactus(C)
if debug.RATIO_TO_OFFSET:
BC = balancedCactus.BalancedCactus(NC)
else:
BC = NC
OC = oriented.OrientedCactus(BC)
## Saving sampled cactus
pickle.dump(OC, open('CACTUS_%i' % options.index, "wb"))
# Moving into historical space
if options.integer:
debug.INTEGER_HISTORY = True
if H is None:
H = cycleCover.initialHistory(OC)
FH = flattened.flattenGraph(H)
S = FH.simplifyStubsAndTrivials()
F = S.removeLowRatioEvents(debug.RATIO_CUTOFF)
O = ordered.OrderedHistory(F)
# Preparing file for progressive write
if options.cont:
stats_file = open("HISTORY_STATS_%li" % options.index, "a")
pickle_file = open('HISTORIES_%i' % options.index, "ab")
braney_file = gzip.open("HISTORIES_%i.braney" % options.index, "a")
else:
stats_file = open("HISTORY_STATS_%li" % options.index, "w")
pickle_file = open('HISTORIES_%i' % options.index, "wb")
braney_file = gzip.open("HISTORIES_%i.braney" % options.index, "w")
stats_file.write("%s\n" % H.stats())
#pickle.dump(H, pickle_file)
braney_file.write("%s\n" % O.braneyText(0, H.rearrangementCost()))
#tree_file = open("HISTORY_TREES_%li" % options.index, "w")
#tree_file.write("%s\n" % O.newick())
tree_file = None
# Sampling
for i in range(options.size):
H2 = copy.copy(H)
# Shuffle events
for event in list(H.parent):
H2.correctSchedulingError(event)
H = H2
stats_file.write("%s\n" % H.stats())
FH = flattened.flattenGraph(H)
S = FH.simplifyStubsAndTrivials()
F = S.removeLowRatioEvents(debug.RATIO_CUTOFF)
O = ordered.OrderedHistory(F)
braney_file.write("%s\n" % O.braneyText(i+1, H.rearrangementCost()))
# Cleaning up
stats_file.close()
pickle_file.close()
braney_file.close()
#tree_file.close()
## Removing temp file
if os.path.exists("STATE_%i" % options.index):
os.remove("STATE_%i" % options.index)
if __name__ == "__main__":
main()
|
|
import json
import re
import click
import cligj
import mapbox
from mapboxcli.errors import MapboxCLIException
def waypoint_snapping_callback(ctx, param, value):
results = []
tuple_pattern = re.compile("[,]")
int_pattern = re.compile("[0-9]")
# value is an n-tuple, each element of
# which contains input from the user.
#
# Iterate over each element, determining
# whether to convert it to a tuple,
# convert it to an int, or leave it as
# a str.
#
# Append each element to results, which
# the Directions SDK will attempt to
# validate.
if len(value) == 0:
return None
for element in value:
# If the element contains a comma, then assume
# that the user intended to pass in a tuple.
#
# Convert each item in the element to an int,
# and create a tuple containing all items.
#
# Raise an error if the item is not a valid int.
#
# (The SDK accepts a three-tuple with ints for
# radius, angle, and range.)
if re.search(tuple_pattern, element):
element = re.split(tuple_pattern, element)
for index in range(0, len(element)):
try:
element[index] = int(element[index])
except ValueError as exc:
raise mapbox.errors.ValidationError(str(exc))
element = tuple(element)
results.append(element)
# If the element contains a decimal number but not
# a comma, then assume that the user intended to
# pass in an int.
#
# Convert the element to an int.
#
# Raise an error if the item is not a valid int.
#
# (The Directions SDK accepts an int for radius.)
elif re.search(int_pattern, element):
try:
element = int(element)
except ValueError as exc:
raise mapbox.errors.ValidationError(str(exc))
results.append(element)
# If the element contains neither a decimal number
# nor a comma, then assume that the user intended
# to pass in a str.
#
# Do nothing since the element is already a str.
#
# (The Directions SDK accepts a str for unlimited radius.)
else:
results.append(element)
return results
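# Illustrative sketch of the callback's behaviour (values are made up; ctx and
# param are not used by the parsing logic, so None is passed for both):
#
#   >>> waypoint_snapping_callback(None, None, ("30,45,90", "25", "unlimited"))
#   [(30, 45, 90), 25, 'unlimited']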
@click.command(short_help="Routing between waypoints")
@cligj.features_in_arg
@click.option(
"--profile",
type=click.Choice(mapbox.Directions.valid_profiles),
default="mapbox/driving",
help="Routing profile"
)
@click.option(
"--alternatives/--no-alternatives",
default=True,
help="Whether to try to return alternative routes"
)
@click.option(
"--geometries",
type=click.Choice(mapbox.Directions.valid_geom_encoding),
default="geojson",
help="Format of returned geometry"
)
# Directions.valid_geom_overview contains two
# elements of type str and one element of type bool.
# This causes the Directions CLI's --help option to
# raise a TypeError. To prevent this, we convert
# the bool to a str.
@click.option(
"--overview",
type=click.Choice(str(item) for item in mapbox.Directions.valid_geom_overview),
help="Type of returned overview geometry"
)
@click.option(
"--steps/--no-steps",
default=True,
help="Whether to return steps and turn-by-turn instructions"
)
@click.option(
"--continue-straight/--no-continue-straight",
default=True,
help="Whether to see the allowed direction of travel when departing the original waypoint"
)
@click.option(
"--waypoint-snapping",
multiple=True,
callback=waypoint_snapping_callback,
help="Controls waypoint snapping"
)
@click.option(
"--annotations",
help="Additional metadata along the route"
)
@click.option(
"--language",
help="Language of returned turn-by-turn instructions"
)
@click.option(
"-o",
"--output",
default="-",
help="Save output to a file"
)
@click.pass_context
def directions(ctx, features, profile, alternatives,
geometries, overview, steps, continue_straight,
waypoint_snapping, annotations, language, output):
"""The Mapbox Directions API will show you how to get
where you're going.
mapbox directions "[0, 0]" "[1, 1]"
An access token is required. See "mapbox --help".
"""
access_token = (ctx.obj and ctx.obj.get("access_token")) or None
service = mapbox.Directions(access_token=access_token)
# The Directions SDK expects False to be
# a bool, not a str.
if overview == "False":
overview = False
# When using waypoint snapping, the
# Directions SDK expects features to be
# a list, not a generator.
if waypoint_snapping is not None:
features = list(features)
if annotations:
annotations = annotations.split(",")
stdout = click.open_file(output, "w")
try:
res = service.directions(
features,
profile=profile,
alternatives=alternatives,
geometries=geometries,
overview=overview,
steps=steps,
continue_straight=continue_straight,
waypoint_snapping=waypoint_snapping,
annotations=annotations,
language=language
)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if res.status_code == 200:
if geometries == "geojson":
click.echo(json.dumps(res.geojson()), file=stdout)
else:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
|
|
from sympy.assumptions.ask import Q
from sympy.core.numbers import oo
from sympy.core.relational import Equality
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, symbols)
from sympy.sets.sets import (EmptySet, Interval, Union)
from sympy.simplify.simplify import simplify
from sympy.logic.boolalg import (
And, Boolean, Equivalent, ITE, Implies, Nand, Nor, Not, Or,
POSform, SOPform, Xor, conjuncts, disjuncts,
distribute_or_over_and, distribute_and_over_or,
eliminate_implications, is_nnf, is_cnf, is_dnf, simplify_logic,
to_nnf, to_cnf, to_dnf, to_int_repr, bool_map, true, false,
BooleanAtom, is_literal, term_to_integer, integer_to_term,
truth_table)
from sympy.utilities.pytest import raises, XFAIL
from sympy.utilities import cartes
A, B, C, D = symbols('A,B,C,D')
def test_overloading():
"""Test that |, & are overloaded as expected"""
assert A & B == And(A, B)
assert A | B == Or(A, B)
assert (A & B) | C == Or(And(A, B), C)
assert A >> B == Implies(A, B)
assert A << B == Implies(B, A)
assert ~A == Not(A)
assert A ^ B == Xor(A, B)
def test_And():
assert And() is true
assert And(A) == A
assert And(True) is true
assert And(False) is false
assert And(True, True ) is true
assert And(True, False) is false
assert And(False, False) is false
assert And(True, A) == A
assert And(False, A) is false
assert And(True, True, True) is true
assert And(True, True, A) == A
assert And(True, False, A) is false
assert And(2, A) == A
assert And(2, 3) is true
assert And(A < 1, A >= 1) is false
e = A > 1
assert And(e, e.canonical) == e.canonical
g, l, ge, le = A > B, B < A, A >= B, B <= A
assert And(g, l, ge, le) == And(l, le)
def test_Or():
assert Or() is false
assert Or(A) == A
assert Or(True) is true
assert Or(False) is false
assert Or(True, True ) is true
assert Or(True, False) is true
assert Or(False, False) is false
assert Or(True, A) is true
assert Or(False, A) == A
assert Or(True, False, False) is true
assert Or(True, False, A) is true
assert Or(False, False, A) == A
assert Or(2, A) is true
assert Or(A < 1, A >= 1) is true
e = A > 1
assert Or(e, e.canonical) == e
g, l, ge, le = A > B, B < A, A >= B, B <= A
assert Or(g, l, ge, le) == Or(g, ge)
def test_Xor():
assert Xor() is false
assert Xor(A) == A
assert Xor(A, A) is false
assert Xor(True, A, A) is true
assert Xor(A, A, A, A, A) == A
assert Xor(True, False, False, A, B) == ~Xor(A, B)
assert Xor(True) is true
assert Xor(False) is false
assert Xor(True, True ) is false
assert Xor(True, False) is true
assert Xor(False, False) is false
assert Xor(True, A) == ~A
assert Xor(False, A) == A
assert Xor(True, False, False) is true
assert Xor(True, False, A) == ~A
assert Xor(False, False, A) == A
assert isinstance(Xor(A, B), Xor)
assert Xor(A, B, Xor(C, D)) == Xor(A, B, C, D)
assert Xor(A, B, Xor(B, C)) == Xor(A, C)
assert Xor(A < 1, A >= 1, B) == Xor(0, 1, B) == Xor(1, 0, B)
e = A > 1
assert Xor(e, e.canonical) == Xor(0, 0) == Xor(1, 1)
def test_Not():
raises(TypeError, lambda: Not(True, False))
assert Not(True) is false
assert Not(False) is true
assert Not(0) is true
assert Not(1) is false
assert Not(2) is false
def test_Nand():
assert Nand() is false
assert Nand(A) == ~A
assert Nand(True) is false
assert Nand(False) is true
assert Nand(True, True ) is false
assert Nand(True, False) is true
assert Nand(False, False) is true
assert Nand(True, A) == ~A
assert Nand(False, A) is true
assert Nand(True, True, True) is false
assert Nand(True, True, A) == ~A
assert Nand(True, False, A) is true
def test_Nor():
assert Nor() is true
assert Nor(A) == ~A
assert Nor(True) is false
assert Nor(False) is true
assert Nor(True, True ) is false
assert Nor(True, False) is false
assert Nor(False, False) is true
assert Nor(True, A) is false
assert Nor(False, A) == ~A
assert Nor(True, True, True) is false
assert Nor(True, True, A) is false
assert Nor(True, False, A) is false
def test_Implies():
raises(ValueError, lambda: Implies(A, B, C))
assert Implies(True, True) is true
assert Implies(True, False) is false
assert Implies(False, True) is true
assert Implies(False, False) is true
assert Implies(0, A) is true
assert Implies(1, 1) is true
assert Implies(1, 0) is false
assert A >> B == B << A
assert (A < 1) >> (A >= 1) == (A >= 1)
assert (A < 1) >> (S(1) > A) is true
assert A >> A is true
def test_Equivalent():
assert Equivalent(A, B) == Equivalent(B, A) == Equivalent(A, B, A)
assert Equivalent() is true
assert Equivalent(A, A) == Equivalent(A) is true
assert Equivalent(True, True) == Equivalent(False, False) is true
assert Equivalent(True, False) == Equivalent(False, True) is false
assert Equivalent(A, True) == A
assert Equivalent(A, False) == Not(A)
assert Equivalent(A, B, True) == A & B
assert Equivalent(A, B, False) == ~A & ~B
assert Equivalent(1, A) == A
assert Equivalent(0, A) == Not(A)
assert Equivalent(A, Equivalent(B, C)) != Equivalent(Equivalent(A, B), C)
assert Equivalent(A < 1, A >= 1) is false
assert Equivalent(A < 1, A >= 1, 0) is false
assert Equivalent(A < 1, A >= 1, 1) is false
assert Equivalent(A < 1, S(1) > A) == Equivalent(1, 1) == Equivalent(0, 0)
assert Equivalent(Equality(A, B), Equality(B, A)) is true
def test_equals():
assert Not(Or(A, B)).equals( And(Not(A), Not(B)) ) is True
assert Equivalent(A, B).equals((A >> B) & (B >> A)) is True
assert ((A | ~B) & (~A | B)).equals((~A & ~B) | (A & B)) is True
assert (A >> B).equals(~A >> ~B) is False
assert (A >> (B >> A)).equals(A >> (C >> A)) is False
raises(NotImplementedError, lambda: And(A, A < B).equals(And(A, B > A)))
def test_simplification():
"""
Test working of simplification methods.
"""
set1 = [[0, 0, 1], [0, 1, 1], [1, 0, 0], [1, 1, 0]]
set2 = [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1]]
from sympy.abc import w, x, y, z
assert SOPform([x, y, z], set1) == Or(And(Not(x), z), And(Not(z), x))
assert Not(SOPform([x, y, z], set2)) == Not(Or(And(Not(x), Not(z)), And(x, z)))
assert POSform([x, y, z], set1 + set2) is true
assert SOPform([x, y, z], set1 + set2) is true
assert SOPform([Dummy(), Dummy(), Dummy()], set1 + set2) is true
minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1],
[1, 1, 1, 1]]
dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
assert (
SOPform([w, x, y, z], minterms, dontcares) ==
Or(And(Not(w), z), And(y, z)))
assert POSform([w, x, y, z], minterms, dontcares) == And(Or(Not(w), y), z)
# test simplification
ans = And(A, Or(B, C))
assert simplify_logic(A & (B | C)) == ans
assert simplify_logic((A & B) | (A & C)) == ans
assert simplify_logic(Implies(A, B)) == Or(Not(A), B)
assert simplify_logic(Equivalent(A, B)) == \
Or(And(A, B), And(Not(A), Not(B)))
assert simplify_logic(And(Equality(A, 2), C)) == And(Equality(A, 2), C)
assert simplify_logic(And(Equality(A, 2), A)) == And(Equality(A, 2), A)
assert simplify_logic(And(Equality(A, B), C)) == And(Equality(A, B), C)
assert simplify_logic(Or(And(Equality(A, 3), B), And(Equality(A, 3), C))) \
== And(Equality(A, 3), Or(B, C))
e = And(A, x**2 - x)
assert simplify_logic(e) == And(A, x*(x - 1))
assert simplify_logic(e, deep=False) == e
# check input
ans = SOPform([x, y], [[1, 0]])
assert SOPform([x, y], [[1, 0]]) == ans
assert POSform([x, y], [[1, 0]]) == ans
raises(ValueError, lambda: SOPform([x], [[1]], [[1]]))
assert SOPform([x], [[1]], [[0]]) is true
assert SOPform([x], [[0]], [[1]]) is true
assert SOPform([x], [], []) is false
raises(ValueError, lambda: POSform([x], [[1]], [[1]]))
assert POSform([x], [[1]], [[0]]) is true
assert POSform([x], [[0]], [[1]]) is true
assert POSform([x], [], []) is false
# check working of simplify
assert simplify((A & B) | (A & C)) == And(A, Or(B, C))
assert simplify(And(x, Not(x))) == False
assert simplify(Or(x, Not(x))) == True
def test_bool_map():
"""
Test working of bool_map function.
"""
minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1],
[1, 1, 1, 1]]
from sympy.abc import a, b, c, w, x, y, z
assert bool_map(Not(Not(a)), a) == (a, {a: a})
assert bool_map(SOPform([w, x, y, z], minterms),
POSform([w, x, y, z], minterms)) == \
(And(Or(Not(w), y), Or(Not(x), y), z), {x: x, w: w, z: z, y: y})
assert bool_map(SOPform([x, z, y],[[1, 0, 1]]),
SOPform([a, b, c],[[1, 0, 1]])) != False
function1 = SOPform([x,z,y],[[1, 0, 1], [0, 0, 1]])
function2 = SOPform([a,b,c],[[1, 0, 1], [1, 0, 0]])
assert bool_map(function1, function2) == \
(function1, {y: a, z: b})
def test_bool_symbol():
"""Test that mixing symbols with boolean values
works as expected"""
assert And(A, True) == A
assert And(A, True, True) == A
assert And(A, False) is false
assert And(A, True, False) is false
assert Or(A, True) is true
assert Or(A, False) == A
def test_is_boolean():
assert true.is_Boolean
assert (A & B).is_Boolean
assert (A | B).is_Boolean
assert (~A).is_Boolean
assert (A ^ B).is_Boolean
def test_subs():
assert (A & B).subs(A, True) == B
assert (A & B).subs(A, False) is false
assert (A & B).subs(B, True) == A
assert (A & B).subs(B, False) is false
assert (A & B).subs({A: True, B: True}) is true
assert (A | B).subs(A, True) is true
assert (A | B).subs(A, False) == B
assert (A | B).subs(B, True) is true
assert (A | B).subs(B, False) == A
assert (A | B).subs({A: True, B: True}) is true
"""
we test for axioms of boolean algebra
see http://en.wikipedia.org/wiki/Boolean_algebra_(structure)
"""
def test_commutative():
"""Test for commutativity of And and Or"""
A, B = map(Boolean, symbols('A,B'))
assert A & B == B & A
assert A | B == B | A
def test_and_associativity():
"""Test for associativity of And"""
assert (A & B) & C == A & (B & C)
def test_or_associativity():
assert ((A | B) | C) == (A | (B | C))
def test_double_negation():
a = Boolean()
assert ~(~a) == a
# test methods
def test_eliminate_implications():
from sympy.abc import A, B, C, D
assert eliminate_implications(Implies(A, B, evaluate=False)) == (~A) | B
assert eliminate_implications(
A >> (C >> Not(B))) == Or(Or(Not(B), Not(C)), Not(A))
assert eliminate_implications(Equivalent(A, B, C, D)) == \
(~A | B) & (~B | C) & (~C | D) & (~D | A)
def test_conjuncts():
assert conjuncts(A & B & C) == set([A, B, C])
assert conjuncts((A | B) & C) == set([A | B, C])
assert conjuncts(A) == set([A])
assert conjuncts(True) == set([True])
assert conjuncts(False) == set([False])
def test_disjuncts():
assert disjuncts(A | B | C) == set([A, B, C])
assert disjuncts((A | B) & C) == set([(A | B) & C])
assert disjuncts(A) == set([A])
assert disjuncts(True) == set([True])
assert disjuncts(False) == set([False])
def test_distribute():
assert distribute_and_over_or(Or(And(A, B), C)) == And(Or(A, C), Or(B, C))
assert distribute_or_over_and(And(A, Or(B, C))) == Or(And(A, B), And(A, C))
def test_to_nnf():
assert to_nnf(true) is true
assert to_nnf(false) is false
assert to_nnf(A) == A
assert to_nnf(A | ~A | B) is true
assert to_nnf(A & ~A & B) is false
assert to_nnf(A >> B) == ~A | B
assert to_nnf(Equivalent(A, B, C)) == (~A | B) & (~B | C) & (~C | A)
assert to_nnf(A ^ B ^ C) == \
(A | B | C) & (~A | ~B | C) & (A | ~B | ~C) & (~A | B | ~C)
assert to_nnf(ITE(A, B, C)) == (~A | B) & (A | C)
assert to_nnf(Not(A | B | C)) == ~A & ~B & ~C
assert to_nnf(Not(A & B & C)) == ~A | ~B | ~C
assert to_nnf(Not(A >> B)) == A & ~B
assert to_nnf(Not(Equivalent(A, B, C))) == And(Or(A, B, C), Or(~A, ~B, ~C))
assert to_nnf(Not(A ^ B ^ C)) == \
(~A | B | C) & (A | ~B | C) & (A | B | ~C) & (~A | ~B | ~C)
assert to_nnf(Not(ITE(A, B, C))) == (~A | ~B) & (A | ~C)
assert to_nnf((A >> B) ^ (B >> A)) == (A & ~B) | (~A & B)
assert to_nnf((A >> B) ^ (B >> A), False) == \
(~A | ~B | A | B) & ((A & ~B) | (~A & B))
def test_to_cnf():
assert to_cnf(~(B | C)) == And(Not(B), Not(C))
assert to_cnf((A & B) | C) == And(Or(A, C), Or(B, C))
assert to_cnf(A >> B) == (~A) | B
assert to_cnf(A >> (B & C)) == (~A | B) & (~A | C)
assert to_cnf(A & (B | C) | ~A & (B | C), True) == B | C
assert to_cnf(Equivalent(A, B)) == And(Or(A, Not(B)), Or(B, Not(A)))
assert to_cnf(Equivalent(A, B & C)) == \
(~A | B) & (~A | C) & (~B | ~C | A)
assert to_cnf(Equivalent(A, B | C), True) == \
And(Or(Not(B), A), Or(Not(C), A), Or(B, C, Not(A)))
def test_to_dnf():
assert to_dnf(~(B | C)) == And(Not(B), Not(C))
assert to_dnf(A & (B | C)) == Or(And(A, B), And(A, C))
assert to_dnf(A >> B) == (~A) | B
assert to_dnf(A >> (B & C)) == (~A) | (B & C)
assert to_dnf(Equivalent(A, B), True) == \
Or(And(A, B), And(Not(A), Not(B)))
assert to_dnf(Equivalent(A, B & C), True) == \
Or(And(A, B, C), And(Not(A), Not(B)), And(Not(A), Not(C)))
def test_to_int_repr():
x, y, z = map(Boolean, symbols('x,y,z'))
def sorted_recursive(arg):
try:
return sorted(sorted_recursive(x) for x in arg)
except TypeError: # arg is not a sequence
return arg
assert sorted_recursive(to_int_repr([x | y, z | x], [x, y, z])) == \
sorted_recursive([[1, 2], [1, 3]])
assert sorted_recursive(to_int_repr([x | y, z | ~x], [x, y, z])) == \
sorted_recursive([[1, 2], [3, -1]])
def test_is_nnf():
from sympy.abc import A, B
assert is_nnf(true) is True
assert is_nnf(A) is True
assert is_nnf(~A) is True
assert is_nnf(A & B) is True
assert is_nnf((A & B) | (~A & A) | (~B & B) | (~A & ~B), False) is True
assert is_nnf((A | B) & (~A | ~B)) is True
assert is_nnf(Not(Or(A, B))) is False
assert is_nnf(A ^ B) is False
assert is_nnf((A & B) | (~A & A) | (~B & B) | (~A & ~B), True) is False
def test_is_cnf():
x, y, z = symbols('x,y,z')
assert is_cnf(x) is True
assert is_cnf(x | y | z) is True
assert is_cnf(x & y & z) is True
assert is_cnf((x | y) & z) is True
assert is_cnf((x & y) | z) is False
def test_is_dnf():
x, y, z = symbols('x,y,z')
assert is_dnf(x) is True
assert is_dnf(x | y | z) is True
assert is_dnf(x & y & z) is True
assert is_dnf((x & y) | z) is True
assert is_dnf((x | y) & z) is False
def test_ITE():
A, B, C = map(Boolean, symbols('A,B,C'))
assert ITE(True, False, True) is false
assert ITE(True, True, False) is true
assert ITE(False, True, False) is false
assert ITE(False, False, True) is true
assert isinstance(ITE(A, B, C), ITE)
A = True
assert ITE(A, B, C) == B
A = False
assert ITE(A, B, C) == C
B = True
assert ITE(And(A, B), B, C) == C
assert ITE(Or(A, False), And(B, True), False) is false
def test_ITE_diff():
# analogous to Piecewise.diff
x = symbols('x')
assert ITE(x > 0, x**2, x).diff(x) == ITE(x > 0, 2*x, 1)
def test_is_literal():
assert is_literal(True) is True
assert is_literal(False) is True
assert is_literal(A) is True
assert is_literal(~A) is True
assert is_literal(Or(A, B)) is False
assert is_literal(Q.zero(A)) is True
assert is_literal(Not(Q.zero(A))) is True
assert is_literal(Or(A, B)) is False
assert is_literal(And(Q.zero(A), Q.zero(B))) is False
def test_operators():
# Mostly test __and__, __rand__, and so on
assert True & A == A & True == A
assert False & A == A & False == False
assert A & B == And(A, B)
assert True | A == A | True == True
assert False | A == A | False == A
assert A | B == Or(A, B)
assert ~A == Not(A)
assert True >> A == A << True == A
assert False >> A == A << False == True
assert A >> True == True << A == True
assert A >> False == False << A == ~A
assert A >> B == B << A == Implies(A, B)
assert True ^ A == A ^ True == ~A
assert False ^ A == A ^ False == A
assert A ^ B == Xor(A, B)
def test_true_false():
x = symbols('x')
assert true is S.true
assert false is S.false
assert true is not True
assert false is not False
assert true
assert not false
assert true == True
assert false == False
assert not (true == False)
assert not (false == True)
assert not (true == false)
assert hash(true) == hash(True)
assert hash(false) == hash(False)
assert len(set([true, True])) == len(set([false, False])) == 1
assert isinstance(true, BooleanAtom)
assert isinstance(false, BooleanAtom)
# We don't want to subclass from bool, because bool subclasses from
# int. But operators like &, |, ^, <<, >>, and ~ act differently on 0 and
    # 1 than we want them to on true and false. See the docstrings of the
# various And, Or, etc. functions for examples.
assert not isinstance(true, bool)
assert not isinstance(false, bool)
# Note: using 'is' comparison is important here. We want these to return
# true and false, not True and False
assert Not(true) is false
assert Not(True) is false
assert Not(false) is true
assert Not(False) is true
assert ~true is false
assert ~false is true
for T, F in cartes([True, true], [False, false]):
assert And(T, F) is false
assert And(F, T) is false
assert And(F, F) is false
assert And(T, T) is true
assert And(T, x) == x
assert And(F, x) is false
if not (T is True and F is False):
assert T & F is false
assert F & T is false
if not F is False:
assert F & F is false
if not T is True:
assert T & T is true
assert Or(T, F) is true
assert Or(F, T) is true
assert Or(F, F) is false
assert Or(T, T) is true
assert Or(T, x) is true
assert Or(F, x) == x
if not (T is True and F is False):
assert T | F is true
assert F | T is true
if not F is False:
assert F | F is false
if not T is True:
assert T | T is true
assert Xor(T, F) is true
assert Xor(F, T) is true
assert Xor(F, F) is false
assert Xor(T, T) is false
assert Xor(T, x) == ~x
assert Xor(F, x) == x
if not (T is True and F is False):
assert T ^ F is true
assert F ^ T is true
if not F is False:
assert F ^ F is false
if not T is True:
assert T ^ T is false
assert Nand(T, F) is true
assert Nand(F, T) is true
assert Nand(F, F) is true
assert Nand(T, T) is false
assert Nand(T, x) == ~x
assert Nand(F, x) is true
assert Nor(T, F) is false
assert Nor(F, T) is false
assert Nor(F, F) is true
assert Nor(T, T) is false
assert Nor(T, x) is false
assert Nor(F, x) == ~x
assert Implies(T, F) is false
assert Implies(F, T) is true
assert Implies(F, F) is true
assert Implies(T, T) is true
assert Implies(T, x) == x
assert Implies(F, x) is true
assert Implies(x, T) is true
assert Implies(x, F) == ~x
if not (T is True and F is False):
assert T >> F is false
assert F << T is false
assert F >> T is true
assert T << F is true
if not F is False:
assert F >> F is true
assert F << F is true
if not T is True:
assert T >> T is true
assert T << T is true
assert Equivalent(T, F) is false
assert Equivalent(F, T) is false
assert Equivalent(F, F) is true
assert Equivalent(T, T) is true
assert Equivalent(T, x) == x
assert Equivalent(F, x) == ~x
assert Equivalent(x, T) == x
assert Equivalent(x, F) == ~x
assert ITE(T, T, T) is true
assert ITE(T, T, F) is true
assert ITE(T, F, T) is false
assert ITE(T, F, F) is false
assert ITE(F, T, T) is true
assert ITE(F, T, F) is false
assert ITE(F, F, T) is true
assert ITE(F, F, F) is false
def test_bool_as_set():
x = symbols('x')
assert And(x <= 2, x >= -2).as_set() == Interval(-2, 2)
assert Or(x >= 2, x <= -2).as_set() == Interval(-oo, -2) + Interval(2, oo)
assert Not(x > 2).as_set() == Interval(-oo, 2)
# issue 10240
assert Not(And(x > 2, x < 3)).as_set() == \
Union(Interval(-oo,2),Interval(3,oo))
assert true.as_set() == S.UniversalSet
assert false.as_set() == EmptySet()
@XFAIL
def test_multivariate_bool_as_set():
x, y = symbols('x,y')
assert And(x >= 0, y >= 0).as_set() == Interval(0, oo)*Interval(0, oo)
assert Or(x >= 0, y >= 0).as_set() == S.Reals*S.Reals - \
Interval(-oo, 0, True, True)*Interval(-oo, 0, True, True)
def test_all_or_nothing():
x = symbols('x', real=True)
    args = x >= -oo, x <= oo
v = And(*args)
if v.func is And:
assert len(v.args) == len(args) - args.count(S.true)
else:
assert v == True
v = Or(*args)
if v.func is Or:
assert len(v.args) == 2
else:
assert v == True
def test_canonical_atoms():
assert true.canonical == true
assert false.canonical == false
def test_issue_8777():
x = symbols('x')
assert And(x > 2, x < oo).as_set() == Interval(2, oo, left_open=True)
assert And(x >= 1, x < oo).as_set() == Interval(1, oo)
assert (x < oo).as_set() == Interval(-oo, oo)
assert (x > -oo).as_set() == Interval(-oo, oo)
def test_issue_8975():
x = symbols('x')
assert Or(And(-oo < x, x <= -2), And(2 <= x, x < oo)).as_set() == \
Interval(-oo, -2) + Interval(2, oo)
def test_term_to_integer():
assert term_to_integer([1, 0, 1, 0, 0, 1, 0]) == 82
assert term_to_integer('0010101000111001') == 10809
def test_integer_to_term():
assert integer_to_term(777) == [1, 1, 0, 0, 0, 0, 1, 0, 0, 1]
assert integer_to_term(123, 3) == [1, 1, 1, 1, 0, 1, 1]
assert integer_to_term(456, 16) == [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0]
def test_truth_table():
x, y = symbols('x,y')
assert list(truth_table(And(x, y), [x, y], input=False)) == [False, False, False, True]
assert list(truth_table(x | y, [x, y], input=False)) == [False, True, True, True]
assert list(truth_table(x >> y, [x, y], input=False)) == [True, True, False, True]
|
|
###################################################################################
# SNAPSHOT STUFF
###################################################################################
import inspect
import __main__
from param.parameterized import Parameterized, Parameter
# CEBALERT: Can't this stuff move to the ParameterizedMetaclass?
class PicklableClassAttributes(object):
"""
Supports pickling of Parameterized class attributes for a given module.
When requested to be pickled, stores a module's PO classes' attributes,
and any given startup_commands. On unpickling, executes the startup
commands and sets the class attributes.
"""
# classes that aren't parameterized any more
do_not_restore = []
deleted_params = {"Simulation": ["time_type","time_type_args"]}
# Support for changing parameter names
# CEBALERT: doesn't support things like changing output_fn to output_fns,
# where we also need to do output_fn=x -> output_fns=[x].
# Should implement fuller support in legacy, and remove this from here.
param_name_changes = {}
# e.g. you change imagen.Gaussian.aspect_ratio to aspect_ration
# _param_name_changes['imagen.Gaussian']={'aspect_ratio':'aspect_ration'}
#
# (not yet finished - do we need to add information about version numbers?)
# CEBALERT: same comments as above about doing this more cleanly
param_moves = {}
# pylint: disable-msg=R0903
# CB: might have mixed up module and package in the docs.
def __init__(self,module,exclusions=(),startup_commands=()):
"""
module: a module object, such as topo
Any submodules listed by name in exclusions will not have their
classes' attributes saved.
"""
self.module=module
self.exclude=exclusions
self.startup_commands=startup_commands
def __getstate__(self):
"""
Return a dictionary of self.module's PO classes' attributes, plus
self.startup_commands.
"""
class_attributes = {}
self.get_PO_class_attributes(self.module,class_attributes,[],exclude=self.exclude)
# CB: we don't want to pickle anything about this object except what
# we want to have executed on unpickling (this object's not going to be hanging around).
return {'class_attributes':class_attributes,
'startup_commands':self.startup_commands}
def __setstate__(self,state):
"""
Execute the startup commands and set class attributes.
"""
self.startup_commands = state['startup_commands']
for cmd in self.startup_commands:
exec cmd in __main__.__dict__
to_restore = {}
########## pre-processing (renames, moves, etc)
for class_path,state in state['class_attributes'].items():
# from e.g. "topo.base.parameter.Parameter", we want "topo.base.parameter"
if class_path in self.do_not_restore:
#print "Did not restore:",class_path
                continue
for p_name,p_obj in state.items():
if p_name in self.param_moves.get(class_path,{}):
assert p_name not in self.param_name_changes.get(class_path,{})
if len(self.param_moves[class_path][p_name]) == 2:
new_class_path,new_p_name = self.param_moves[class_path][p_name]
if len(self.param_moves[class_path][p_name]) == 3:
new_class_path,new_p_name,fn = self.param_moves[class_path][p_name]
p_obj = fn(p_obj)
if new_class_path not in to_restore:
to_restore[new_class_path] = {}
Parameterized().message("%s.%s has been moved to %s.%s"%(class_path,p_name,new_class_path,new_p_name))
assert new_p_name not in to_restore[new_class_path]
to_restore[new_class_path][new_p_name]=p_obj
elif p_name in self.param_name_changes.get(class_path,{}):
if isinstance(self.param_name_changes[class_path][p_name],tuple):
new_p_name, fn = self.param_name_changes[class_path][p_name]
p_obj = fn(p_obj)
else:
new_p_name= self.param_name_changes[class_path][p_name]
if class_path not in to_restore:
to_restore[class_path] = {}
Parameterized().message("%s's %s parameter has been renamed to %s."%(class_path,p_name,new_p_name))
to_restore[class_path][new_p_name] = p_obj
else:
if class_path not in to_restore:
to_restore[class_path] = {}
to_restore[class_path][p_name]= p_obj
########## restoring
for class_path in to_restore:
module_path = class_path[0:class_path.rindex('.')]
class_name = class_path[class_path.rindex('.')+1::]
deleted_params = self.deleted_params.get(class_name, [])
try:
module = __import__(module_path,fromlist=[module_path])
except:
Parameterized().warning("Could not find module '%s' to restore parameter values of '%s' (module might have been moved or renamed; if you are using this module, please file a support request via topographica.org"%(module_path,class_path))
break
try:
class_=getattr(module,class_name)
except:
Parameterized().warning("Could not find class '%s' to restore its parameter values (class might have been removed or renamed; if you are using this class, please file a support request via topographica.org)."%class_path)
                continue
for p_name,p_obj in to_restore[class_path].items():
try:
if p_name in deleted_params:
pass
elif p_name not in class_.params():
# CEBALERT: GlobalParams's source code never has
# parameters. If we move Parameter saving and
# restoring to Parameterized, could allow
# individual classes to customize Parameter
# restoration.
if class_.__name__!='GlobalParams':
Parameterized(name='load_snapshot').warning("%s.%s found in snapshot, but '%s' is no longer defined as a Parameter by the current version of %s. If you are using this class, please file a support request via topographica.org." % (class_.__name__, p_name,p_name,class_.__name__))
else:
setattr(class_,p_name,p_obj)
except:
                    Parameterized(name='load_snapshot').warning("%s.%s found in snapshot, but '%s' could not be restored to the current version of %s. If you are using this class, please file a support request via topographica.org." % (class_.__name__, p_name,p_name,class_.__name__))
# CB: I guess this could be simplified
def get_PO_class_attributes(self,module,class_attributes,processed_modules,exclude=()):
"""
Recursively search module and get attributes of Parameterized classes within it.
class_attributes is a dictionary {module.path.and.Classname: state}, where state
is the dictionary {attribute: value}.
Something is considered a module for our purposes if inspect says it's a module,
and it defines __all__. We only search through modules listed in __all__.
Keeps a list of processed modules to avoid looking at the same one
more than once (since e.g. __main__ contains __main__ contains
__main__...)
Modules can be specifically excluded if listed in exclude.
"""
dict_ = module.__dict__
for (k,v) in dict_.items():
if '__all__' in dict_ and inspect.ismodule(v) and k not in exclude:
if k in dict_['__all__'] and v not in processed_modules:
self.get_PO_class_attributes(v,class_attributes,processed_modules,exclude)
processed_modules.append(v)
else:
if isinstance(v,type) and issubclass(v,Parameterized):
# Note: we take the class name as v.__name__, not
# k, because k might be just a label for the true
# class. For example, if someone imports a class
# using 'as', the name in the local namespace
# could be different from the name when the class
# was defined. It is correct to set the
# attributes on the true class.
full_class_path = v.__module__+'.'+v.__name__
class_attributes[full_class_path] = {}
# Parameterized classes always have parameters in
# __dict__, never in __slots__
for (name,obj) in v.__dict__.items():
if isinstance(obj,Parameter) and obj.pickle_default_value:
class_attributes[full_class_path][name] = obj
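# Illustrative sketch of the structure built by get_PO_class_attributes
# (the module and parameter names here are hypothetical):
#
#   class_attributes == {
#       'topo.base.sheet.Sheet': {'nominal_density': <Parameter object>},
#       'topo.base.projection.Projection': {'strength': <Parameter object>},
#   }
#
# i.e. a {full.class.path: {parameter_name: Parameter}} mapping that only
# includes parameters whose pickle_default_value is True.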
|
|
"""Helper classes for dealing with Haystack faceting."""
# TODO: Clean up and refactor
import re
import datetime
from django.utils.http import urlquote_plus as quote_plus
class FacetClass(object):
"""Class representing a facet with multiple values
to filter on. i.e. keywords => [foo, bar ...]"""
FACET_SORT_COUNT = 0
FACET_SORT_NAME = 1
def __init__(self, name, prettyname, sort=FACET_SORT_COUNT,
paramname=None, renderfn=None, raw=False):
self.name = name
self.prettyname = prettyname
self.paramname = paramname if paramname else name
self.sort = sort
self.renderfn = renderfn if renderfn else lambda v: unicode(v)
self.facets = []
# this is a suffix that we append onto the facet name to
# match the proxy facet fields generated by Haystack. In
# Haystack, this is '_exact'. Sometimes, however, we want
# to facet against a field not specifically marked as
# faceted in the search_index file, such as django_ct. In
# this case, we don't want to append '_exact'. If given
# the 'raw' argument, the FacetClass won't do this.
self.suffix = "_exact" if not raw else ""
def all_sorted_by_name(self):
return sorted(self.facets, key=lambda k: k.name)
def sorted_facets(self):
if self.sort == self.FACET_SORT_COUNT:
return self.sorted_by_count()
return self.sorted_by_name()
def sorted_by_name(self):
return [f for f in sorted(self.facets, key=lambda k: k.name) \
if f.count > 0]
def sorted_by_count(self):
return [f for f in sorted(self.facets, key=lambda k: -k.count) \
if f.count > 0]
def apply(self, queryset):
"""Apply the facet to the search query set."""
return queryset.facet(self.name)
def parse(self, counts, current):
"""Parse the facet_counts structure returns from
the Haystack query."""
self.facets = []
flist = counts.get("fields", {}).get(self.name, [])
for item, count in flist:
self.facets.append(Facet(
item, klass=self, count=count, selected=current))
def __repr__(self):
return u"<%s: %s (%d)" % (
self.__class__.__name__, self.name, len(self.facets))
def __unicode__(self):
return self.prettyname
def narrow(self, queryset, active):
"""Narrow the queryset appropriately if one if
our points is in the params."""
for facet in active:
queryset = queryset.narrow('%s%s:"%s"' % (self.name,
self.suffix, queryset.query.clean(facet)))
return queryset
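# Illustrative sketch (hypothetical field and values): a facet declared as
# FacetClass("languages", "Language") narrows a queryset with
# 'languages_exact:"Afrikaans"', while FacetClass("django_ct", "Type", raw=True)
# narrows with 'django_ct:"portal.repository"', i.e. without the '_exact' suffix.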
class QueryFacetClass(FacetClass):
"""Class representing a query facet."""
def __init__(self, *args, **kwargs):
facets = kwargs.pop("queries", [])
super(QueryFacetClass, self).__init__(*args, **kwargs)
self.facets = facets
for facet in self.facets:
facet.klass = self
def sorted_by_name(self):
"""Name sort should respect the order in which
the Query facet points were added in the point spec."""
return [f for f in self.facets if f.count > 0]
def parse(self, counts, current):
if not counts.get("queries"):
return
for facet in self.facets:
count = counts["queries"].get("%s%s:%s" % (
self.name, self.suffix, facet.querystr()))
facet.count = count
facet._selected = current
def apply(self, queryset):
"""Apply the facet to the search query set."""
for facet in self.facets:
queryset = queryset.query_facet(self.name, facet.querystr())
return queryset
def narrow(self, queryset, active):
"""Narrow the queryset appropriately if one if
our points is in the params."""
for pname in active:
# this shouldn't happen unless people diddle with
# the params, in which case they don't deserve any
# results
try:
point = [p for p in self.facets if unicode(p) == pname][0]
except IndexError:
continue
queryset = queryset.narrow(point.query())
return queryset
class Facet(object):
"""Class representing an individual facet constraint,
i.e. 'language:Afrikaans'."""
def __init__(self, name, klass=None, count=None,
selected=[], desc=None):
self.name = name
self.klass = klass
self.count = count
self.desc = desc
self._selected = selected
def prettyname(self):
name = self.desc if self.desc else self.name
return self.klass.renderfn(name)
def selected(self):
return self.filter_name() in self._selected
def filter_name(self):
# FIXME: Hack for rare facets with '(', ')', etc
# in the name, need to find a cleaner way of
# handling quoting: see 'clean' func in
# haystack/backends/__init__.py
def clean(val):
for char in ['(', ')', '-']:
val = val.replace(char, '\\%s' % char)
return val
return clean('%s%s:"%s"' % (self.klass.name, self.klass.suffix, self.name))
def facet_param(self):
return "%s=%s" % (self.klass.paramname, quote_plus(self.name))
class QueryFacet(Facet):
"""Class representing a Query Facet point."""
def __init__(self, *args, **kwargs):
self.point = kwargs.pop("query")
self.range = isinstance(self.point, tuple)
super(QueryFacet, self).__init__(str(self), *args, **kwargs)
def selected(self):
return self.query() in self._selected
def query(self):
return u"%s:%s" % (self.klass.name, self.querystr())
def querystr(self):
if self.range:
return u"[%s TO %s]" % (
self._qpoint(self.point[0]),
self._qpoint(self.point[1]))
return u"%d" % self.point
def filter_name(self):
return u"%s%s:%s" % (self.klass.name, self.klass.suffix, self)
def _strpoint(self, p):
if isinstance(p, basestring):
return ""
return p
def _qpoint(self, p):
if isinstance(p, basestring):
return "*"
return p
def __str__(self):
if self.range:
return u"%s_%s" % (
self._strpoint(self.point[0]),
self._strpoint(self.point[1]))
return u"%d" % self.point
class DateQueryFacet(QueryFacet):
"""Specialisation of QueryFacet for dates, where
each point is either a datetime.datetime object
or a string, such as glob ("*")."""
def _qpoint(self, p):
if isinstance(p, basestring):
return p
return p.isoformat() + "Z"
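# Illustrative sketch (dates are made up): a DateQueryFacet point of
# (datetime.datetime(2000, 1, 1), "*") yields the range query string
# '[2000-01-01T00:00:00Z TO *]' from querystr(), since datetime points are
# rendered with isoformat() + "Z" and string points act as open-ended globs.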
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""arg_scope tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
@tf.contrib.framework.add_arg_scope
def func1(*args, **kwargs):
return (args, kwargs)
@tf.contrib.framework.add_arg_scope
def func2(*args, **kwargs):
return (args, kwargs)
@tf.contrib.framework.add_arg_scope
def func3(args, a=None, b=1, c=2):
"""Some cool doc string."""
return (args, a, b, c)
def _key_op(op):
return getattr(op, '_key_op', str(op))
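# Illustrative sketch of the mechanism exercised by the tests below (the scope
# values are examples): inside an arg_scope, decorated functions pick up the
# scoped keyword arguments, and explicit keyword arguments still win.
#
#   with tf.contrib.framework.arg_scope([func3], a=10):
#     func3(0)        # -> (0, 10, 1, 2)
#     func3(0, a=5)   # -> (0, 5, 1, 2)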
class ArgScopeTest(tf.test.TestCase):
def testEmptyArgScope(self):
with self.test_session():
with tf.contrib.framework.arg_scope([]) as sc:
self.assertEqual(sc, {})
def testClearArgScope(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
key_op = _key_op(func1)
func1_scope = {key_op: func1_kwargs.copy()}
with self.test_session():
with tf.contrib.framework.arg_scope([func1], a=1, b=None, c=[1]) as sc1:
self.assertEqual(sc1, func1_scope)
with tf.contrib.framework.arg_scope({}) as sc2:
self.assertEqual(sc2, {})
with tf.contrib.framework.arg_scope([]) as current_arg_scope:
self.assertEqual(current_arg_scope, func1_scope)
def testNonDecorated(self):
def my_func(t, a=None):
return (t, a)
with self.assertRaises(ValueError):
with tf.contrib.framework.arg_scope([my_func], a=1):
pass
def testUnexpectedArg(self):
with self.assertRaises(TypeError):
with tf.contrib.framework.arg_scope([func3], d=1):
func3(1)
def testCurrentArgScope(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
key_op = _key_op(func1)
current_scope = {key_op: func1_kwargs.copy()}
with self.test_session():
with tf.contrib.framework.arg_scope([func1], a=1, b=None, c=[1]) as scope:
self.assertDictEqual(scope, current_scope)
def testArgScopedArguments(self):
func3_kwargs = ('a', 'b', 'c')
self.assertEquals(tf.contrib.framework.arg_scoped_arguments(func3),
func3_kwargs)
def testCurrentArgScopeNested(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_kwargs = {'b': 2, 'd': [2]}
key = _key_op
current_scope = {key(func1): func1_kwargs.copy(),
key(func2): func2_kwargs.copy()}
with self.test_session():
with tf.contrib.framework.arg_scope([func1], a=1, b=None, c=[1]):
with tf.contrib.framework.arg_scope([func2], b=2, d=[2]) as scope:
self.assertDictEqual(scope, current_scope)
def testReuseArgScope(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
key_op = _key_op(func1)
current_scope = {key_op: func1_kwargs.copy()}
with self.test_session():
with tf.contrib.framework.arg_scope([func1],
a=1, b=None, c=[1]) as scope1:
pass
with tf.contrib.framework.arg_scope(scope1) as scope:
self.assertDictEqual(scope, current_scope)
def testReuseArgScopeNested(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_kwargs = {'b': 2, 'd': [2]}
key = _key_op
current_scope1 = {key(func1): func1_kwargs.copy()}
current_scope2 = {key(func1): func1_kwargs.copy(),
key(func2): func2_kwargs.copy()}
with self.test_session():
with tf.contrib.framework.arg_scope([func1],
a=1, b=None, c=[1]) as scope1:
with tf.contrib.framework.arg_scope([func2], b=2, d=[2]) as scope2:
pass
with tf.contrib.framework.arg_scope(scope1):
with tf.contrib.framework.arg_scope([]) as current_arg_scope:
self.assertDictEqual(current_arg_scope, current_scope1)
with tf.contrib.framework.arg_scope(scope2):
with tf.contrib.framework.arg_scope([]) as current_arg_scope:
self.assertDictEqual(current_arg_scope, current_scope2)
def testSimpleArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with self.test_session():
with tf.contrib.framework.arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testSimpleArgScopeWithTuple(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with self.test_session():
with tf.contrib.framework.arg_scope((func1,), a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testOverwriteArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': 2, 'c': [1]}
with tf.contrib.framework.arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0, b=2)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testNestedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with tf.contrib.framework.arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
func1_kwargs['b'] = 2
with tf.contrib.framework.arg_scope([func1], b=2):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testSharedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with tf.contrib.framework.arg_scope([func1, func2], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testSharedArgScopeTuple(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with tf.contrib.framework.arg_scope((func1, func2), a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testPartiallySharedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_args = (1,)
func2_kwargs = {'a': 1, 'b': None, 'd': [2]}
with tf.contrib.framework.arg_scope([func1, func2], a=1, b=None):
with tf.contrib.framework.arg_scope([func1], c=[1]):
with tf.contrib.framework.arg_scope([func2], d=[2]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(1)
self.assertTupleEqual(args, func2_args)
self.assertDictEqual(kwargs, func2_kwargs)
def testDocString(self):
self.assertEqual(func3.__doc__, 'Some cool doc string.')
if __name__ == '__main__':
tf.test.main()
|
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import glob
from py4j.protocol import Py4JJavaError
from py4j.java_gateway import JavaObject
from py4j.java_collections import ListConverter, JavaArray, JavaList, JavaMap, MapConverter
from pyspark import RDD, SparkContext
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql import DataFrame, SQLContext
from pyspark.mllib.common import callJavaFunc
from pyspark import SparkConf
import numpy as np
import threading
import tempfile
from bigdl.util.engine import get_bigdl_classpath, is_spark_below_2_2
INTMAX = 2147483647
INTMIN = -2147483648
DOUBLEMAX = 1.7976931348623157E308
if sys.version >= '3':
long = int
unicode = str
class SingletonMixin(object):
_lock = threading.RLock()
_instance = None
@classmethod
def instance(cls,
bigdl_type="float"):
if not cls._instance:
with cls._lock:
if not cls._instance:
cls._instance = cls(bigdl_type)
return cls._instance
class JavaCreator(SingletonMixin):
__creator_class="com.intel.analytics.bigdl.python.api.PythonBigDL"
@classmethod
def get_creator_class(cls):
with JavaCreator._lock:
return JavaCreator.__creator_class
@classmethod
def set_creator_class(cls, cclass):
with JavaCreator._lock:
JavaCreator.__creator_class = cclass
JavaCreator._instance = None
def __init__(self, bigdl_type):
sc = get_spark_context()
jclass = getattr(sc._jvm, JavaCreator.get_creator_class())
if bigdl_type == "float":
self.value = getattr(jclass, "ofFloat")()
elif bigdl_type == "double":
self.value = getattr(jclass, "ofDouble")()
else:
raise Exception("Not supported bigdl_type: %s" % bigdl_type)
class JavaValue(object):
def jvm_class_constructor(self):
name = "create" + self.__class__.__name__
print("creating: " + name)
return name
def __init__(self, jvalue, bigdl_type, *args):
self.value = jvalue if jvalue else callBigDlFunc(
bigdl_type, self.jvm_class_constructor(), *args)
self.bigdl_type = bigdl_type
def __str__(self):
return self.value.toString()
class EvaluatedResult():
"""
A testing result used to benchmark the model quality.
"""
def __init__(self, result, total_num, method):
"""
:param result: the validation result, e.g. top-1 accuracy percentage.
:param total_num: the total number of processed records.
:param method: the validation method, e.g. Top1Accuracy.
"""
self.result = result
self.total_num = total_num
self.method = method
def __reduce__(self):
return (EvaluatedResult, (self.result, self.total_num, self.method))
def __str__(self):
return "Evaluated result: %s, total_num: %s, method: %s" % (
self.result, self.total_num, self.method)
def get_dtype(bigdl_type):
# Always return float32 for now
return "float32"
class Configuration(object):
__bigdl_jars = [get_bigdl_classpath()]
@staticmethod
def add_extra_jars(jars):
"""
Add extra jars to classpath
:param jars: a string or a list of strings as jar paths
"""
import six
if isinstance(jars, six.string_types):
jars = [jars]
Configuration.__bigdl_jars += jars
@staticmethod
def add_extra_python_modules(packages):
"""
Add extra python modules to sys.path
:param packages: a string or a list of strings as python package paths
"""
import six
if isinstance(packages, six.string_types):
packages = [packages]
for package in packages:
sys.path.insert(0, package)
@staticmethod
def get_bigdl_jars():
return Configuration.__bigdl_jars
class JActivity(object):
def __init__(self, value):
self.value = value
class JTensor(object):
"""
A wrapper to ease our work when we need to pass or return a Tensor to/from Scala.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> np.random.seed(123)
>>>
"""
def __init__(self, storage, shape, bigdl_type="float", indices=None):
"""
:param storage: values in this tensor
:param shape: shape of this tensor
:param bigdl_type: numeric type
:param indices: if indices is provided, means this is a SparseTensor;
if not provided, means this is a DenseTensor
"""
if isinstance(storage, bytes) and isinstance(shape, bytes):
self.storage = np.frombuffer(storage, dtype=get_dtype(bigdl_type))
self.shape = np.frombuffer(shape, dtype=np.int32)
else:
self.storage = np.array(storage, dtype=get_dtype(bigdl_type))
self.shape = np.array(shape, dtype=np.int32)
if indices is None:
self.indices = None
elif isinstance(indices, bytes):
self.indices = np.frombuffer(indices, dtype=np.int32)
else:
assert isinstance(indices, np.ndarray), \
"indices should be a np.ndarray, not %s, %s" % (type(a_ndarray), str(indices))
self.indices = np.array(indices, dtype=np.int32)
self.bigdl_type = bigdl_type
@classmethod
def from_ndarray(cls, a_ndarray, bigdl_type="float"):
"""
Convert an ndarray to a DenseTensor, which can be used on the Java side.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.random.uniform(0, 1, (2, 3)).astype("float32")
>>> result = JTensor.from_ndarray(data)
>>> print(result)
JTensor: storage: [[ 0.69646919 0.28613934 0.22685145]
[ 0.55131477 0.71946895 0.42310646]], shape: [2 3], float
>>> result
JTensor: storage: [[ 0.69646919 0.28613934 0.22685145]
[ 0.55131477 0.71946895 0.42310646]], shape: [2 3], float
>>> data_back = result.to_ndarray()
>>> (data == data_back).all()
True
>>> tensor1 = callBigDlFunc("float", "testTensor", JTensor.from_ndarray(data)) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> (array_from_tensor == data).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"input should be a np.ndarray, not %s" % type(a_ndarray)
return cls(a_ndarray,
a_ndarray.shape if a_ndarray.shape else (a_ndarray.size),
bigdl_type)
@classmethod
def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type="float"):
"""
Convert three ndarrays to a SparseTensor, which can be used on the Java side.
For example:
a_ndarray = [1, 3, 2, 4]
i_ndarray = [[0, 0, 1, 2],
[0, 3, 2, 1]]
shape = [3, 4]
together represent the dense tensor
[[ 1, 0, 0, 3],
[ 0, 0, 2, 0],
[ 0, 4, 0, 0]]
:param a_ndarray: non-zero elements in this SparseTensor
:param i_ndarray: zero-based indices of the non-zero elements;
i_ndarray's shape should be (shape.size, a_ndarray.size),
and the indices of the i-th non-zero element are i_ndarray[:, i]
:param shape: shape of the corresponding DenseTensor.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.arange(1, 7).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> result = JTensor.sparse(data, indices, shape)
>>> result
JTensor: storage: [ 1. 2. 3. 4. 5. 6.], shape: [10] ,indices [1 2 3 4 5 6], float
>>> tensor1 = callBigDlFunc("float", "testTensor", result) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> expected_ndarray = np.array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> (array_from_tensor == expected_ndarray).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"values array should be a np.ndarray, not %s" % type(a_ndarray)
assert isinstance(i_ndarray, np.ndarray), \
"indices array should be a np.ndarray, not %s" % type(a_ndarray)
assert i_ndarray.size == a_ndarray.size * shape.size, \
"size of values and indices should match."
return cls(a_ndarray,
shape,
bigdl_type,
i_ndarray)
def to_ndarray(self):
"""
Transfer JTensor to ndarray.
Since a SparseTensor may generate a very big ndarray, this function is not supported for SparseTensor.
:return: a ndarray
"""
assert self.indices is None, "sparseTensor to ndarray is not supported"
return np.array(self.storage, dtype=get_dtype(self.bigdl_type)).reshape(self.shape) # noqa
def __reduce__(self):
if self.indices is None:
return JTensor, (self.storage.tostring(), self.shape.tostring(), self.bigdl_type)
else:
return JTensor, (self.storage.tostring(), self.shape.tostring(), self.bigdl_type, self.indices.tostring())
def __str__(self):
return self.__repr__()
def __repr__(self):
indices = "" if self.indices is None else " ,indices %s" % str(self.indices)
return "JTensor: storage: %s, shape: %s%s, %s" % (str(self.storage), str(self.shape), indices, self.bigdl_type)
class Sample(object):
def __init__(self, features, labels, bigdl_type="float"):
"""
User should always use Sample.from_ndarray to construct Sample.
:param features: a list of JTensors
:param labels: a list of JTensors
:param bigdl_type: "double" or "float"
"""
self.feature = features[0]
self.features = features
self.label = labels[0]
self.bigdl_type = bigdl_type
self.labels = labels
@classmethod
def from_ndarray(cls, features, labels, bigdl_type="float"):
"""
Convert ndarrays of features and labels to a Sample, which can be used on the Java side.
:param features: an ndarray or a list of ndarrays
:param labels: an ndarray or a list of ndarrays or a scalar
:param bigdl_type: "double" or "float"
>>> import numpy as np
>>> from bigdl.util.common import callBigDlFunc
>>> from numpy.testing import assert_allclose
>>> np.random.seed(123)
>>> sample = Sample.from_ndarray(np.random.random((2,3)), np.random.random((2,3)))
>>> sample_back = callBigDlFunc("float", "testSample", sample)
>>> assert_allclose(sample.features[0].to_ndarray(), sample_back.features[0].to_ndarray())
>>> assert_allclose(sample.label.to_ndarray(), sample_back.label.to_ndarray())
>>> print(sample)
Sample: features: [JTensor: storage: [[ 0.69646919 0.28613934 0.22685145]
[ 0.55131477 0.71946895 0.42310646]], shape: [2 3], float], labels: [JTensor: storage: [[ 0.98076421 0.68482971 0.48093191]
[ 0.39211753 0.343178 0.72904968]], shape: [2 3], float],
"""
if isinstance(features, np.ndarray):
features = [features]
else:
assert all(isinstance(feature, np.ndarray) for feature in features), \
"features should be a list of np.ndarray, not %s" % type(features)
if np.isscalar(labels): # in case labels is a scalar.
labels = [np.array(labels)]
elif isinstance(labels, np.ndarray):
labels = [labels]
else:
assert all(isinstance(label, np.ndarray) for label in labels), \
"labels should be a list of np.ndarray, not %s" % type(labels)
return cls(
features=[JTensor.from_ndarray(feature) for feature in features],
labels=[JTensor.from_ndarray(label) for label in labels],
bigdl_type=bigdl_type)
@classmethod
def from_jtensor(cls, features, labels, bigdl_type="float"):
"""
Convert a sequence of JTensors to a Sample, which can be used on the Java side.
:param features: a JTensor or a list of JTensors
:param labels: a JTensor, a list of JTensors, or a scalar
:param bigdl_type: "double" or "float"
>>> import numpy as np
>>> data = np.random.uniform(0, 1, (6)).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> feature0 = JTensor.sparse(data, indices, shape)
>>> feature1 = JTensor.from_ndarray(np.random.uniform(0, 1, (2, 3)).astype("float32"))
>>> sample = Sample.from_jtensor([feature0, feature1], 1)
"""
if isinstance(features, JTensor):
features = [features]
else:
assert all(isinstance(feature, JTensor) for feature in features), \
"features should be a list of JTensor, not %s" % type(features)
if np.isscalar(labels): # in case labels is a scalar.
labels = [JTensor.from_ndarray(np.array(labels))]
elif isinstance(labels, JTensor):
labels = [labels]
else:
assert all(isinstance(label, JTensor) for label in labels), \
"labels should be a list of np.ndarray, not %s" % type(labels)
return cls(
features=features,
labels=labels,
bigdl_type=bigdl_type)
def __reduce__(self):
return Sample, (self.features, self.labels, self.bigdl_type)
def __str__(self):
return "Sample: features: %s, labels: %s," % (self.features, self.labels)
def __repr__(self):
return "Sample: features: %s, labels: %s" % (self.features, self.labels)
class RNG():
"""
Generate tensor data with a seed.
"""
def __init__(self, bigdl_type="float"):
self.bigdl_type = bigdl_type
def set_seed(self, seed):
callBigDlFunc(self.bigdl_type, "setModelSeed", seed)
def uniform(self, a, b, size):
return callBigDlFunc(self.bigdl_type, "uniform", a, b, size).to_ndarray() # noqa
_picklable_classes = [
'LinkedList',
'SparseVector',
'DenseVector',
'DenseMatrix',
'Rating',
'LabeledPoint',
'Sample',
'EvaluatedResult',
'JTensor',
'JActivity'
]
def init_engine(bigdl_type="float"):
callBigDlFunc(bigdl_type, "initEngine")
def redire_spark_logs(bigdl_type="float", log_path=os.getcwd()+"/bigdl.log"):
"""
Redirect spark logs to the specified path.
:param bigdl_type: "double" or "float"
:param log_path: the file path to redirect logs to; by default a file named `bigdl.log` under the current working directory.
"""
callBigDlFunc(bigdl_type, "redirectSparkLogs", log_path)
def show_bigdl_info_logs(bigdl_type="float"):
"""
Set BigDL log level to INFO.
:param bigdl_type: "double" or "float"
"""
callBigDlFunc(bigdl_type, "showBigDlInfoLogs")
def get_bigdl_conf():
bigdl_conf_file = "spark-bigdl.conf"
bigdl_python_wrapper = "python-api.zip"
def load_conf(conf_str):
return dict(line.split() for line in conf_str.split("\n") if
"#" not in line and line.strip())
for p in sys.path:
if bigdl_conf_file in p and os.path.isfile(p):
with open(p) if sys.version_info < (3,) else open(p, encoding='latin-1') as conf_file: # noqa
return load_conf(conf_file.read())
if bigdl_python_wrapper in p and os.path.isfile(p):
import zipfile
with zipfile.ZipFile(p, 'r') as zip_conf:
if bigdl_conf_file in zip_conf.namelist():
content = zip_conf.read(bigdl_conf_file)
if sys.version_info >= (3,):
content = str(content, 'latin-1')
return load_conf(content)
return {}
def to_list(a):
if type(a) is list:
return a
return [a]
def to_sample_rdd(x, y, numSlices=None):
"""
Convert x and y into an RDD[Sample].
:param x: ndarray whose first dimension is the batch dimension
:param y: ndarray whose first dimension is the batch dimension
:param numSlices: the number of partitions to create
:return: an RDD of Sample
"""
sc = get_spark_context()
from bigdl.util.common import Sample
x_rdd = sc.parallelize(x, numSlices)
y_rdd = sc.parallelize(y, numSlices)
return x_rdd.zip(y_rdd).map(lambda item: Sample.from_ndarray(item[0], item[1]))
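# Hedged usage sketch (assumes a running SparkContext and an initialized BigDL
# engine): converting NumPy feature/label arrays into an RDD[Sample]; shapes
# below are hypothetical.
#
#   import numpy as np
#   x = np.random.uniform(0, 1, (100, 28, 28)).astype("float32")
#   y = np.random.randint(1, 11, (100,)).astype("float32")
#   training_rdd = to_sample_rdd(x, y, numSlices=4)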
def extend_spark_driver_cp(sparkConf, path):
original_driver_classpath = ":" + sparkConf.get("spark.driver.extraClassPath") \
if sparkConf.contains("spark.driver.extraClassPath") else ""
sparkConf.set("spark.driver.extraClassPath", path + original_driver_classpath)
def create_spark_conf():
bigdl_conf = get_bigdl_conf()
sparkConf = SparkConf()
sparkConf.setAll(bigdl_conf.items())
if not is_spark_below_2_2():
for jar in Configuration.get_bigdl_jars():
extend_spark_driver_cp(sparkConf, jar)
# add content in PYSPARK_FILES in spark.submit.pyFiles
# This is a workaround for current Spark on k8s
python_lib = os.environ.get('PYSPARK_FILES', None)
if python_lib:
existing_py_files = sparkConf.get("spark.submit.pyFiles")
if existing_py_files:
sparkConf.set(key="spark.submit.pyFiles", value="%s,%s" % (python_lib, existing_py_files))
else:
sparkConf.set(key="spark.submit.pyFiles", value=python_lib)
return sparkConf
def get_spark_context(conf=None):
"""
Get the currently active SparkContext, or create one if there is no active instance.
:param conf: a SparkConf to use when creating a new context; if None, one is created from the BigDL configs
:return: SparkContext
"""
if hasattr(SparkContext, "getOrCreate"):
with SparkContext._lock:
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext.getOrCreate(spark_conf)
else:
return SparkContext.getOrCreate()
else:
# Might have a threading issue but we can't add _lock here
# as it's not an RLock in Spark 1.5;
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext(conf=spark_conf)
else:
return SparkContext._active_spark_context
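# Hedged usage sketch of the typical BigDL bootstrap sequence; the names come
# from this module, while the master/app settings are hypothetical.
#
#   conf = create_spark_conf().setMaster("local[4]").setAppName("bigdl-demo")
#   sc = get_spark_context(conf)
#   init_engine()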
def get_spark_sql_context(sc):
if "getOrCreate" in SQLContext.__dict__:
return SQLContext.getOrCreate(sc)
else:
return SQLContext(sc) # Compatible with Spark1.5.1
def callBigDlFunc(bigdl_type, name, *args):
""" Call API in PythonBigDL """
jinstance = JavaCreator.instance(bigdl_type=bigdl_type).value
sc = get_spark_context()
api = getattr(jinstance, name)
return callJavaFunc(sc, api, *args)
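# Hedged sketch of how the Py4J bridge is used throughout this module: the
# named method is looked up on the PythonBigDL instance and the Python
# arguments are converted via _py2java before the call. "setModelSeed" is one
# method known to exist on the Scala side (see RNG.set_seed below).
#
#   callBigDlFunc("float", "setModelSeed", 42)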
def _java2py(sc, r, encoding="bytes"):
if isinstance(r, JavaObject):
clsName = r.getClass().getSimpleName()
# convert RDD into JavaRDD
if clsName != 'JavaRDD' and clsName.endswith("RDD"):
r = r.toJavaRDD()
clsName = 'JavaRDD'
if clsName == 'JavaRDD':
jrdd = sc._jvm.SerDe.javaToPython(r)
return RDD(jrdd, sc)
if clsName == 'DataFrame':
return DataFrame(r, get_spark_sql_context(sc))
if clsName == 'Dataset':
return DataFrame(r, get_spark_sql_context(sc))
if clsName in _picklable_classes:
r = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(r)
elif isinstance(r, (JavaArray, JavaList, JavaMap)):
try:
r = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(
r)
except Py4JJavaError:
pass # not pickable
if isinstance(r, (bytearray, bytes)):
r = PickleSerializer().loads(bytes(r), encoding=encoding)
return r
def callJavaFunc(sc, func, *args):
""" Call Java Function """
args = [_py2java(sc, a) for a in args]
result = func(*args)
return _java2py(sc, result)
def _to_java_object_rdd(rdd):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into a Java object by Pyrolite, whether
the RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return \
rdd.ctx._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.pythonToJava(
rdd._jrdd, True)
def _py2java(sc, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, (list, tuple)):
obj = ListConverter().convert([_py2java(sc, x) for x in obj],
sc._gateway._gateway_client)
elif isinstance(obj, dict):
result = {}
for (key, value) in obj.items():
result[key] = _py2java(sc, value)
obj = MapConverter().convert(result, sc._gateway._gateway_client)
elif isinstance(obj, JavaValue):
obj = obj.value
elif isinstance(obj, JavaObject):
pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
pass
else:
data = bytearray(PickleSerializer().dumps(obj))
obj = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)
return obj
def create_tmp_path():
tmp_file = tempfile.NamedTemporaryFile(prefix="bigdl")
tmp_file.close()
return tmp_file.name
def text_from_path(path):
sc = get_spark_context()
return sc.textFile(path).collect()[0]
def get_local_file(a_path):
if not is_distributed(a_path):
return a_path
path, data = get_spark_context().binaryFiles(a_path).collect()[0]
local_file_path = create_tmp_path()
with open(local_file_path, 'w') as local_file:
local_file.write(data)
return local_file_path
def is_distributed(path):
return "://" in path
def get_activation_by_name(activation_name, activation_id=None):
""" Convert to a bigdl activation layer
given the name of the activation as a string """
import bigdl.nn.layer as BLayer
activation = None
activation_name = activation_name.lower()
if activation_name == "tanh":
activation = BLayer.Tanh()
elif activation_name == "sigmoid":
activation = BLayer.Sigmoid()
elif activation_name == "hard_sigmoid":
activation = BLayer.HardSigmoid()
elif activation_name == "relu":
activation = BLayer.ReLU()
elif activation_name == "softmax":
activation = BLayer.SoftMax()
elif activation_name == "softplus":
activation = BLayer.SoftPlus(beta=1.0)
elif activation_name == "softsign":
activation = BLayer.SoftSign()
elif activation_name == "linear":
activation = BLayer.Identity()
else:
raise Exception("Unsupported activation type: %s" % activation_name)
if activation_id:
activation.set_name(activation_id)
return activation
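# Hedged usage sketch: mapping a Keras-style activation name to a BigDL layer;
# the layer names passed to activation_id are hypothetical.
#
#   relu_layer = get_activation_by_name("relu", activation_id="my_relu")
#   tanh_layer = get_activation_by_name("tanh")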
def _test():
import doctest
from pyspark import SparkContext
from bigdl.nn import layer
globs = layer.__dict__.copy()
sc = SparkContext(master="local[2]", appName="test common utility")
globs['sc'] = sc
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
|
# -*- coding: utf-8 -*-
"""
Splash can send outgoing network requests through an HTTP proxy server.
This modules provides classes ("proxy factories") which define
which proxies to use for a given request. QNetworkManager calls
a proxy factory for each outgoing request.
Not to be confused with Splash Proxy mode when Splash itself works as
an HTTP proxy (see :mod:`splash.proxy_server`).
"""
from __future__ import absolute_import
import re
import os
import urlparse
import ConfigParser
from PyQt4.QtNetwork import QNetworkProxy
from splash.render_options import BadOption
from splash.qtutils import create_proxy, validate_proxy_type
from splash.utils import path_join_secure
class _BlackWhiteSplashProxyFactory(object):
"""
Proxy factory that uses a non-default proxy list when the
requested URL matches one of the whitelist patterns and does
not match any of the blacklist patterns.
def __init__(self, blacklist=None, whitelist=None, proxy_list=None):
self.blacklist = blacklist or []
self.whitelist = whitelist or []
self.proxy_list = proxy_list or []
def queryProxy(self, query=None, *args, **kwargs):
protocol = unicode(query.protocolTag())
url = unicode(query.url().toString())
if self.shouldUseProxyList(protocol, url):
return self._customProxyList()
return self._defaultProxyList()
def shouldUseProxyList(self, protocol, url):
if not self.proxy_list:
return False
if protocol not in ('http', 'https'):
# don't try to proxy unknown protocols
return False
if any(re.match(p, url) for p in self.blacklist):
return False
if any(re.match(p, url) for p in self.whitelist):
return True
return not bool(self.whitelist)
def _defaultProxyList(self):
return [QNetworkProxy(QNetworkProxy.DefaultProxy)]
def _customProxyList(self):
return [
create_proxy(host, port, username, password, type)
for host, port, username, password, type in self.proxy_list
]
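# Hedged usage sketch (all values hypothetical): a factory that proxies only
# example.com requests through one HTTP proxy while skipping static assets.
#
#   factory = _BlackWhiteSplashProxyFactory(
#       blacklist=[r'.*\.(js|css|png)$'],
#       whitelist=[r'.*example\.com.*'],
#       proxy_list=[('proxy.example.com', 8080, 'user', 'pass', 'HTTP')],
#   )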
class ProfilesSplashProxyFactory(_BlackWhiteSplashProxyFactory):
"""
This proxy factory reads BlackWhiteQNetworkProxyFactory
parameters from ini file; name of the profile can be set per-request
using GET parameter.
Example config file for 'mywebsite' proxy profile::
; /etc/splash/proxy-profiles/mywebsite.ini
[proxy]
host=proxy.crawlera.com
port=8010
username=username
password=password
type=HTTP
[rules]
whitelist=
.*mywebsite\.com.*
blacklist=
.*\.js.*
.*\.css.*
.*\.png
If there is a ``default.ini`` proxy profile in the profiles folder,
it will be used when no profile is specified in the GET parameter.
If the GET parameter is 'none' or empty (''), no proxy will be used even if
``default.ini`` is present.
"""
NO_PROXY_PROFILE_MSG = 'Proxy profile does not exist'
def __init__(self, proxy_profiles_path, profile_name):
self.proxy_profiles_path = proxy_profiles_path
blacklist, whitelist, proxy_list = self._getFilterParams(profile_name)
super(ProfilesSplashProxyFactory, self).__init__(blacklist, whitelist, proxy_list)
def _getFilterParams(self, profile_name=None):
"""
Return (blacklist, whitelist, proxy_list) tuple
loaded from profile ``profile_name``.
"""
if profile_name is None:
profile_name = 'default'
ini_path = self._getIniPath(profile_name)
if not os.path.isfile(ini_path):
profile_name = 'none'
if profile_name == 'none':
return [], [], []
ini_path = self._getIniPath(profile_name)
return self._parseIni(ini_path)
def _getIniPath(self, profile_name):
filename = profile_name + '.ini'
try:
return path_join_secure(self.proxy_profiles_path, filename)
except ValueError as e:
# security check fails
print(e)
raise BadOption(self.NO_PROXY_PROFILE_MSG)
def _parseIni(self, ini_path):
parser = ConfigParser.ConfigParser(allow_no_value=True)
if not parser.read(ini_path):
raise BadOption(self.NO_PROXY_PROFILE_MSG)
blacklist = _get_lines(parser, 'rules', 'blacklist', [])
whitelist = _get_lines(parser, 'rules', 'whitelist', [])
try:
proxy = dict(parser.items('proxy'))
except ConfigParser.NoSectionError:
raise BadOption("Invalid proxy profile: no [proxy] section found")
try:
host = proxy['host']
except KeyError:
raise BadOption("Invalid proxy profile: [proxy] host is not found")
try:
port = int(proxy['port'])
except KeyError:
raise BadOption("Invalid proxy profile: [proxy] port is not found")
except ValueError:
raise BadOption("Invalid proxy profile: [proxy] port is incorrect")
if 'type' in proxy:
validate_proxy_type(proxy['type'])
proxy_list = [(host, port,
proxy.get('username'), proxy.get('password'),
proxy.get('type'))]
return blacklist, whitelist, proxy_list
class DirectSplashProxyFactory(object):
"""
This proxy factory sets the proxy passed to a render request
via a GET parameter.
If the GET parameter is a fully qualified URL, the specified proxy is used.
The syntax to specify the proxy is:
[protocol://][user:password@]proxyhost[:port]
where protocol is either ``http`` or ``socks5``. If port is not specified,
it's assumed to be 1080.
"""
def __init__(self, proxy):
url = urlparse.urlparse(proxy)
if url.scheme and url.scheme in ('http', 'socks5') and url.hostname:
self.proxy = create_proxy(
url.hostname,
url.port or 1080,
username=url.username,
password=url.password,
type=url.scheme.upper()
)
else:
raise BadOption('Invalid proxy URL format.')
def queryProxy(self, *args, **kwargs):
return [self.proxy]
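# Hedged usage sketch: constructing a factory directly from a proxy URL of the
# form described above (host and credentials are hypothetical).
#
#   factory = DirectSplashProxyFactory('http://user:[email protected]:3128')
#   proxies = factory.queryProxy()  # -> [QNetworkProxy for proxy.example.com]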
def getFactory(ini_path, parameter):
"""
Returns the appropriate factory depending on the value of
ini_path and parameter
"""
if parameter and re.match('^\w+://', parameter):
return DirectSplashProxyFactory(parameter)
else:
if ini_path:
return ProfilesSplashProxyFactory(ini_path, parameter)
else:
return None
def _get_lines(config_parser, section, option, default):
try:
lines = config_parser.get(section, option).splitlines()
return [line for line in lines if line]
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
return default
|
|
# Copyright (C) 2010 - Jens Nyman ([email protected])
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
from gi.repository import Gtk, GObject, Gedit, PeasGtk, Gio
import re
import traceback
class IntelligentTextCompletionPlugin(GObject.Object, Gedit.WindowActivatable, PeasGtk.Configurable):
window = GObject.property(type=Gedit.Window)
def __init__(self):
GObject.Object.__init__(self)
self._instances = {}
def do_create_configure_widget(self):
return IntelligentTextCompletionOptions.get_instance().create_configure_dialog()
def _connect_view(self, view, window):
"""Connect to view's editing signals."""
callback = self._on_view_key_press_event
id = view.connect("key-press-event", callback, window)
view.intelligent_text_completion_id = id
def _on_window_tab_added(self, window, tab):
"""Connect to signals of the document and view in tab."""
view = tab.get_view()
handler_id = getattr(view, 'intelligent_text_completion_id', None)
if handler_id is None:
self._connect_view(view, window)
def _on_window_tab_removed(self, window, tab):
pass
def do_activate(self):
"""Activate plugin."""
window = self.window
callback = self._on_window_tab_added
id_1 = window.connect("tab-added", callback)
callback = self._on_window_tab_removed
id_2 = window.connect("tab-removed", callback)
window.intelligent_text_completion_id = (id_1, id_2)
views = window.get_views()
for view in views:
self._connect_view(view, window)
def do_deactivate(self):
"""Deactivate plugin."""
window = self.window
widgets = [window]
widgets.extend(window.get_views())
widgets.extend(window.get_documents())
for widget in widgets:
handler_ids = getattr(widget, 'intelligent_text_completion_id', None) or []
# views store a single handler id, while the window stores a tuple of ids
if not isinstance(handler_ids, (tuple, list)):
handler_ids = (handler_ids,)
for handler_id in handler_ids:
widget.disconnect(handler_id)
widget.intelligent_text_completion_id = None
def _on_view_key_press_event(self, view, event, window):
doc = window.get_active_document()
try:
return self._handle_event(view, event, window)
except:
err = "Exception\n"
err += traceback.format_exc()
doc.set_text(err)
############ plugin core functions ############
def _handle_event(self, view, event, window):
"""Key press event"""
### get vars ###
# constants
ignore_whitespace = '\t '
# get document
doc = window.get_active_document()
# get cursor
cursor = doc.get_iter_at_mark(doc.get_insert())
# get typed string
typed_string = event.string
# get previous char
prev_char = None
if not cursor.get_line_offset() == 0:
prev_char_pos = cursor.copy()
prev_char_pos.set_line_offset(cursor.get_line_offset() - 1)
prev_char = doc.get_text(prev_char_pos, cursor, False)
# get next char
next_char = None
if not cursor.ends_line():
next_char_pos = cursor.copy()
next_char_pos.set_line_offset(cursor.get_line_offset() + 1)
next_char = doc.get_text(cursor, next_char_pos, False)
# get line before cursor
line_start = cursor.copy()
line_start.set_line_offset(0)
preceding_line = doc.get_text(line_start, cursor, False)
# get line after cursor
line_end = cursor.copy()
if not cursor.ends_line():
line_end.forward_to_line_end()
line_after = doc.get_text(cursor, line_end, False)
# get whitespace in front of line
whitespace_pos = 0
whitespace = ""
while len(preceding_line) > whitespace_pos and preceding_line[whitespace_pos] in ignore_whitespace:
whitespace += preceding_line[whitespace_pos]
whitespace_pos += 1
# get options
options = IntelligentTextCompletionOptions.get_instance()
# Do not complete text after pasting text.
if len(typed_string) > 1:
return False
typed_char = typed_string
# GLOBALS
open_close = {
'"': '"',
"'": "'",
'(': ')',
'{': '}',
'[': ']',
}
################### selected text ###################
bounds = doc.get_selection_bounds()
if len(bounds) > 0:
# auto-close brackets and quotes
if options.closeBracketsAndQuotes:
for open, close in open_close.items():
if typed_char == open:
# get bounds data
off1 = bounds[0].get_offset()
off2 = bounds[1].get_offset()
# add open char
doc.place_cursor(bounds[0])
doc.insert_at_cursor(open)
# refresh cursor and move it
cursor = doc.get_iter_at_mark(doc.get_insert())
cursor.set_offset(cursor.get_offset() + (off2 - off1))
doc.place_cursor(cursor)
# add close char
doc.insert_at_cursor(close)
return True
return False
################### auto-close brackets and quotes ###################
if options.closeBracketsAndQuotes and prev_char != '\\':
""" detect python comments """
if typed_char == '"' and re.search('^[^"]*""$', preceding_line) and cursor.ends_line():
return self._insert_at_cursor(typed_char + ' ', ' """')
for check_char, add_char in open_close.items():
# if character user is adding is the same as the one that
# is auto-generated, remove the auto generated char
if typed_char == add_char:
if not cursor.ends_line():
if next_char == add_char:
if check_char != add_char:
# don't remove ) when it's probably not auto-generated
preceding_check_chars = len(re.findall('\%s' % check_char, preceding_line))
preceding_add_chars = len(re.findall('\%s' % add_char, preceding_line))
following_check_chars = len(re.findall('\%s' % check_char, line_after))
following_add_chars = len(re.findall('\%s' % add_char, line_after))
if preceding_check_chars - preceding_add_chars > following_add_chars:
continue
# don't remove ) when the line becomes complex
if following_check_chars > 0:
continue
doc.delete(cursor, next_char_pos)
return False
# typed_char equals char we're looking for
if typed_char == check_char:
# check for unlogical adding
if check_char == add_char:
# uneven number of check_char's in front
if len(re.findall(check_char, preceding_line)) % 2 == 1:
continue
# uneven number of check_char's in back
if len(re.findall(check_char, line_after)) % 2 == 1:
continue
# don't add add_char if it is used around text
non_text_left = ' \t\n\r,=+*:;.?!$&@%~<(){}[]-"\''
non_text_right = ' \t\n\r,=+*:;.?&@%~>)}]'
if not next_char and not check_char == "'":
# if we're just typing with nothing on the right,
# adding is OK as long as it isn't a "'".
pass
elif (not prev_char or prev_char in non_text_left) and (not next_char or next_char in non_text_right):
# this char is surrounded by nothing or non-text, therefore, we can add autotext
pass
elif check_char != add_char and (not next_char or next_char in non_text_right):
# this opening char has non-text on the right, therefore, we can add autotext
pass
else:
continue
# insert add_char
return self._insert_at_cursor(typed_char, add_char)
# check backspace
if event.keyval == 65288: # backspace
if prev_char == check_char and next_char == add_char:
doc.delete(cursor, next_char_pos)
################### auto-complete XML tags ###################
if options.completeXML:
if prev_char == "<" and typed_char == "/":
start = doc.get_start_iter()
preceding_document = doc.get_text(start, cursor, False)
# analyse previous XML code
closing_tag = get_closing_xml_tag(preceding_document)
# insert code
if closing_tag:
return self._insert_at_cursor(typed_char + closing_tag + ">")
else:
return False # do nothing
################### auto-complete django tags ###################
if options.completeXML: # TODO: make separate setting for this
if typed_char == "{":
# The normal opening and closing paradigm does not autocomplete
# for instance <a href="{{ url }}"> because {{ url }} is inside
# a sequence preventing auto-closing of brackets.
# We fix that here...
if next_char in open_close.values():
# The next character has prevented a proper closing } from
# being inserted
return self._insert_at_cursor(typed_char, "}")
if prev_char == "{" and typed_char == "%":
# insert code
self._insert_at_cursor("% %")
# refresh cursor and move it to the middle
cursor = doc.get_iter_at_mark(doc.get_insert())
cursor.set_offset(cursor.get_offset() - 2)
doc.place_cursor(cursor)
return True
################### detect lists ###################
if options.detectLists:
if event.keyval == 65293: # return
# constants
list_bullets = ['* ', '- ', '$ ', '> ', '+ ', '~ ']
# cycle through all bullets
for bullet in list_bullets:
if len(preceding_line) >= whitespace_pos + len(bullet):
if preceding_line[whitespace_pos:whitespace_pos + len(bullet)] == bullet:
# endlist function by double enter
if preceding_line == whitespace + bullet and bullet != '* ':
start = cursor.copy()
start.set_line_offset(len(whitespace))
doc.delete(start, cursor)
return True
return self._insert_at_cursor(typed_char + whitespace + bullet)
################### detect java-like comment ###################
if event.keyval == 65293: # return
# constants
comments = {
'/**' : (' * ', ' */'),
'/*' : (' * ', ' */'),
}
# cycle through all types of comment
for comment_start, (comment_middle, comment_end) in comments.items():
if preceding_line[whitespace_pos:] == comment_start:
add_middle = typed_char + whitespace + comment_middle
add_end = typed_char + whitespace + comment_end
return self._insert_at_cursor(add_middle, add_end)
################### auto-indent after function/list ###################
if options.autoindentAfterFunctionOrList:
if event.keyval == 65293: # return
indent_triggers = {
'(': ')',
'{': '}',
'[': ']',
':': '',
}
for indent_trigger, ending_char in indent_triggers.items():
if prev_char == indent_trigger:
if line_after:
# text between begin and ending brackets should come
# in the middle row
if ending_char != '' and ending_char in line_after:
ending_pos = line_after.find(ending_char)
else:
ending_pos = len(line_after)
end = cursor.copy()
end.set_line_offset(end.get_line_offset() + ending_pos)
ending_text = doc.get_text(cursor, end, False).strip()
doc.delete(cursor, end)
add_middle = typed_char + whitespace + get_tab_string(view)
add_end = ending_text + typed_char + whitespace
else:
add_middle = typed_char + whitespace + get_tab_string(view)
add_end = ""
return self._insert_at_cursor(add_middle, add_end)
if typed_char == '}':
if preceding_line and preceding_line.isspace():
whitespace_pos_iter = cursor.copy()
whitespace_pos_iter.set_line_offset(whitespace_pos)
return self._remove_at_cursor(whitespace_pos_iter) and self._insert_at_cursor("}")
def _insert_at_cursor(self, middle, end = ""):
window = self.window
doc = window.get_active_document()
doc.insert_at_cursor(middle + end)
# refresh cursor and move it to the middle
cursor = doc.get_iter_at_mark(doc.get_insert())
cursor.set_offset(cursor.get_offset() - len(end))
doc.place_cursor(cursor)
return True
def _remove_at_cursor(self, pos):
window = self.window
doc = window.get_active_document()
return doc.backspace(pos, False, True)
##### regular functions #####
def get_tab_string(view):
tab_width = view.get_tab_width()
tab_spaces = view.get_insert_spaces_instead_of_tabs()
tab_code = ""
if tab_spaces:
for x in range(tab_width):
tab_code += " "
else:
tab_code = "\t"
return tab_code
def get_closing_xml_tag(document):
tags = re.findall(r'<.*?>', document)
tags.reverse()
closed = []
for tag in tags:
# ignore special tags like <!-- --> and <!doctype ...>
if re.match(r'<!.*?>', tag):
continue
# ignore special tags like <?, <?=, <?php
if re.match(r'<\?.*?>', tag):
continue
# self-closing tag, e.g. <br/>
if re.match(r'<.*?/>', tag):
continue
# closing tag
m = re.match(r'</ *([^ ]*).*?>', tag)
if m:
closed.append(m.group(1))
continue
# opening tag
m = re.match(r'< *([^/][^ ]*).*?>', tag)
if m:
openedtag = m.group(1)
while True:
if len(closed) == 0:
return openedtag
close_tag = closed.pop()
if close_tag.lower() == openedtag.lower():
break
continue
return None
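# Hedged example of the matching logic above: the most recently opened tag
# that has not yet been closed is returned (without angle brackets).
#
#   get_closing_xml_tag('<html><body><p>text</p><div>')   # -> 'div'
#   get_closing_xml_tag('<br/><p>text</p>')                # -> None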
################## OPTIONS DIALOG ##################
class IntelligentTextCompletionOptions(object):
## settings
closeBracketsAndQuotes = True
completeXML = True
detectLists = True
autoindentAfterFunctionOrList = True
## buttons for settings
_closeBracketsAndQuotesButton = None
_completeXMLButton = None
_detectListsButton = None
_autoindentAfterFunctionOrListButton = None
## configuration client
_BASE_KEY = "apps.gedit-3.plugins.intelligent_text_completion"
_settings = None
## static singleton reference
singleton = None
def __init__(self):
# create settings directory if not set yet
#self._settings = Gio.Settings.new(self._BASE_KEY)
#if not self._gconf_client.dir_exists(self._GCONF_SETTINGS_DIR):
# self._gconf_client.add_dir(self._GCONF_SETTINGS_DIR, gconf.CLIENT_PRELOAD_NONE)
# load settings
self.closeBracketsAndQuotes = self._load_setting("closeBracketsAndQuotes")
self.completeXML = self._load_setting("completeXML")
self.detectLists = self._load_setting("detectLists")
self.autoindentAfterFunctionOrList = self._load_setting("autoindentAfterFunctionOrList")
@classmethod
def get_instance(cls):
""" Get singleton instance """
if cls.singleton is None:
cls.singleton = cls()
return cls.singleton
def create_configure_dialog(self):
""" Creates configure dialog using GTK """
# make vertically stacking box
vbox = Gtk.VBox()
vbox.set_border_width(6)
# add warning
box = Gtk.HBox()
label = Gtk.Label("Warning: these options are not yet persistent")
box.pack_start(label, False, False, 6)
vbox.pack_start(box, False, True, 0)
# add checkboxes
self._closeBracketsAndQuotesButton = self._add_setting_checkbox(
vbox=vbox,
current_value=self.closeBracketsAndQuotes,
helptext="Auto-close brackets and quotes",
)
self._completeXMLButton = self._add_setting_checkbox(
vbox=vbox,
current_value=self.completeXML,
helptext="Auto-complete XML tags",
)
self._detectListsButton = self._add_setting_checkbox(
vbox=vbox,
current_value=self.detectLists,
helptext="Detect lists",
)
self._autoindentAfterFunctionOrListButton = self._add_setting_checkbox(
vbox=vbox,
current_value=self.autoindentAfterFunctionOrList,
helptext="Auto-indent after function or list",
)
return vbox
def _add_setting_checkbox(self, vbox, current_value, helptext):
box = Gtk.HBox()
check_button = Gtk.CheckButton(helptext)
check_button.set_active(current_value)
box.pack_start(check_button, False, False, 6)
check_button.connect('toggled', self._on_check_button_toggled)
vbox.pack_start(box, False, True, 0)
return check_button
def _on_check_button_toggled(self, *args):
# set class attributes
self.closeBracketsAndQuotes = self._closeBracketsAndQuotesButton.get_active()
self.completeXML = self._completeXMLButton.get_active()
self.detectLists = self._detectListsButton.get_active()
self.autoindentAfterFunctionOrList = self._autoindentAfterFunctionOrListButton.get_active()
# write changes to gconf
self._save_setting("closeBracketsAndQuotes", self.closeBracketsAndQuotes)
self._save_setting("completeXML", self.completeXML)
self._save_setting("detectLists", self.detectLists)
self._save_setting("autoindentAfterFunctionOrList", self.autoindentAfterFunctionOrList)
def _save_setting(self, setting_name, value):
pass
#self._gconf_client.set_bool("{}/{}".format(self._GCONF_SETTINGS_DIR, setting_name), value)
def _load_setting(self, setting_name):
return True
#return self._gconf_client.get_bool("{}/{}".format(self._GCONF_SETTINGS_DIR, setting_name))
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# Operations that indicate some error in the users graph, e.g. a placeholder
# that's introduced outside of the infeed.
_BLACKLISTED_OPS = set([
"Placeholder",
])
# These operations will currently fail to compile, but we should be able to
# support them eventually via CPU offload or extending our operation set.
_NOT_IMPLEMENTED_OPS = set([
"AudioSummary",
"AudioSummaryV2",
"HistogramSummary",
"ImageSummary",
"MergeSummary",
"Print",
"ScalarSummary",
"TensorSummary",
"TensorSummaryV2",
])
_MAX_WARNING_LINES = 5
_TPU_REPLICATE_ATTR = "_tpu_replicate"
def _tpu_system_device_name(job):
"""Returns the device name for the TPU_SYSTEM device of `job`."""
if job is None:
return "/device:TPU_SYSTEM:0"
else:
return "/job:%s/device:TPU_SYSTEM:0" % job
def initialize_system(embedding_config=None, job=None):
"""Initializes a distributed TPU system for use with TensorFlow.
Args:
embedding_config: If not None, an `EmbeddingLayerConfiguration` proto
describing the desired configuration of the hardware embedding lookup
tables. If embedding_config is None, no hardware embeddings can be used.
job: The job (the XXX in TensorFlow device specification /job:XXX)
that contains the TPU devices that will be initialized. If job=None
it is assumed there is only one job in the TensorFlow flock, and an
error will be returned if this assumption does not hold.
Returns:
A serialized `TopologyProto` that describes the TPU system. Note:
the topology must be evaluated using `Session.run` before it can be used.
"""
config_string = ("" if embedding_config is None else
embedding_config.SerializeToString())
with ops.device(_tpu_system_device_name(job)):
return tpu_ops.configure_distributed_tpu(embedding_config=config_string)
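# Hedged usage sketch: the returned topology is an op and must be evaluated
# with Session.run before TPU computations execute; the session setup and
# master address below are illustrative, not prescribed by this module.
#
#   topology_op = initialize_system(job=None)
#   with tf.Session(tpu_master_url) as sess:
#       topology_proto = sess.run(topology_op)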
def shutdown_system(job=None):
"""Shuts down a running a distributed TPU system."""
with ops.device(_tpu_system_device_name(job)):
shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu()
return shutdown_distributed_tpu
def core(num):
"""Returns the device name for a core in a replicated TPU computation.
Args:
num: the virtual core number within each replica to which operators should
be assigned.
Returns:
A device name, suitable for passing to `tf.device()`.
"""
return "device:TPU_REPLICATED_CORE:{}".format(num)
class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside a TPU computation.
The primary role of `TPUReplicateContext` is to mark operators inside a
tpu.replicate() computation with the attribute "_tpu_replicate=XYZ", where XYZ
is a unique name.
We use a `ControlFlowContext` to perform the annotation since it
integrates with Tensorflow constructs like ResourceVariables. For example,
if a `ResourceVariable` is constructed inside a tpu.replicate() block, the
`ResourceVariable` implementation can use
`with ops.control_dependencies(None)` to build the variable's definition
outside the replicated computation.
"""
def __init__(self, name):
super(TPUReplicateContext, self).__init__()
self._name = name
self._unsupported_ops = []
def report_unsupported_operations(self):
if self._unsupported_ops:
op_str = "\n".join([" %s (%s)" % (op.type, op.name)
for op in self._unsupported_ops[:_MAX_WARNING_LINES]])
logging.warning("%d unsupported operations found: \n%s",
len(self._unsupported_ops), op_str)
if len(self._unsupported_ops) > _MAX_WARNING_LINES:
logging.warning("... and %d more" %
(len(self._unsupported_ops) - _MAX_WARNING_LINES))
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
# pylint: disable=protected-access
if op.type in _BLACKLISTED_OPS:
logging.error("Operation of type %s (%s) is not supported on the TPU. "
"Execution will fail if this op is used in the graph. " %
(op.type, op.name))
if op.type in _NOT_IMPLEMENTED_OPS:
self._unsupported_ops.append(op)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
"Non-resource Variables are not supported inside TPU computations "
"(operator name: %s)" % op.name)
if _TPU_REPLICATE_ATTR in op.node_def.attr:
raise ValueError("TPU computations cannot be nested")
op._set_attr(_TPU_REPLICATE_ATTR,
attr_value_pb2.AttrValue(s=compat.as_bytes(self._name)))
# pylint: enable=protected-access
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
def AddValue(self, val):
result = val
if self._outer_context:
result = self._outer_context.AddValue(val)
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
# Define the gradient loop state associated with the TPUReplicateContext to
# be None as the TPUReplicateContext does not get nested nor does the
# grad_state outside the TPUReplicateContext affect the graph inside so the
# grad_state should be as if this is the top-level gradient state.
return None
def replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Builds a graph operator that runs a replicated TPU computation.
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A list of lists of output tensors, indexed by `[replica_num][output_num]`.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
"""
del name
inputs = [[]] if inputs is None else inputs
metadata_kwargs = {}
if device_assignment is not None:
# Turn the Numpy array into a flattened list so we can pass it as an
# operator attribute.
metadata_kwargs = {
"topology":
device_assignment.topology.serialized(),
"device_assignment":
device_assignment.core_assignment.flatten().tolist(),
"computation_shape":
device_assignment.computation_shape.tolist()
}
if ((not isinstance(inputs, list)) or
any(not isinstance(inp, (list, tuple)) for inp in inputs)):
raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")
num_replicas = len(inputs)
# No replicas? Nothing to do.
if num_replicas == 0:
return []
# Converts inputs to Tensors.
inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in inputs]
# Verifies that all replicas have matching numbers and types of inputs
input_types = [x.dtype for x in inputs[0]]
input_arity = len(input_types)
for i in range(num_replicas):
if len(inputs[i]) != input_arity:
raise ValueError("Replicas must have the same number of inputs. "
"Replica 0 had {} inputs, replica {} had {} "
"inputs.".format(input_arity, i, len(inputs[i])))
types = [x.dtype for x in inputs[i]]
if types != input_types:
raise ValueError(
"Replicas must have matching input types. Replica 0 had "
"input types {}, replica {} had input types {}".format(
input_types, i, types))
arg_error = tpu_function.check_function_argument_count(
computation, input_arity, infeed_queue)
if arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s, but the computation needs %s" % (
input_arity, str([i.name for i in inputs[0]]), arg_error))
else:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s and %d additional inputs from infeed,"
" but the computation needs %s" % (input_arity, str(
[i.name
for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
arg_error))
graph = ops.get_default_graph()
# Fan-in: Builds a TPUReplicatedInput node for each input.
computation_inputs = []
for i in range(0, input_arity):
replicas = [inputs[replica][i] for replica in xrange(num_replicas)]
computation_inputs.append(
tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))
context = TPUReplicateContext(name=graph.unique_name("cluster"))
try:
context.Enter()
metadata = tpu_ops.tpu_replicate_metadata(
num_replicas=num_replicas, **metadata_kwargs)
with tpu_function.tpu_shard_context(
num_replicas), ops.control_dependencies([metadata]):
# The EncapsulateTPUComputations rewrite needs to identify the
# replicated arguments inside each computation. Adds identity operators
# tagged with an attribute _tpu_replicated_input to identify the
# replicated inputs.
# pylint: disable=protected-access
with graph._attr_scope({"_tpu_replicated_input":
attr_value_pb2.AttrValue(b=True)}):
computation_inputs = [
array_ops.identity(x, name="replicated_input_{}".format(i))
for i, x in enumerate(computation_inputs)]
# pylint: enable=protected-access
# If there is an infeed queue, adds the dequeued values to the
# computation's inputs.
if infeed_queue is not None:
infeed_queue.set_number_of_shards(num_replicas)
for t in infeed_queue.generate_dequeue_op():
computation_inputs.append(t)
# Only resource variables work inside a TPU computation, so turn on
# resource variables for the computation.
# TODO(phawkins): consider removing this code. It will
# be less confusing to clients if they knowingly choose to use resource
# variables.
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
vscope.set_use_resource(True)
outputs = computation(*computation_inputs)
vscope.set_use_resource(saved_use_resource)
# If the computation only returned one value, makes it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
try:
with ops.device(core(0)):
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
"convertible to Tensors. Got '%s'" % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs
if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU functions must return zero-or more Tensor values followed by "
"zero or more Operations.")
output_arity = len(output_tensors)
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
new_output_tensors = []
for t in output_tensors:
with ops.device(t.device if t.device else core(0)):
new_output_tensors.append(array_ops.identity(t))
output_tensors = new_output_tensors
finally:
context.report_unsupported_operations()
context.Exit()
# Fan-out: Builds a TPUReplicatedOutput node for each output.
outputs = [tpu_ops.tpu_replicated_output(output_tensors[i], num_replicas,
name="output{}".format(i))
for i in xrange(output_arity)]
with ops.control_dependencies(output_operations):
if output_arity == 0:
# Returns a list of NoOps dependent on the replication Op, indexed by
# [replica_num].
return [
control_flow_ops.no_op(name="shard_%d" % i)
for i in range(num_replicas)
]
else:
# Wraps the outputs in identity operators so the names of any possible
# `fetch` nodes are preserved by the replication rewrite.
return [
[array_ops.identity(outputs[out][replica],
name="output_%d_shard_%d" % (out, replica))
for out in xrange(output_arity)]
for replica in xrange(num_replicas)
]
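# Hedged usage sketch (the computation and input values are hypothetical):
# two replicas, each receiving one input tensor, following the
# `[replica_num][input_num]` indexing documented above.
#
#   def computation(x):
#     return x * 2.0
#
#   outputs = replicate(computation,
#                       inputs=[[tf.constant([1.0])],    # replica 0
#                               [tf.constant([2.0])]])   # replica 1
#   # outputs[r][0] is the result tensor for replica r.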
def shard(computation,
inputs=None,
num_shards=1,
input_shard_axes=None,
outputs_from_all_shards=True,
output_shard_axes=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Shards `computation` for parallel execution.
`inputs` must be a list of Tensors or None (equivalent to an empty
list), each of which has a corresponding split axis (from
`input_shard_axes`). Each input is split into `num_shards` pieces
along the corresponding axis, and computation is applied to each
shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
TODO(phawkins): consider adding support for broadcasting Tensors passed
as inputs.
If `outputs_from_all_shards` is true, the outputs from all shards of
`computation` are concatenated back together along their `output_shard_axes`.
Otherwise, each output is taken from an arbitrary shard.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty
list). Each input tensor has a corresponding shard axes, given
by `input_shard_axes`, which must have size divisible by
`num_shards`.
num_shards: The number of shards.
input_shard_axes: A list of dimensions along which to shard `inputs`, or
`None`. `None` means "shard all inputs along dimension 0". If not `None`,
there must be one dimension per input.
outputs_from_all_shards: Boolean or list of boolean. For each output, if
`True`, outputs from all shards are concatenated along the corresponding
`output_shard_axes` entry. Otherwise, each output is taken
from an arbitrary shard. If the argument is a boolean, the argument's
value is used for each output.
output_shard_axes: A list of dimensions along which to concatenate the
outputs of `computation`, or `None`. `None` means "concatenate all outputs
along dimension 0". If not `None`, there must be one dimension per output.
Ignored if `outputs_from_all_shards` is False.
infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
of `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A list of output tensors.
Raises:
ValueError: If num_shards <= 0
ValueError: If len(input_shard_axes) != len(inputs)
ValueError: If len(output_shard_axes) != len(outputs from `computation`)
"""
if num_shards <= 0:
raise ValueError("num_shards must be a positive integer.")
# Converts inputs to Tensors.
inputs = [] if inputs is None else [ops.convert_to_tensor(x) for x in inputs]
if input_shard_axes is None:
input_shard_axes = [0] * len(inputs)
if len(inputs) != len(input_shard_axes):
raise ValueError("Length of input_shard_axes must be equal to the number "
"of inputs.")
if inputs:
# Splits the `inputs` along the corresponding `input_shard_axes`, giving
# lists with layout [input][shard]
split_inputs = [
array_ops.split(x, num_shards, axis=axis)
for (axis, x) in zip(input_shard_axes, inputs)]
# Transposes the input lists to have layout [shard][input]
transposed_inputs = [list(i) for i in zip(*split_inputs)]
else:
transposed_inputs = [[]] * num_shards
outputs = replicate(
computation,
transposed_inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
# There must be at least one shard since num_shards > 0.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
if isinstance(outputs[0], ops.Operation):
# pylint: enable=indexing-exception
# There were no outputs from the computation and replicate returned a list
# of NoOps with control dependencies on the computation. Return the first
# one so it can be used as a control dependency or fetch node.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return [outputs[0]]
# pylint: enable=indexing-exception
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
num_outputs = len(outputs[0])
# pylint: enable=indexing-exception
if output_shard_axes is None:
output_shard_axes = [0] * num_outputs
if num_outputs != len(output_shard_axes):
raise ValueError("Length of output_shard_axes must be equal to the number "
"of outputs.")
if isinstance(outputs_from_all_shards, bool):
outputs_from_all_shards = [outputs_from_all_shards] * num_outputs
if num_outputs != len(outputs_from_all_shards):
raise ValueError("Length of outputs_from_all_shards must be equal to the "
"number of outputs.")
results = []
for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
zip(*outputs)):
if all_shards:
# Concatenate all of the outputs together (use stack for scalars).
shape = x[0].shape
is_scalar = shape is not None and (shape.ndims == 0)
results.append((array_ops.stack(list(x)) if is_scalar
else array_ops.concat(list(x), axis=axis)))
else:
# TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
results.append(x[0])
return results
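# Example usage of shard() (a minimal sketch; assumes a configured TPU system
# and the usual `tf` import):
#
#   def computation(x):
#     return tf.reduce_sum(x, axis=1)
#
#   inputs = [tf.ones([8, 16])]   # split along dimension 0 into two [4, 16] shards
#   outputs = shard(computation, inputs, num_shards=2)
#   # outputs[0] is the per-shard results concatenated back along dimension 0.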
def batch_parallel(computation,
inputs=None,
num_shards=1,
infeed_queue=None,
device_assignment=None,
name=None):
"""Shards `computation` along the batch dimension for parallel execution.
Convenience wrapper around shard().
`inputs` must be a list of Tensors or None (equivalent to an empty
list). Each input is split into `num_shards` pieces along the 0-th
dimension, and computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
The outputs from all shards are concatenated back together along their 0-th
dimension.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty
list). The 0-th dimension of each Tensor must have size
divisible by `num_shards`.
num_shards: The number of shards.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A list of output tensors.
Raises:
ValueError: If `num_shards <= 0`
"""
return shard(
computation,
inputs,
num_shards=num_shards,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
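# Example usage of batch_parallel() (sketch; same assumptions as the shard()
# example above). Each input is split along its 0-th (batch) dimension:
#
#   outputs = batch_parallel(computation, [tf.ones([8, 16])], num_shards=4)
#   # computation runs on four [2, 16] slices; the results are concatenated
#   # back together along dimension 0.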
def rewrite(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Rewrites `computation` for execution on a TPU system.
Args:
computation: A Python function that builds a computation to apply
to the input. If the function takes n inputs, 'inputs' should be
a list of n tensors. If the function returns m outputs, rewrite
will return a list of m tensors.
inputs: A list of input tensors or `None` (equivalent to an empty list).
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: if not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. May be omitted for a single-core computation, in which
case the core attached to task 0, TPU device 0 is used.
name: (Deprecated) Does nothing.
Returns:
A list of output tensors.
"""
if inputs is not None and not isinstance(inputs, (list, tuple)):
raise TypeError("tpu.rewrite() inputs must be a list or tuple")
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return replicate(
computation,
None if inputs is None else [inputs],
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)[0]
# pylint: enable=indexing-exception
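# Example usage of rewrite() (a minimal sketch; assumes a single-core TPU
# computation, so no DeviceAssignment is needed):
#
#   def computation(x):
#     return tf.matmul(x, x, transpose_b=True)
#
#   result = rewrite(computation, [tf.ones([4, 4])])
#   # result is the list of output tensors of the rewritten computation.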
|
|
# -*- coding: utf-8 -*-
#
# VirusTotalApi makes calls to the VirusTotal API.
#
from threat_intel.util.api_cache import ApiCache
from threat_intel.util.http import MultiRequest
class VirusTotalApi(object):
BASE_DOMAIN = u'https://www.virustotal.com/vtapi/v2/'
def __init__(self, api_key, resources_per_req=25, cache_file_name=None, update_cache=True):
"""Establishes basic HTTP params and loads a cache.
Args:
api_key: VirusTotal API key
resources_per_req: Maximum number of resources (hashes, URLs)
to be sent in a single request
cache_file_name: String file name of cache.
update_cache: Whether the cache should be written back to disk when it is closed.
Default is `True`.
"""
self._api_key = api_key
self._resources_per_req = resources_per_req
self._requests = MultiRequest()
# Create an ApiCache if instructed to
self._cache = ApiCache(cache_file_name, update_cache) if cache_file_name else None
@MultiRequest.error_handling
def get_file_reports(self, resources):
"""Retrieves the most recent reports for a set of md5, sha1, and/or sha2 hashes.
Args:
resources: list of string hashes.
Returns:
A dict with the hash as key and the VT report as value.
"""
api_name = 'virustotal-file-reports'
all_responses, resources = self._bulk_cache_lookup(api_name, resources)
resource_chunks = self._prepare_resource_chunks(resources)
response_chunks = self._request_reports("resource", resource_chunks, 'file/report')
self._extract_response_chunks(all_responses, response_chunks, api_name)
return all_responses
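# Example usage (a sketch; assumes a valid VirusTotal API key and, optionally,
# a writable cache file):
#
#   vt = VirusTotalApi('my-api-key', cache_file_name='/tmp/vt_cache.json')
#   reports = vt.get_file_reports(['44d88612fea8a8f36de82e1278abb02f'])
#   # reports maps each submitted hash to its VirusTotal report dict.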
def _extract_all_responses(self, resources, api_endpoint, api_name):
""" Aux function to extract all the API endpoint responses.
Args:
resources: list of string hashes.
api_endpoint: endpoint path
api_name: endpoint name
Returns:
A dict with the hash as key and the VT report as value.
"""
all_responses, resources = self._bulk_cache_lookup(api_name, resources)
resource_chunks = self._prepare_resource_chunks(resources)
response_chunks = self._request_reports("resource", resource_chunks, api_endpoint)
self._extract_response_chunks(all_responses, response_chunks, api_name)
return all_responses
@MultiRequest.error_handling
def get_file_behaviour(self, resources):
"""Retrieves a report about the behaviour of a md5, sha1, and/or sha2 hash of
a file when executed in a sandboxed environment (Cuckoo sandbox).
Args:
resources: list of string hashes.
"""
api_name = 'virustotal-file-behaviour'
api_endpoint = 'file/behaviour'
return self._extract_all_responses(resources, api_endpoint, api_name)
@MultiRequest.error_handling
def get_file_download(self, resources):
"""Retrieves a file from its a md5, sha1, and/or sha2 hash.
Args:
resources: list of string hashes.
Returns:
a file download
"""
api_name = 'virustotal-file-download'
api_endpoint = 'file/download'
return self._extract_all_responses(resources, api_endpoint, api_name)
@MultiRequest.error_handling
def get_file_network_traffic(self, resources):
"""Retrieves a report about the network traffic of a md5, sha1, and/or sha2 hash of
file, when it is executed.
Args:
resources: list of string hashes.
"""
api_name = 'virustotal-file-network-traffic'
api_endpoint = 'file/network-traffic'
return self._extract_all_responses(resources, api_endpoint, api_name)
@MultiRequest.error_handling
def get_domain_reports(self, domains):
"""Retrieves the most recent VT info for a set of domains.
Args:
domains: list of string domains.
Returns:
A dict with the domain as key and the VT report as value.
"""
api_name = 'virustotal-domain-reports'
(all_responses, domains) = self._bulk_cache_lookup(api_name, domains)
responses = self._request_reports("domain", domains, 'domain/report')
for domain, response in zip(domains, responses):
if self._cache:
self._cache.cache_value(api_name, domain, response)
all_responses[domain] = response
return all_responses
@MultiRequest.error_handling
def get_url_distribution(self, params=None):
"""Retrieves a live feed with the latest URLs submitted to VT.
Args:
params: a dictionary with name and value for optional arguments
Returns:
A dict with the VT report.
"""
params = params or {}
all_responses = {}
api_name = 'virustotal-url-distribution'
response_chunks = self._request_reports(params.keys(), params.values(), 'url/distribution')
self._extract_response_chunks(all_responses, response_chunks, api_name)
return all_responses
@MultiRequest.error_handling
def get_file_distribution(self, params=None):
"""Retrieves a live feed with the latest hashes submitted to VT.
Args:
params: a dictionary with names and values for optional arguments,
such as: before (timestamp), after (timestamp), reports (boolean),
limit (maximum number of file items to retrieve).
Example: 'reports': 'true'
Returns:
A dict with the VT report.
"""
params = params or {}
all_responses = {}
api_name = 'virustotal-file-distribution'
response_chunks = self._request_reports(params.keys(), params.values(), 'file/distribution')
self._extract_response_chunks(all_responses, response_chunks, api_name)
return all_responses
@MultiRequest.error_handling
def get_url_reports(self, resources):
"""Retrieves a scan report on a given URL.
Args:
resources: list of URLs.
Returns:
A dict with the URL as key and the VT report as value.
"""
api_name = 'virustotal-url-reports'
(all_responses, resources) = self._bulk_cache_lookup(api_name, resources)
resource_chunks = self._prepare_resource_chunks(resources, '\n')
response_chunks = self._request_reports("resource", resource_chunks, 'url/report')
self._extract_response_chunks(all_responses, response_chunks, api_name)
return all_responses
@MultiRequest.error_handling
def get_ip_reports(self, ips):
"""Retrieves the most recent VT info for a set of ips.
Args:
ips: list of IPs.
Returns:
A dict with the IP as key and the VT report as value.
"""
api_name = 'virustotal-ip-address-reports'
(all_responses, ips) = self._bulk_cache_lookup(api_name, ips)
responses = self._request_reports("ip", ips, 'ip-address/report')
for ip, response in zip(ips, responses):
if self._cache:
self._cache.cache_value(api_name, ip, response)
all_responses[ip] = response
return all_responses
@MultiRequest.error_handling
def get_file_search(self, query):
"""Performs advanced search on samples, matching certain binary/
metadata/detection criteria.
Possible queries: file size, file type, first or last submission to
VT, number of positives, binary content, etc.
Args:
query: dictionary with search arguments
Example: 'query': 'type:peexe size:90kb+ positives:5+ behaviour:"taskkill"'
Returns:
A dict with the VT report.
"""
api_name = 'virustotal-file-search'
(all_responses, query) = self._bulk_cache_lookup(api_name, query)
response_chunks = self._request_reports("query", query, 'file/search')
self._extract_response_chunks(all_responses, response_chunks, api_name)
return all_responses
@MultiRequest.error_handling
def get_file_clusters(self, date):
"""Retrieves file similarity clusters for a given time frame.
Args:
date: the specific date for which we want the clustering details.
Example: 'date': '2013-09-10'
Returns:
A dict with the VT report.
"""
api_name = 'virustotal-file-clusters'
(all_responses, resources) = self._bulk_cache_lookup(api_name, date)
response = self._request_reports("date", date, 'file/clusters')
self._extract_response_chunks(all_responses, response, api_name)
return all_responses
def _bulk_cache_lookup(self, api_name, keys):
"""Performes a bulk cache lookup and returns a tuple with the results
found and the keys missing in the cache. If cached is not configured
it will return an empty dictionary of found results and the initial
list of keys.
Args:
api_name: a string name of the API.
keys: an enumerable of string keys.
Returns:
A tuple: (responses found, missing keys).
"""
if self._cache:
responses = self._cache.bulk_lookup(api_name, keys)
missing_keys = [key for key in keys if key not in responses.keys()]
return (responses, missing_keys)
return ({}, keys)
def _prepare_resource_chunks(self, resources, resource_delim=','):
"""As in some VirusTotal API methods the call can be made for multiple
resources at once this method prepares a list of concatenated resources
according to the maximum number of resources per requests.
Args:
resources: a list of the resources.
resource_delim: a string used to separate the resources.
Default value is a comma.
Returns:
A list of the concatenated resources.
"""
return [self._prepare_resource_chunk(resources, resource_delim, pos)
for pos in xrange(0, len(resources), self._resources_per_req)]
def _prepare_resource_chunk(self, resources, resource_delim, pos):
return resource_delim.join(
resources[pos:pos + self._resources_per_req])
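# Illustrative example: with the default resources_per_req of 25, a list of 60
# hashes is turned into three chunks of 25, 25 and 10 comma-joined hashes, each
# of which becomes a single API request.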
def _request_reports(self, resource_param_name, resources, endpoint_name):
"""Sends multiples requests for the resources to a particular endpoint.
Args:
resource_param_name: a string name of the resource parameter.
resources: list of the resources.
endpoint_name: VirusTotal endpoint URL suffix.
Returns:
A list of the responses.
"""
params = [{resource_param_name: resource, 'apikey': self._api_key} for resource in resources]
return self._requests.multi_get(self.BASE_DOMAIN + endpoint_name, query_params=params)
def _extract_response_chunks(self, all_responses, response_chunks, api_name):
"""Extracts and caches the responses from the response chunks in case
of the responses for the requests containing multiple concatenated
resources. Extracted responses are added to the already cached
responses passed in the all_responses parameter.
Args:
all_responses: a dict containing already cached responses.
response_chunks: a list with response chunks.
api_name: a string name of the API.
"""
for response_chunk in response_chunks:
if not isinstance(response_chunk, list):
response_chunk = [response_chunk]
for response in response_chunk:
if not response:
continue
if self._cache:
self._cache.cache_value(api_name, response['resource'], response)
all_responses[response['resource']] = response
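# Illustrative example: a chunked request for 60 hashes yields several response
# chunks; the loop above flattens them, keys every report by its 'resource'
# field, and writes each one to the cache when caching is enabled.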
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import pkg_resources
import sys
import rpm
import tarfile
from anvil import colorizer
from anvil import exceptions as excp
from anvil import log as logging
from anvil.packaging import base
from anvil.packaging.helpers import pip_helper
from anvil.packaging.helpers import yum_helper
from anvil import settings
from anvil import shell as sh
from anvil import utils
LOG = logging.getLogger(__name__)
# Certain versions of pbr seem to miss these files, which causes the rpmbuild
# phases to not complete correctly. Ensure that we don't miss them.
ENSURE_NOT_MISSING = [
'doc', # Without this one our rpm doc build won't work
'README.rst', # Without this one pbr won't work (thus killing setup.py)
'babel.cfg',
'HACKING',
'AUTHORS',
'ChangeLog',
'CONTRIBUTING.rst',
'LICENSE',
]
class YumInstallHelper(base.InstallHelper):
def pre_install(self, pkg, params=None):
"""pre-install is handled in openstack-deps %pre script.
"""
pass
def post_install(self, pkg, params=None):
"""post-install is handled in openstack-deps %post script.
"""
pass
class YumDependencyHandler(base.DependencyHandler):
OPENSTACK_EPOCH = 2
SPEC_TEMPLATE_DIR = "packaging/specs"
API_NAMES = {
"nova": "Compute",
"glance": "Image",
"keystone": "Identity",
"cinder": "Volume",
"quantum": "Networking",
}
SERVER_NAMES = ["nova", "glance", "keystone", "quantum", "cinder"]
TRANSLATION_NAMES = {
'horizon': "python-django-horizon",
}
REPO_FN = "anvil.repo"
YUM_REPO_DIR = "/etc/yum.repos.d/"
BANNED_PACKAGES = [
'distribute',
'setuptools',
]
SRC_REPOS = {
'anvil': 'anvil-source',
"anvil-deps": "anvil-deps-source",
}
REPOS = ["anvil-deps", "anvil"]
py2rpm_executable = sh.which("py2rpm", ["tools/"])
rpmbuild_executable = sh.which("rpmbuild")
jobs = 2
def __init__(self, distro, root_dir, instances, opts=None):
super(YumDependencyHandler, self).__init__(distro, root_dir, instances, opts)
self.rpmbuild_dir = sh.joinpths(self.deps_dir, "rpmbuild")
self.deps_repo_dir = sh.joinpths(self.deps_dir, "openstack-deps")
self.deps_src_repo_dir = sh.joinpths(self.deps_dir, "openstack-deps-sources")
self.anvil_repo_filename = sh.joinpths(self.deps_dir, self.REPO_FN)
self.helper = yum_helper.Helper()
self.rpm_sources_dir = sh.joinpths(self.rpmbuild_dir, "SOURCES")
self.anvil_repo_dir = sh.joinpths(self.root_dir, "repo")
self._no_remove = None
def py2rpm_start_cmdline(self):
cmdline = [
self.py2rpm_executable,
"--rpm-base",
self.rpmbuild_dir,
]
if self.python_names:
cmdline += [
"--epoch-map",
] + ["%s==%s" % (name, self.OPENSTACK_EPOCH)
for name in self.python_names]
package_map = self.distro._dependency_handler.get("package_map", {})
if package_map:
cmdline += [
"--package-map",
] + ["%s==%s" % (key, value)
for key, value in package_map.iteritems()]
arch_dependent = self.distro._dependency_handler.get(
"arch_dependent", [])
if arch_dependent:
cmdline += [
"--arch-dependent",
] + arch_dependent
return cmdline
def _package_parameters(self, instance):
params = {}
params["release"] = instance.get_option("release", default_value="1")
if '-' in params["release"]:
# NOTE(imelnikov): "-" is prohibited in RPM releases
raise ValueError("Malformed package release: %r" % params["release"])
version_suffix = instance.get_option("version_suffix", default_value="")
if version_suffix and not version_suffix.startswith('.'):
version_suffix = '.' + version_suffix
params['version_suffix'] = version_suffix
return params
def _create_rpmbuild_subdirs(self):
for dirname in (sh.joinpths(self.rpmbuild_dir, "SPECS"),
sh.joinpths(self.rpmbuild_dir, "SOURCES")):
sh.mkdirslist(dirname, tracewriter=self.tracewriter)
def package_instance(self, instance):
with sh.remove_before_after(self.rpmbuild_dir):
self._create_rpmbuild_subdirs()
if instance.name in ["general"]:
self._build_dependencies()
self._move_srpms("anvil-deps")
else:
# Meta packages don't get built.
app_dir = instance.get_option("app_dir")
if sh.isdir(app_dir):
self._build_openstack_package(instance)
self._move_srpms("anvil")
@staticmethod
def _move_files(source_dir, target_dir):
if not sh.isdir(source_dir):
return
for filename in sh.listdir(source_dir, recursive=True, files_only=True):
sh.move(filename, target_dir, force=True)
def build_binary(self):
def _is_src_rpm(filename):
return filename.endswith('.src.rpm')
LOG.info("Installing build requirements")
self.helper.transaction(
install_pkgs=self.requirements["build-requires"],
tracewriter=self.tracewriter)
for repo_name in self.REPOS:
repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
sh.mkdirslist(repo_dir, tracewriter=self.tracewriter)
src_repo_dir = sh.joinpths(self.anvil_repo_dir, self.SRC_REPOS[repo_name])
if sh.isdir(src_repo_dir):
src_repo_files = sh.listdir(src_repo_dir, files_only=True)
src_repo_files = sorted([f for f in src_repo_files if _is_src_rpm(f)])
else:
src_repo_files = []
if not src_repo_files:
continue
LOG.info('Building %s RPM packages from their SRPMs for repo %s using %s jobs',
len(src_repo_files), self.SRC_REPOS[repo_name], self.jobs)
makefile_name = sh.joinpths(self.deps_dir, "binary-%s.mk" % repo_name)
marks_dir = sh.joinpths(self.deps_dir, "marks-binary")
sh.mkdirslist(marks_dir, tracewriter=self.tracewriter)
(_fn, content) = utils.load_template("packaging/makefiles", "binary.mk")
rpmbuild_flags = ("--rebuild --define '_topdir %s'" % self.rpmbuild_dir)
if self.opts.get("usr_only", False):
rpmbuild_flags += "--define 'usr_only 1'"
params = {
"SRC_REPO_DIR": src_repo_dir,
"RPMBUILD_FLAGS": rpmbuild_flags,
"LOGS_DIR": self.log_dir,
}
sh.write_file(makefile_name,
utils.expand_template(content, params),
tracewriter=self.tracewriter)
with sh.remove_before_after(self.rpmbuild_dir):
self._create_rpmbuild_subdirs()
self._execute_make(makefile_name, marks_dir)
self._move_files(sh.joinpths(self.rpmbuild_dir, "RPMS"),
repo_dir)
self._create_repo(repo_name)
def _execute_make(self, filename, marks_dir):
sh.execute(
["make", "-f", filename, "-j", str(self.jobs)],
cwd=marks_dir,
stdout_fh=sys.stdout, stderr_fh=sys.stderr)
def _move_srpms(self, repo_name):
src_repo_name = self.SRC_REPOS[repo_name]
src_repo_dir = sh.joinpths(self.anvil_repo_dir, src_repo_name)
sh.mkdirslist(src_repo_dir, tracewriter=self.tracewriter)
self._move_files(sh.joinpths(self.rpmbuild_dir, "SRPMS"), src_repo_dir)
def _create_repo(self, repo_name):
repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
src_repo_dir = sh.joinpths(self.anvil_repo_dir, self.SRC_REPOS[repo_name])
for a_dir in (repo_dir, src_repo_dir):
if not sh.isdir(a_dir):
sh.mkdirslist(a_dir, tracewriter=self.tracewriter)
cmdline = ["createrepo", a_dir]
LOG.info("Creating repo at %s", a_dir)
sh.execute(cmdline)
repo_filename = sh.joinpths(self.anvil_repo_dir, "%s.repo" % repo_name)
LOG.info("Writing %s", repo_filename)
(_fn, content) = utils.load_template("packaging", "common.repo")
params = {
"repo_name": repo_name,
"baseurl_bin": "file://%s" % repo_dir,
"baseurl_src": "file://%s" % src_repo_dir,
}
sh.write_file(repo_filename, utils.expand_template(content, params),
tracewriter=self.tracewriter)
# Install *.repo file so that anvil deps will be available
# when building OpenStack
system_repo_filename = sh.joinpths(self.YUM_REPO_DIR, "%s.repo" % repo_name)
sh.copy(repo_filename, system_repo_filename)
LOG.info("Copying to %s", system_repo_filename)
self.tracewriter.file_touched(system_repo_filename)
def _get_yum_available(self):
yum_map = collections.defaultdict(list)
for pkg in self.helper.get_available():
for provides in pkg['provides']:
yum_map[provides[0]].append((pkg['version'], pkg['repo']))
return dict(yum_map)
@staticmethod
def _find_yum_match(yum_map, req, rpm_name):
yum_versions = yum_map.get(rpm_name, [])
for (version, repo) in yum_versions:
if version in req:
return (version, repo)
return (None, None)
def filter_download_requires(self):
yum_map = self._get_yum_available()
pip_origins = {}
for line in self.pips_to_install:
req = pip_helper.extract_requirement(line)
pip_origins[req.key] = line
pips_to_download = []
req_to_install = [pip_helper.extract_requirement(line)
for line in self.pips_to_install]
requested_names = [req.key for req in req_to_install]
rpm_to_install = self._convert_names_python2rpm(requested_names)
satisfied_list = []
for (req, rpm_name) in zip(req_to_install, rpm_to_install):
(version, repo) = self._find_yum_match(yum_map, req, rpm_name)
if not repo:
# We need the source requirement in case it's a URL.
pips_to_download.append(pip_origins[req.key])
else:
satisfied_list.append((req, rpm_name, version, repo))
if satisfied_list:
# Organize by repo
repos = collections.defaultdict(list)
for (req, rpm_name, version, repo) in satisfied_list:
repos[repo].append("%s as %s-%s" % (req, rpm_name, version))
for r in sorted(repos.keys()):
header = ("%s Python packages are already available "
"as RPMs from repository %s")
header = header % (len(repos[r]), colorizer.quote(r))
utils.log_iterable(sorted(repos[r]), logger=LOG, header=header)
return pips_to_download
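# Illustrative example: if "six>=1.4" is requested and the yum metadata shows a
# matching python-six version available from some repository, the requirement is
# reported as already satisfied and is left out of the list of pips to download.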
def _build_dependencies(self):
(pips_downloaded, package_files) = self.download_dependencies()
# Analyze what was downloaded and eject things that were pulled in by
# pip as dependencies of a download but which we do not want to build
# or can satisfy by other means.
no_pips = [pkg_resources.Requirement.parse(name).key
for name in self.python_names]
no_pips.extend(self.BANNED_PACKAGES)
yum_map = self._get_yum_available()
pips_keys = set([p.key for p in pips_downloaded])
def _filter_package_files(package_files):
package_reqs = []
package_keys = []
for filename in package_files:
package_details = pip_helper.get_archive_details(filename)
package_reqs.append(package_details['req'])
package_keys.append(package_details['req'].key)
package_rpm_names = self._convert_names_python2rpm(package_keys)
filtered_files = []
for (filename, req, rpm_name) in zip(package_files, package_reqs,
package_rpm_names):
if req.key in no_pips:
LOG.info(("Dependency %s was downloaded additionally "
"but it is disallowed."), colorizer.quote(req))
continue
if req.key in pips_keys:
filtered_files.append(filename)
continue
# See if pip tried to download it but we already can satisfy
# it via yum and avoid building it in the first place...
(_version, repo) = self._find_yum_match(yum_map, req, rpm_name)
if not repo:
filtered_files.append(filename)
else:
LOG.info(("Dependency %s was downloaded additionally "
"but it can be satisfied by %s from repository "
"%s instead."), colorizer.quote(req),
colorizer.quote(rpm_name),
colorizer.quote(repo))
return filtered_files
LOG.info("Filtering %s downloaded files.", len(package_files))
filtered_package_files = _filter_package_files(package_files)
if not filtered_package_files:
LOG.info("No SRPM package dependencies to build.")
return
for filename in package_files:
if filename not in filtered_package_files:
sh.unlink(filename)
package_files = filtered_package_files
makefile_name = sh.joinpths(self.deps_dir, "deps.mk")
marks_dir = sh.joinpths(self.deps_dir, "marks-deps")
sh.mkdirslist(marks_dir, tracewriter=self.tracewriter)
(_fn, content) = utils.load_template("packaging/makefiles", "source.mk")
scripts_dir = sh.abspth(sh.joinpths(settings.TEMPLATE_DIR, "packaging", "scripts"))
py2rpm_options = self.py2rpm_start_cmdline()[1:] + [
"--scripts-dir", scripts_dir,
"--source-only",
"--rpm-base", self.rpmbuild_dir,
]
params = {
"DOWNLOADS_DIR": self.download_dir,
"LOGS_DIR": self.log_dir,
"PY2RPM": self.py2rpm_executable,
"PY2RPM_FLAGS": " ".join(py2rpm_options),
}
sh.write_file(makefile_name,
utils.expand_template(content, params),
tracewriter=self.tracewriter)
LOG.info("Building %s SRPM packages using %s jobs", len(package_files), self.jobs)
self._execute_make(makefile_name, marks_dir)
def _write_spec_file(self, instance, rpm_name, template_name, params):
requires_what = params.get('requires')
if not requires_what:
requires_what = []
requires_python = []
try:
requires_python.extend(instance.egg_info['dependencies'])
except AttributeError:
pass
if requires_python:
requires_what.extend(self._convert_names_python2rpm(requires_python))
params['requires'] = requires_what
params["epoch"] = self.OPENSTACK_EPOCH
content = utils.load_template(self.SPEC_TEMPLATE_DIR, template_name)[1]
spec_filename = sh.joinpths(self.rpmbuild_dir, "SPECS", "%s.spec" % rpm_name)
sh.write_file(spec_filename, utils.expand_template(content, params),
tracewriter=self.tracewriter)
return spec_filename
def _copy_startup_scripts(self, spec_filename):
common_init_content = utils.load_template("packaging",
"common.init")[1]
for src in rpm.spec(spec_filename).sources:
script = sh.basename(src[0])
if not (script.endswith(".init")):
continue
target_filename = sh.joinpths(self.rpm_sources_dir, script)
if sh.isfile(target_filename):
continue
bin_name = utils.strip_prefix_suffix(script, "openstack-", ".init")
if bin_name == "quantum-server":
daemon_args = ("'--config-file=/etc/quantum/plugin.ini"
" --config-file=/etc/quantum/quantum.conf'")
elif bin_name == "quantum-l3-agent":
daemon_args = ("'--config-file=/etc/quantum/l3_agent.ini"
" --config-file=/etc/quantum/quantum.conf'")
elif bin_name == "quantum-dhcp-agent":
daemon_args = ("'--config-file=/etc/quantum/dhcp_agent.ini"
" --config-file=/etc/quantum/quantum.conf'")
else:
daemon_args = ""
params = {
"bin": bin_name,
"package": bin_name.split("-", 1)[0],
"daemon_args": daemon_args,
}
sh.write_file(target_filename,
utils.expand_template(common_init_content, params))
def _copy_sources(self, instance):
other_sources_dir = sh.joinpths(settings.TEMPLATE_DIR,
"packaging", "sources", instance.name)
if sh.isdir(other_sources_dir):
for filename in sh.listdir(other_sources_dir, files_only=True):
sh.copy(filename, self.rpm_sources_dir)
def _copy_patches(self, patches):
for filename in patches:
sh.copy(filename, self.rpm_sources_dir)
def _build_from_spec(self, instance, spec_filename, patches=None):
pkg_dir = instance.get_option('app_dir')
if sh.isfile(sh.joinpths(pkg_dir, "setup.py")):
self._write_python_tarball(instance, pkg_dir, ENSURE_NOT_MISSING)
else:
self._write_git_tarball(pkg_dir, spec_filename)
self._copy_sources(instance)
if patches:
self._copy_patches(patches)
self._copy_startup_scripts(spec_filename)
cmdline = [
self.rpmbuild_executable,
"-bs",
"--define", "_topdir %s" % self.rpmbuild_dir,
spec_filename,
]
out_filename = sh.joinpths(self.log_dir, "rpmbuild-%s.log" % instance.name)
sh.execute_save_output(cmdline, out_filename=out_filename, quiet=True)
def _write_git_tarball(self, pkg_dir, spec_filename):
cmdline = [
"rpm",
"-q",
"--specfile", spec_filename,
"--qf", "%{NAME}-%{VERSION}\n"
]
tar_base = sh.execute(cmdline, cwd=pkg_dir)[0].splitlines()[0].strip()
# git 1.7.1 from RHEL doesn't understand --format=tar.gz
output_filename = sh.joinpths(self.rpm_sources_dir,
"%s.tar" % tar_base)
cmdline = [
"git",
"archive",
"--format=tar",
"--prefix=%s/" % tar_base,
"--output=%s" % output_filename,
"HEAD",
]
sh.execute(cmdline, cwd=pkg_dir)
sh.gzip(output_filename)
sh.unlink(output_filename)
def _write_python_tarball(self, instance, pkg_dir, ensure_exists=None):
def prefix_exists(text, in_what):
for t in in_what:
if t.startswith(text):
return True
return False
pkg_name = instance.egg_info['name']
version = instance.egg_info['version']
base_name = "%s-%s" % (pkg_name, version)
cmdline = [
sys.executable,
"setup.py",
"sdist",
"--formats=tar",
"--dist-dir", self.rpm_sources_dir,
]
out_filename = sh.joinpths(self.log_dir, "sdist-%s.log" % (instance.name))
sh.execute_save_output(cmdline, cwd=pkg_dir, out_filename=out_filename, quiet=True)
archive_name = sh.joinpths(self.rpm_sources_dir, "%s.tar" % (base_name))
if ensure_exists:
with contextlib.closing(tarfile.open(archive_name, 'r')) as tfh:
tar_entries = [t.path for t in tfh.getmembers()]
missing_paths = {}
for path in ensure_exists:
tar_path = sh.joinpths(base_name, path)
source_path = sh.joinpths(pkg_dir, path)
if not prefix_exists(tar_path, tar_entries) and sh.exists(source_path):
missing_paths[tar_path] = source_path
if missing_paths:
utils.log_iterable(sorted(missing_paths.keys()),
logger=LOG,
header='%s paths were not archived and will now be' % (len(missing_paths)))
with contextlib.closing(tarfile.open(archive_name, 'a')) as tfh:
for (tar_path, source_path) in missing_paths.items():
tfh.add(source_path, tar_path)
sh.gzip(archive_name)
sh.unlink(archive_name)
@staticmethod
def _is_client(instance_name, egg_name):
for i in [instance_name, egg_name]:
if i and i.endswith("client"):
return True
return False
def _get_template_and_rpm_name(self, instance):
rpm_name = None
template_name = None
try:
egg_name = instance.egg_info['name']
if self._is_client(instance.name, egg_name):
rpm_name = egg_name
template_name = "python-commonclient.spec"
elif instance.name in self.SERVER_NAMES:
rpm_name = "openstack-%s" % (egg_name)
else:
rpm_name = self.TRANSLATION_NAMES.get(instance.name)
except AttributeError:
rpm_name = instance.name
template_name = "%s.spec" % rpm_name
return (rpm_name, template_name)
def _build_from_app_dir(self, instance, params):
app_dir = instance.get_option('app_dir')
cmdline = self.py2rpm_start_cmdline()
cmdline.extend(["--source-only"])
if 'release' in params:
cmdline.extend(["--release", params["release"]])
cmdline.extend(["--", app_dir])
out_filename = sh.joinpths(self.log_dir, "py2rpm-build-%s.log" % (instance.name))
sh.execute_save_output(cmdline, cwd=app_dir, out_filename=out_filename,
quiet=True)
def _build_openstack_package(self, instance):
params = self._package_parameters(instance)
patches = instance.list_patches("package")
params['patches'] = [sh.basename(fn) for fn in patches]
(rpm_name, template_name) = self._get_template_and_rpm_name(instance)
try:
egg_name = instance.egg_info['name']
params["version"] = instance.egg_info["version"]
if self._is_client(instance.name, egg_name):
client_name = utils.strip_prefix_suffix(egg_name, "python-", "client")
if not client_name:
msg = "Bad client package name %s" % (egg_name)
raise excp.PackageException(msg)
params["clientname"] = client_name
params["apiname"] = self.API_NAMES.get(client_name,
client_name.title())
except AttributeError:
spec_filename = None
if template_name:
spec_filename = sh.joinpths(settings.TEMPLATE_DIR,
self.SPEC_TEMPLATE_DIR,
template_name)
if not spec_filename or not sh.isfile(spec_filename):
rpm_name = None
if rpm_name:
if not template_name:
template_name = "%s.spec" % rpm_name
spec_filename = self._write_spec_file(instance, rpm_name,
template_name, params)
self._build_from_spec(instance, spec_filename, patches)
else:
self._build_from_app_dir(instance, params)
def _convert_names_python2rpm(self, python_names):
if not python_names:
return []
cmdline = self.py2rpm_start_cmdline() + ["--convert"] + python_names
rpm_names = []
for line in sh.execute(cmdline)[0].splitlines():
# format is "Requires: rpm-name <=> X"
if not line.startswith("Requires:"):
continue
line = line[len("Requires:"):].strip()
positions = [line.find(">"), line.find("<"), line.find("=")]
positions = sorted([p for p in positions if p != -1])
if positions:
line = line[0:positions[0]].strip()
if line and line not in rpm_names:
rpm_names.append(line)
return rpm_names
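# Illustrative example: a py2rpm --convert output line such as
#   "Requires: python-oslo-config >= 1.1.0"
# is reduced by the loop above to the bare RPM name "python-oslo-config".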
def _all_rpm_names(self):
# This file should have all the requirements (including test ones)
# that we need to install (and which should have been built as rpms
# in the previous build stages).
gathered_requires = sh.load_file(self.gathered_requires_filename).splitlines()
gathered_requires = [line.strip() for line in gathered_requires if line.strip()]
req_names = []
for line in gathered_requires:
req = pip_helper.extract_requirement(line)
req_names.append(req.key)
rpm_names = set(self._convert_names_python2rpm(req_names))
rpm_names |= self.requirements["requires"]
for inst in self.instances:
rpm_names |= inst.package_names()
return list(rpm_names)
def install(self):
super(YumDependencyHandler, self).install()
self.helper.clean()
# Erase conflicting packages
remove_pkgs = [pkg_name
for pkg_name in self.requirements["conflicts"]
if self.helper.is_installed(pkg_name)]
self.helper.transaction(install_pkgs=self._all_rpm_names(),
remove_pkgs=remove_pkgs,
tracewriter=self.tracewriter)
def uninstall(self):
super(YumDependencyHandler, self).uninstall()
if self.tracereader.exists():
remove_pkgs = self.tracereader.packages_installed()
self.helper.transaction(remove_pkgs=remove_pkgs)
|
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import os
from enum import Enum
from typing import Callable, Set, List
from ipaddress import IPv4Address
import simplejson as json
from pyqryptonight.pyqryptonight import UInt256ToString
from qrl.core import config
from qrl.core.misc import logger, ntp
from qrl.core.misc.expiring_set import ExpiringSet
from qrl.core.notification.Observable import Observable
from qrl.core.notification.ObservableEvent import ObservableEvent
from qrl.core.p2p.IPMetadata import IPMetadata
from qrl.core.p2p.p2pObserver import P2PBaseObserver
from qrl.core.p2p.p2pprotocol import P2PProtocol
from qrl.generated import qrllegacy_pb2, qrl_pb2
class P2PPeerManager(P2PBaseObserver):
class EventType(Enum):
NO_PEERS = 1
def __init__(self):
super().__init__()
self._ping_callLater = None
self._disconnect_callLater = None
self._channels = []
self._peer_node_status = dict()
self._known_peers = set()
self.peers_path = os.path.join(config.user.data_dir,
config.dev.peers_filename)
self.banned_peers_filename = os.path.join(config.user.wallet_dir, config.dev.banned_peers_filename)
self._banned_peer_ips = ExpiringSet(expiration_time=config.user.ban_minutes * 60,
filename=self.banned_peers_filename)
self._observable = Observable(self)
self._p2p_factory = None
def register(self, message_type: EventType, func: Callable):
self._observable.register(message_type, func)
def set_p2p_factory(self, p2p_factory):
self._p2p_factory = p2p_factory
@property
def known_peer_addresses(self):
return self._known_peers
def trusted_peer(self, channel: P2PProtocol):
if self.is_banned(channel.peer):
return False
if channel.valid_message_count < config.dev.trust_min_msgcount:
return False
if channel.connection_time < config.dev.trust_min_conntime:
return False
return True
@property
def trusted_addresses(self):
ip_public_port_set = set()
for peer in self._p2p_factory.connections:
if self.trusted_peer(peer) and peer.public_port != 0:
ip_public_port_set.add(peer.ip_public_port)
return ip_public_port_set
@property
def peer_node_status(self):
return self._peer_node_status
def load_known_peers(self) -> List[str]:
known_peers = []
try:
logger.info('Loading known peers')
with open(self.peers_path, 'r') as infile:
known_peers = json.load(infile)
except Exception as e:
logger.info("Could not open known_peers list")
return [IPMetadata.canonical_full_address(fa) for fa in known_peers]
def save_known_peers(self, known_peers: List[str]):
tmp = list(known_peers)[:3 * config.user.max_peers_limit]
config.create_path(config.user.data_dir)
with open(self.peers_path, 'w') as outfile:
json.dump(tmp, outfile)
def load_peer_addresses(self) -> None:
known_peers = self.load_known_peers()
self._known_peers = self.combine_peer_lists(known_peers, config.user.peer_list)
logger.info('Loaded known peers: %s', self._known_peers)
self.save_known_peers(self._known_peers)
def extend_known_peers(self, new_peer_addresses: set) -> None:
new_addresses = set(new_peer_addresses) - self._known_peers
if self._p2p_factory is not None:
self._p2p_factory.connect_peer(new_addresses)
self._known_peers |= set(new_peer_addresses)
self.save_known_peers(list(self._known_peers))
@staticmethod
def combine_peer_lists(peer_ips, sender_full_addresses: List, check_global=False) -> Set[IPMetadata]:
tmp_list = list(peer_ips)
tmp_list.extend(sender_full_addresses)
answer = set()
for item in tmp_list:
try:
answer.add(IPMetadata.canonical_full_address(item, check_global))
except: # noqa
logger.warning("Invalid Peer Address {}".format(item))
return answer
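# Illustrative example: combine_peer_lists(['10.0.0.1'], ['10.0.0.2:9000'])
# returns the canonical full address of each entry (a default public port is
# appended when one is missing); malformed addresses are logged and skipped.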
def get_better_difficulty(self, current_cumulative_difficulty):
best_cumulative_difficulty = int(UInt256ToString(current_cumulative_difficulty))
local_best = best_cumulative_difficulty
best_channel = None
for channel in self._peer_node_status:
node_chain_state = self._peer_node_status[channel]
node_cumulative_difficulty = int(UInt256ToString(node_chain_state.cumulative_difficulty))
if node_cumulative_difficulty > best_cumulative_difficulty:
best_cumulative_difficulty = node_cumulative_difficulty
best_channel = channel
logger.debug('Local Best Diff : %s', local_best)
logger.debug('Remote Best Diff : %s', best_cumulative_difficulty)
return best_channel
def insert_to_last_connected_peer(self, ip_public_port, connected_peer=False):
known_peers = self.load_known_peers()
connection_set = set()
if self._p2p_factory is not None:
# Prepare set of connected peers
for conn in self._p2p_factory._peer_connections:
connection_set.add(conn.ip_public_port)
# Move the current peer to the last position of connected peers
# or to the start position of disconnected peers
try:
index = 0
if connected_peer:
if ip_public_port in known_peers:
known_peers.remove(ip_public_port)
else:
index = known_peers.index(ip_public_port)
del known_peers[index]
while index < len(known_peers):
if known_peers[index] not in connection_set:
break
index += 1
known_peers.insert(index, ip_public_port)
self.save_known_peers(known_peers)
except ValueError:
pass
def remove_channel(self, channel):
self.insert_to_last_connected_peer(channel.ip_public_port)
if channel in self._channels:
self._channels.remove(channel)
if channel in self._peer_node_status:
del self._peer_node_status[channel]
def new_channel(self, channel):
self._channels.append(channel)
self._peer_node_status[channel] = qrl_pb2.NodeChainState(block_number=0,
header_hash=b'',
cumulative_difficulty=b'\x00' * 32,
timestamp=ntp.getTime())
channel.register(qrllegacy_pb2.LegacyMessage.VE, self.handle_version)
channel.register(qrllegacy_pb2.LegacyMessage.PL, self.handle_peer_list)
channel.register(qrllegacy_pb2.LegacyMessage.CHAINSTATE, self.handle_chain_state)
channel.register(qrllegacy_pb2.LegacyMessage.SYNC, self.handle_sync)
channel.register(qrllegacy_pb2.LegacyMessage.P2P_ACK, self.handle_p2p_acknowledgement)
def _get_version_compatibility(self, version) -> bool:
# Ignore compatibility test on Testnet
if config.dev.hard_fork_heights == config.dev.testnet_hard_fork_heights:
return True
if self._p2p_factory is None:
return True
if self._p2p_factory.chain_height >= config.dev.hard_fork_heights[0]:
try:
major_version = version.split(".")[0]
if int(major_version) < 2:
return False
except Exception:
# Disabled warning as it is not required and could be annoying
# if a peer with dirty version is trying to connect with the node
# logger.warning("Exception while checking version for compatibility")
return True
if self._p2p_factory.chain_height >= config.dev.hard_fork_heights[1]:
try:
major_version = version.split(".")[0]
if int(major_version) < 3:
return False
except Exception:
# Disabled warning as it is not required and could be annoying
# if a peer with dirty version is trying to connect with the node
# logger.warning("Exception while checking version for compatibility")
return True
return True
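# Illustrative example: once the local chain height has passed the first
# hard-fork height, a peer advertising version "1.1.15" fails the check above
# (major version < 2) and is disconnected and banned by handle_version().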
def handle_version(self, source, message: qrllegacy_pb2.LegacyMessage):
"""
Version handshake.
If the incoming version is empty, this node replies with its own version and
genesis_prev_headerhash. Otherwise, the message content is processed and,
on a mismatch, the peer is disconnected (and banned).
"""
self._validate_message(message, qrllegacy_pb2.LegacyMessage.VE)
if not message.veData.version:
msg = qrllegacy_pb2.LegacyMessage(
func_name=qrllegacy_pb2.LegacyMessage.VE,
veData=qrllegacy_pb2.VEData(version=config.dev.version,
genesis_prev_hash=config.user.genesis_prev_headerhash,
rate_limit=config.user.peer_rate_limit))
source.send(msg)
return
logger.info('%s version: %s | genesis prev_headerhash %s',
source.peer.ip,
message.veData.version,
message.veData.genesis_prev_hash)
if not self._get_version_compatibility(message.veData.version):
logger.warning("Disconnecting from Peer %s running incompatible node version %s",
source.peer.ip,
message.veData.version)
source.loseConnection()
self.ban_channel(source)
return
source.rate_limit = min(config.user.peer_rate_limit, message.veData.rate_limit)
if message.veData.genesis_prev_hash != config.user.genesis_prev_headerhash:
logger.warning('%s genesis_prev_headerhash mismatch', source.peer)
logger.warning('Expected: %s', config.user.genesis_prev_headerhash)
logger.warning('Found: %s', message.veData.genesis_prev_hash)
source.loseConnection()
self.ban_channel(source)
def handle_peer_list(self, source, message: qrllegacy_pb2.LegacyMessage):
P2PBaseObserver._validate_message(message, qrllegacy_pb2.LegacyMessage.PL)
if not config.user.enable_peer_discovery:
return
if not message.plData.peer_ips:
return
# If public port is invalid, ignore rest of the data
if not (0 < message.plData.public_port < 65536):
return
source.set_public_port(message.plData.public_port)
self.insert_to_last_connected_peer(source.ip_public_port, True)
sender_peer = IPMetadata(source.peer.ip, message.plData.public_port)
# Check if peer list contains global ip, if it was sent by peer from a global ip address
new_peers = self.combine_peer_lists(message.plData.peer_ips,
[sender_peer.full_address],
check_global=IPv4Address(source.peer.ip).is_global)
logger.info('%s peers data received: %s', source.peer.ip, new_peers)
if self._p2p_factory is not None:
self._p2p_factory.add_new_peers_to_peer_q(new_peers)
def handle_sync(self, source, message: qrllegacy_pb2.LegacyMessage):
P2PBaseObserver._validate_message(message, qrllegacy_pb2.LegacyMessage.SYNC)
if message.syncData.state == '':
if source.factory.synced:
source.send_sync(synced=True)
@staticmethod
def send_node_chain_state(dest_channel, node_chain_state: qrl_pb2.NodeChainState):
# FIXME: Not sure this belongs to peer management
msg = qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.CHAINSTATE,
chainStateData=node_chain_state)
dest_channel.send(msg)
def monitor_chain_state(self):
# FIXME: Not sure this belongs to peer management
current_timestamp = ntp.getTime()
for channel in self._channels:
if channel not in self._peer_node_status:
channel.loseConnection()
continue
delta = current_timestamp - self._peer_node_status[channel].timestamp
if delta > config.user.chain_state_timeout:
del self._peer_node_status[channel]
logger.debug('>>>> No State Update [%18s] %2.2f (TIMEOUT)', channel.peer, delta)
channel.loseConnection()
def broadcast_chain_state(self, node_chain_state: qrl_pb2.NodeChainState):
# FIXME: Not sure this belongs to peer management
# TODO: Verify/Disconnect problematic channels
# Ping all channels
for channel in self._channels:
self.send_node_chain_state(channel, node_chain_state)
self._observable.notify(ObservableEvent(self.EventType.NO_PEERS))
def handle_chain_state(self, source, message: qrllegacy_pb2.LegacyMessage):
P2PBaseObserver._validate_message(message, qrllegacy_pb2.LegacyMessage.CHAINSTATE)
message.chainStateData.timestamp = ntp.getTime() # Receiving time
try:
UInt256ToString(message.chainStateData.cumulative_difficulty)
except ValueError:
logger.warning('Invalid Cumulative Difficulty sent by peer')
source.loseConnection()
return
self._peer_node_status[source] = message.chainStateData
if not self._get_version_compatibility(message.chainStateData.version):
logger.warning("Disconnecting from Peer %s running incompatible node version %s",
source.peer.ip,
message.chainStateData.version)
source.loseConnection()
return
def handle_p2p_acknowledgement(self, source, message: qrllegacy_pb2.LegacyMessage):
P2PBaseObserver._validate_message(message, qrllegacy_pb2.LegacyMessage.P2P_ACK)
source.bytes_sent -= message.p2pAckData.bytes_processed
if source.bytes_sent < 0:
logger.warning('Disconnecting Peer %s', source.peer)
logger.warning('Reason: negative bytes_sent value')
logger.warning('bytes_sent %s', source.bytes_sent)
logger.warning('Ack bytes_processed %s', message.p2pAckData.bytes_processed)
source.loseConnection()
source.send_next()
####################################################
####################################################
####################################################
####################################################
def is_banned(self, peer: IPMetadata):
return peer.ip in self._banned_peer_ips
def ban_channel(self, channel: P2PProtocol):
self._banned_peer_ips.add(channel.peer.ip)
logger.warning('Banned %s', channel.peer.ip)
channel.loseConnection()
def get_peers_stat(self) -> list:
peers_stat = []
# Copying the list of keys, to avoid any change by other thread
for source in list(self.peer_node_status.keys()):
try:
peer_stat = qrl_pb2.PeerStat(peer_ip=source.peer.ip.encode(),
port=source.peer.port,
node_chain_state=self.peer_node_status[source])
peers_stat.append(peer_stat)
except KeyError:
# Ignore in case the key is deleted by other thread causing KeyError
continue
return peers_stat
|
|
from __future__ import absolute_import
from typing import Any
from django.conf import settings
from django.contrib.auth import authenticate, login, get_backends
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext, loader
from django.utils.timezone import now
from django.utils.cache import patch_cache_control
from django.core.exceptions import ValidationError
from django.core import validators
from django.contrib.auth.views import login as django_login_page, \
logout_then_login as django_logout_then_login
from django.forms.models import model_to_dict
from django.core.mail import send_mail
from django.middleware.csrf import get_token
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmAlias, \
RealmFilter, \
PreregistrationUser, get_client, MitUser, UserActivity, PushDeviceToken, \
get_stream, UserPresence, get_recipient, \
split_email_to_domain, resolve_email_to_domain, email_to_username, get_realm, \
completely_open, get_unique_open_realm, remote_user_to_email, email_allowed_for_realm
from zerver.lib.actions import do_change_password, do_change_full_name, do_change_is_admin, \
do_activate_user, do_create_user, \
internal_send_message, update_user_presence, do_events_register, \
get_status_dict, do_change_enable_offline_email_notifications, \
do_change_enable_digest_emails, do_set_realm_name, do_set_realm_restricted_to_domain, \
do_set_realm_invite_required, do_set_realm_invite_by_admins_only, get_default_subs, \
user_email_is_unique, do_invite_users, do_refer_friend, compute_mit_user_fullname, \
do_set_muted_topics, clear_followup_emails_queue, do_update_pointer, realm_user_count
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.forms import RegistrationForm, HomepageForm, ToSForm, \
CreateUserForm, is_inactive, OurAuthenticationForm
from django.views.decorators.csrf import csrf_exempt
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.lib import bugdown
from zerver.lib.validator import check_string, check_list, check_bool
from zerver.decorator import require_post, authenticated_json_post_view, \
has_request_variables, authenticated_json_view, to_non_negative_int, \
JsonableError, get_user_profile_by_email, REQ, require_realm_admin, \
zulip_login_required
from zerver.lib.avatar import avatar_url
from zerver.lib.upload import upload_message_image_through_web_client, \
get_signed_upload_url, get_realm_for_filename
from zerver.lib.response import json_success, json_error
from zerver.lib.utils import statsd, generate_random_token
from zproject.backends import password_auth_enabled, dev_auth_enabled
from confirmation.models import Confirmation
import requests
import subprocess
import calendar
import datetime
import ujson
import simplejson
import re
from six.moves import urllib
import base64
import time
import logging
import jwt
import hashlib
import hmac
from zerver.lib.rest import rest_dispatch as _rest_dispatch
rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs)))
def name_changes_disabled(realm):
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
@require_post
def accounts_register(request):
key = request.POST['key']
confirmation = Confirmation.objects.get(confirmation_key=key)
prereg_user = confirmation.content_object
email = prereg_user.email
mit_beta_user = isinstance(confirmation.content_object, MitUser)
try:
existing_user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
existing_user_profile = None
validators.validate_email(email)
unique_open_realm = get_unique_open_realm()
if unique_open_realm:
realm = unique_open_realm
domain = realm.domain
elif not mit_beta_user and prereg_user.referred_by:
# If someone invited you, you are joining their realm regardless
# of your e-mail address.
#
# MitUsers can't be referred and don't have a referred_by field.
realm = prereg_user.referred_by.realm
domain = realm.domain
if not email_allowed_for_realm(email, realm):
return render_to_response("zerver/closed_realm.html", {"closed_domain_name": realm.name})
elif not mit_beta_user and prereg_user.realm:
# You have a realm set, even though nobody referred you. This
# happens if you sign up through a special URL for an open
# realm.
domain = prereg_user.realm.domain
realm = get_realm(domain)
else:
domain = resolve_email_to_domain(email)
realm = get_realm(domain)
if realm and realm.deactivated:
# The user is trying to register for a deactivated realm. Advise them to
# contact support.
return render_to_response("zerver/deactivated.html",
{"deactivated_domain_name": realm.name,
"zulip_administrator": settings.ZULIP_ADMINISTRATOR})
try:
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
# Mirror dummy users to be activated must be inactive
is_inactive(email)
else:
# Other users should not already exist at all.
user_email_is_unique(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' + urllib.parse.quote_plus(email))
name_validated = False
full_name = None
if request.POST.get('from_confirmation'):
try:
del request.session['authenticated_full_name']
except KeyError:
pass
if domain == "mit.edu":
hesiod_name = compute_mit_user_fullname(email)
form = RegistrationForm(
initial={'full_name': hesiod_name if "@" not in hesiod_name else ""})
name_validated = True
elif settings.POPULATE_PROFILE_VIA_LDAP:
for backend in get_backends():
if isinstance(backend, LDAPBackend):
ldap_attrs = _LDAPUser(backend, backend.django_to_ldap_username(email)).attrs
try:
request.session['authenticated_full_name'] = ldap_attrs[settings.AUTH_LDAP_USER_ATTR_MAP['full_name']][0]
name_validated = True
# We don't use initial= here, because if the form is
# complete (that is, no additional fields need to be
# filled out by the user) we want the form to validate,
# so they can be directly registered without having to
# go through this interstitial.
form = RegistrationForm(
{'full_name': request.session['authenticated_full_name']})
# FIXME: This will result in the user getting
# validation errors if they have to enter a password.
# Not relevant for ONLY_SSO, though.
break
except TypeError:
# Let the user fill out a name and/or try another backend
form = RegistrationForm()
elif 'full_name' in request.POST:
form = RegistrationForm(
initial={'full_name': request.POST.get('full_name')}
)
else:
form = RegistrationForm()
else:
postdata = request.POST.copy()
if name_changes_disabled(realm):
# If we populate profile information via LDAP and we have a
# verified name from you on file, use that. Otherwise, fall
# back to the full name in the request.
try:
postdata.update({'full_name': request.session['authenticated_full_name']})
name_validated = True
except KeyError:
pass
form = RegistrationForm(postdata)
if not password_auth_enabled(realm):
form['password'].field.required = False
if form.is_valid():
if password_auth_enabled(realm):
password = form.cleaned_data['password']
else:
            # SSO users don't need passwords
password = None
full_name = form.cleaned_data['full_name']
short_name = email_to_username(email)
first_in_realm = len(UserProfile.objects.filter(realm=realm, is_bot=False)) == 0
# FIXME: sanitize email addresses and fullname
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
try:
user_profile = existing_user_profile
do_activate_user(user_profile)
do_change_password(user_profile, password)
do_change_full_name(user_profile, full_name)
except UserProfile.DoesNotExist:
user_profile = do_create_user(email, password, realm, full_name, short_name,
prereg_user=prereg_user,
newsletter_data={"IP": request.META['REMOTE_ADDR']})
else:
user_profile = do_create_user(email, password, realm, full_name, short_name,
prereg_user=prereg_user,
newsletter_data={"IP": request.META['REMOTE_ADDR']})
        # This logs you in using the ZulipDummyBackend, since nothing fancier
        # is required here.
login(request, authenticate(username=user_profile.email, use_dummy_backend=True))
if first_in_realm:
do_change_is_admin(user_profile, True)
return HttpResponseRedirect(reverse('zerver.views.initial_invite_page'))
else:
return HttpResponseRedirect(reverse('zerver.views.home'))
return render_to_response('zerver/register.html',
{'form': form,
'company_name': domain,
'email': email,
'key': key,
'full_name': request.session.get('authenticated_full_name', None),
'lock_name': name_validated and name_changes_disabled(realm),
# password_auth_enabled is normally set via our context processor,
# but for the registration form, there is no logged in user yet, so
# we have to set it here.
'password_auth_enabled': password_auth_enabled(realm),
},
context_instance=RequestContext(request))
@zulip_login_required
def accounts_accept_terms(request):
email = request.user.email
domain = resolve_email_to_domain(email)
if request.method == "POST":
form = ToSForm(request.POST)
if form.is_valid():
full_name = form.cleaned_data['full_name']
send_mail('Terms acceptance for ' + full_name,
loader.render_to_string('zerver/tos_accept_body.txt',
{'name': full_name,
'email': email,
'ip': request.META['REMOTE_ADDR'],
'browser': request.META.get('HTTP_USER_AGENT', "Unspecified")}),
settings.EMAIL_HOST_USER,
["[email protected]"])
do_change_full_name(request.user, full_name)
return redirect(home)
else:
form = ToSForm()
return render_to_response('zerver/accounts_accept_terms.html',
{ 'form': form, 'company_name': domain, 'email': email },
context_instance=RequestContext(request))
from zerver.lib.ccache import make_ccache
@authenticated_json_view
@has_request_variables
def webathena_kerberos_login(request, user_profile,
cred=REQ(default=None)):
if cred is None:
return json_error("Could not find Kerberos credential")
if not user_profile.realm.domain == "mit.edu":
return json_error("Webathena login only for mit.edu realm")
try:
parsed_cred = ujson.loads(cred)
user = parsed_cred["cname"]["nameString"][0]
if user == "golem":
# Hack for an mit.edu user whose Kerberos username doesn't
# match what he zephyrs as
user = "ctl"
assert(user == user_profile.email.split("@")[0])
ccache = make_ccache(parsed_cred)
except Exception:
return json_error("Invalid Kerberos cache")
# TODO: Send these data via (say) rabbitmq
try:
subprocess.check_call(["ssh", "[email protected]", "--",
"/home/zulip/zulip/bots/process_ccache",
user,
user_profile.api_key,
base64.b64encode(ccache)])
except Exception:
logging.exception("Error updating the user's ccache")
        return json_error("We were unable to set up mirroring for you")
return json_success()
def api_endpoint_docs(request):
raw_calls = open('templates/zerver/api_content.json', 'r').read()
calls = ujson.loads(raw_calls)
langs = set()
for call in calls:
call["endpoint"] = "%s/v1/%s" % (settings.EXTERNAL_API_URI, call["endpoint"])
call["example_request"]["curl"] = call["example_request"]["curl"].replace("https://api.zulip.com", settings.EXTERNAL_API_URI)
response = call['example_response']
        if '\n' not in response:
# For 1-line responses, pretty-print them
extended_response = response.replace(", ", ",\n ")
else:
extended_response = response
call['rendered_response'] = bugdown.convert("~~~ .py\n" + extended_response + "\n~~~\n", "default")
for example_type in ('request', 'response'):
for lang in call.get('example_' + example_type, []):
langs.add(lang)
return render_to_response(
'zerver/api_endpoints.html', {
'content': calls,
'langs': langs,
},
context_instance=RequestContext(request))
@authenticated_json_post_view
@has_request_variables
def json_invite_users(request, user_profile, invitee_emails=REQ):
if not invitee_emails:
return json_error("You must specify at least one email address.")
invitee_emails = set(re.split(r'[, \n]', invitee_emails))
stream_names = request.POST.getlist('stream')
if not stream_names:
return json_error("You must specify at least one stream for invitees to join.")
# We unconditionally sub you to the notifications stream if it
# exists and is public.
notifications_stream = user_profile.realm.notifications_stream
if notifications_stream and not notifications_stream.invite_only:
stream_names.append(notifications_stream.name)
streams = []
for stream_name in stream_names:
stream = get_stream(stream_name, user_profile.realm)
if stream is None:
return json_error("Stream does not exist: %s. No invites were sent." % (stream_name,))
streams.append(stream)
ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
if ret_error is not None:
return json_error(data=error_data, msg=ret_error)
else:
return json_success()
def create_homepage_form(request, user_info=None):
if user_info:
return HomepageForm(user_info, domain=request.session.get("domain"))
    # Passing an empty fields dict would create a bound form (which Django
    # would then try to validate); that is not the same as omitting it and
    # getting an unbound form.
return HomepageForm(domain=request.session.get("domain"))
def maybe_send_to_registration(request, email, full_name=''):
form = create_homepage_form(request, user_info={'email': email})
request.verified_email = None
if form.is_valid():
# Construct a PreregistrationUser object and send the user over to
# the confirmation view.
prereg_user = None
if settings.ONLY_SSO:
try:
prereg_user = PreregistrationUser.objects.filter(email__iexact=email).latest("invited_at")
except PreregistrationUser.DoesNotExist:
prereg_user = create_preregistration_user(email, request)
else:
prereg_user = create_preregistration_user(email, request)
return redirect("".join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
"/",
# Split this so we only get the part after the /
Confirmation.objects.get_link_for_object(prereg_user).split("/", 3)[3],
'?full_name=',
            # urllib does not handle Unicode, so coerce to an encoded byte string
# Explanation: http://stackoverflow.com/a/5605354/90777
urllib.parse.quote_plus(full_name.encode('utf8')))))
else:
return render_to_response('zerver/accounts_home.html', {'form': form},
context_instance=RequestContext(request))
def login_or_register_remote_user(request, remote_username, user_profile, full_name=''):
if user_profile is None or user_profile.is_mirror_dummy:
# Since execution has reached here, the client specified a remote user
# but no associated user account exists. Send them over to the
# PreregistrationUser flow.
return maybe_send_to_registration(request, remote_user_to_email(remote_username), full_name)
else:
login(request, user_profile)
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
def remote_user_sso(request):
try:
remote_user = request.META["REMOTE_USER"]
except KeyError:
raise JsonableError("No REMOTE_USER set.")
user_profile = authenticate(remote_user=remote_user)
return login_or_register_remote_user(request, remote_user, user_profile)
@csrf_exempt
def remote_user_jwt(request):
try:
json_web_token = request.POST["json_web_token"]
payload, signing_input, header, signature = jwt.load(json_web_token)
except KeyError:
raise JsonableError("No JSON web token passed in request")
except jwt.DecodeError:
raise JsonableError("Bad JSON web token")
remote_user = payload.get("user", None)
if remote_user is None:
raise JsonableError("No user specified in JSON web token claims")
domain = payload.get('realm', None)
if domain is None:
raise JsonableError("No domain specified in JSON web token claims")
email = "%s@%s" % (remote_user, domain)
try:
jwt.verify_signature(payload, signing_input, header, signature,
settings.JWT_AUTH_KEYS[domain])
# We do all the authentication we need here (otherwise we'd have to
# duplicate work), but we need to call authenticate with some backend so
# that the request.backend attribute gets set.
user_profile = authenticate(username=email, use_dummy_backend=True)
except (jwt.DecodeError, jwt.ExpiredSignature):
raise JsonableError("Bad JSON web token signature")
except KeyError:
raise JsonableError("Realm not authorized for JWT login")
except UserProfile.DoesNotExist:
user_profile = None
return login_or_register_remote_user(request, email, user_profile, remote_user)
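# For reference, the JWT accepted above is expected to carry claims like
# (illustrative values, not taken from any real deployment):
#     {"user": "hamlet", "realm": "example.com"}
# which are combined into the email "hamlet@example.com" and whose signature is
# verified against the shared key in settings.JWT_AUTH_KEYS["example.com"].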
def google_oauth2_csrf(request, value):
return hmac.new(get_token(request).encode('utf-8'), value, hashlib.sha256).hexdigest()
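# The OAuth2 `state` parameter built below has the form
# "<unix time>:<hmac of that time>" (illustratively, "1450000000:9f2b...");
# finish_google_oauth2() splits it on ':' and recomputes the HMAC to reject
# forged callbacks.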
def start_google_oauth2(request):
uri = 'https://accounts.google.com/o/oauth2/auth?'
cur_time = str(int(time.time()))
csrf_state = '{}:{}'.format(
cur_time,
google_oauth2_csrf(request, cur_time),
)
    params = {
'response_type': 'code',
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.finish_google_oauth2'),
)),
'scope': 'profile email',
'state': csrf_state,
}
    return redirect(uri + urllib.parse.urlencode(params))
# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)
def extract_json_response(resp):
if requests_json_is_function:
return resp.json()
else:
return resp.json
def finish_google_oauth2(request):
error = request.GET.get('error')
if error == 'access_denied':
return redirect('/')
elif error is not None:
logging.warning('Error from google oauth2 login %r', request.GET)
return HttpResponse(status=400)
value, hmac_value = request.GET.get('state').split(':')
if hmac_value != google_oauth2_csrf(request, value):
logging.warning('Google oauth2 CSRF error')
return HttpResponse(status=400)
resp = requests.post(
'https://www.googleapis.com/oauth2/v3/token',
data={
'code': request.GET.get('code'),
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.finish_google_oauth2'),
)),
'grant_type': 'authorization_code',
},
)
if resp.status_code == 400:
logging.warning('User error converting Google oauth2 login to token: %r' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
raise Exception('Could not convert google oauth2 code to access_token\r%r' % (resp.text,))
access_token = extract_json_response(resp)['access_token']
resp = requests.get(
'https://www.googleapis.com/plus/v1/people/me',
params={'access_token': access_token}
)
if resp.status_code == 400:
logging.warning('Google login failed making info API call: %r' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
raise Exception('Google login failed making API call\r%r' % (resp.text,))
body = extract_json_response(resp)
try:
full_name = body['name']['formatted']
except KeyError:
        # Only Google+ users have a formatted name. I am ignoring i18n here.
full_name = u'{} {}'.format(
body['name']['givenName'], body['name']['familyName']
)
for email in body['emails']:
if email['type'] == 'account':
break
else:
raise Exception('Google oauth2 account email not found %r' % (body,))
email_address = email['value']
user_profile = authenticate(username=email_address, use_dummy_backend=True)
return login_or_register_remote_user(request, email_address, user_profile, full_name)
def login_page(request, **kwargs):
extra_context = kwargs.pop('extra_context', {})
if dev_auth_enabled():
# Development environments usually have only a few users, but
# it still makes sense to limit how many users we render to
# support performance testing with DevAuthBackend.
MAX_DEV_BACKEND_USERS = 100
users_query = UserProfile.objects.select_related().filter(is_bot=False, is_active=True)
users = users_query.order_by('email')[0:MAX_DEV_BACKEND_USERS]
extra_context['direct_admins'] = [u.email for u in users if u.is_realm_admin]
extra_context['direct_users'] = [u.email for u in users if not u.is_realm_admin]
template_response = django_login_page(
request, authentication_form=OurAuthenticationForm,
extra_context=extra_context, **kwargs)
try:
template_response.context_data['email'] = request.GET['email']
except KeyError:
pass
return template_response
def dev_direct_login(request, **kwargs):
# This function allows logging in without a password and should only be called in development environments.
# It may be called if the DevAuthBackend is included in settings.AUTHENTICATION_BACKENDS
if (not dev_auth_enabled()) or settings.PRODUCTION:
# This check is probably not required, since authenticate would fail without an enabled DevAuthBackend.
raise Exception('Direct login not supported.')
email = request.POST['direct_email']
user_profile = authenticate(username=email)
login(request, user_profile)
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
@authenticated_json_post_view
@has_request_variables
def json_bulk_invite_users(request, user_profile,
invitee_emails=REQ(validator=check_list(check_string))):
invitee_emails = set(invitee_emails)
streams = get_default_subs(user_profile)
ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
if ret_error is not None:
return json_error(data=error_data, msg=ret_error)
else:
# Report bulk invites to internal Zulip.
invited = PreregistrationUser.objects.filter(referred_by=user_profile)
internal_message = "%s <`%s`> invited %d people to Zulip." % (
user_profile.full_name, user_profile.email, invited.count())
internal_send_message(settings.NEW_USER_BOT, "stream", "signups",
user_profile.realm.domain, internal_message)
return json_success()
@zulip_login_required
def initial_invite_page(request):
user = request.user
# Only show the bulk-invite page for the first user in a realm
domain_count = len(UserProfile.objects.filter(realm=user.realm))
if domain_count > 1:
return redirect('zerver.views.home')
params = {'company_name': user.realm.domain}
if (user.realm.restricted_to_domain):
params['invite_suffix'] = user.realm.domain
return render_to_response('zerver/initial_invite_page.html', params,
context_instance=RequestContext(request))
@require_post
def logout_then_login(request, **kwargs):
return django_logout_then_login(request, kwargs)
def create_preregistration_user(email, request):
domain = request.session.get("domain")
if completely_open(domain):
# Clear the "domain" from the session object; it's no longer needed
request.session["domain"] = None
# The user is trying to sign up for a completely open realm,
# so create them a PreregistrationUser for that realm
return PreregistrationUser.objects.create(email=email,
realm=get_realm(domain))
# MIT users who are not explicitly signing up for an open realm
# require special handling (They may already have an (inactive)
# account, for example)
if split_email_to_domain(email) == "mit.edu":
return MitUser.objects.get_or_create(email=email)[0]
return PreregistrationUser.objects.create(email=email)
def accounts_home_with_domain(request, domain):
if completely_open(domain):
# You can sign up for a completely open realm through a
# special registration path that contains the domain in the
# URL. We store this information in the session rather than
# elsewhere because we don't have control over URL or form
# data for folks registering through OpenID.
request.session["domain"] = domain
return accounts_home(request)
else:
return HttpResponseRedirect(reverse('zerver.views.accounts_home'))
def send_registration_completion_email(email, request):
"""
Send an email with a confirmation link to the provided e-mail so the user
can complete their registration.
"""
prereg_user = create_preregistration_user(email, request)
context = {'support_email': settings.ZULIP_ADMINISTRATOR,
'voyager': settings.VOYAGER}
Confirmation.objects.send_confirmation(prereg_user, email,
additional_context=context)
def accounts_home(request):
if request.method == 'POST':
form = create_homepage_form(request, user_info=request.POST)
if form.is_valid():
email = form.cleaned_data['email']
send_registration_completion_email(email, request)
return HttpResponseRedirect(reverse('send_confirm', kwargs={'email': email}))
try:
email = request.POST['email']
# Note: We don't check for uniqueness
is_inactive(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' + urllib.parse.quote_plus(email))
else:
form = create_homepage_form(request)
return render_to_response('zerver/accounts_home.html',
{'form': form, 'current_url': request.get_full_path},
context_instance=RequestContext(request))
def approximate_unread_count(user_profile):
    not_in_home_view_recipients = [sub.recipient.id for sub in
Subscription.objects.filter(
user_profile=user_profile, in_home_view=False)]
muted_topics = ujson.loads(user_profile.muted_topics)
    # If muted_topics is empty, it looks like []. If it is non-empty, it looks
    # like [[u'devel', u'test']]. We should switch to a consistent envelope, but
    # until we do we still have both in the database.
if muted_topics:
muted_topics = muted_topics[0]
return UserMessage.objects.filter(
user_profile=user_profile, message_id__gt=user_profile.pointer).exclude(
message__recipient__type=Recipient.STREAM,
message__recipient__id__in=not_in_home_view_recipients).exclude(
message__subject__in=muted_topics).exclude(
flags=UserMessage.flags.read).count()
def sent_time_in_epoch_seconds(user_message):
# user_message is a UserMessage object.
if not user_message:
return None
# We have USE_TZ = True, so our datetime objects are timezone-aware.
# Return the epoch seconds in UTC.
return calendar.timegm(user_message.message.pub_date.utctimetuple())
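# For example (illustrative value): a message published at 2015-01-01 00:00:00
# UTC maps to epoch seconds 1420070400.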
@zulip_login_required
def home(request):
# We need to modify the session object every two weeks or it will expire.
# This line makes reloading the page a sufficient action to keep the
# session alive.
request.session.modified = True
user_profile = request.user
request._email = request.user.email
request.client = get_client("website")
narrow = [] # type: List[List[str]]
narrow_stream = None
narrow_topic = request.GET.get("topic")
if request.GET.get("stream"):
try:
narrow_stream = get_stream(request.GET.get("stream"), user_profile.realm)
assert(narrow_stream is not None)
assert(narrow_stream.is_public())
narrow = [["stream", narrow_stream.name]]
except Exception:
logging.exception("Narrow parsing")
if narrow_topic is not None:
narrow.append(["topic", narrow_topic])
register_ret = do_events_register(user_profile, request.client,
apply_markdown=True, narrow=narrow)
user_has_messages = (register_ret['max_message_id'] != -1)
# Reset our don't-spam-users-with-email counter since the
# user has since logged in
    if user_profile.last_reminder is not None:
user_profile.last_reminder = None
user_profile.save(update_fields=["last_reminder"])
# Brand new users get the tutorial
needs_tutorial = settings.TUTORIAL_ENABLED and \
user_profile.tutorial_status != UserProfile.TUTORIAL_FINISHED
first_in_realm = realm_user_count(user_profile.realm) == 1
# If you are the only person in the realm and you didn't invite
# anyone, we'll continue to encourage you to do so on the frontend.
prompt_for_invites = first_in_realm and \
not PreregistrationUser.objects.filter(referred_by=user_profile).count()
if user_profile.pointer == -1 and user_has_messages:
# Put the new user's pointer at the bottom
#
# This improves performance, because we limit backfilling of messages
# before the pointer. It's also likely that someone joining an
# organization is interested in recent messages more than the very
# first messages on the system.
register_ret['pointer'] = register_ret['max_message_id']
user_profile.last_pointer_updater = request.session.session_key
if user_profile.pointer == -1:
latest_read = None
else:
try:
latest_read = UserMessage.objects.get(user_profile=user_profile,
message__id=user_profile.pointer)
except UserMessage.DoesNotExist:
# Don't completely fail if your saved pointer ID is invalid
logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
latest_read = None
desktop_notifications_enabled = user_profile.enable_desktop_notifications
if narrow_stream is not None:
desktop_notifications_enabled = False
if user_profile.realm.notifications_stream:
notifications_stream = user_profile.realm.notifications_stream.name
else:
notifications_stream = ""
# Pass parameters to the client-side JavaScript code.
# These end up in a global JavaScript Object named 'page_params'.
page_params = dict(
voyager = settings.VOYAGER,
debug_mode = settings.DEBUG,
test_suite = settings.TEST_SUITE,
poll_timeout = settings.POLL_TIMEOUT,
login_page = settings.HOME_NOT_LOGGED_IN,
maxfilesize = settings.MAX_FILE_UPLOAD_SIZE,
password_auth_enabled = password_auth_enabled(user_profile.realm),
have_initial_messages = user_has_messages,
subbed_info = register_ret['subscriptions'],
unsubbed_info = register_ret['unsubscribed'],
email_dict = register_ret['email_dict'],
people_list = register_ret['realm_users'],
bot_list = register_ret['realm_bots'],
initial_pointer = register_ret['pointer'],
initial_presences = register_ret['presences'],
initial_servertime = time.time(), # Used for calculating relative presence age
fullname = user_profile.full_name,
email = user_profile.email,
domain = user_profile.realm.domain,
realm_name = register_ret['realm_name'],
realm_invite_required = register_ret['realm_invite_required'],
realm_invite_by_admins_only = register_ret['realm_invite_by_admins_only'],
realm_restricted_to_domain = register_ret['realm_restricted_to_domain'],
enter_sends = user_profile.enter_sends,
left_side_userlist = register_ret['left_side_userlist'],
referrals = register_ret['referrals'],
realm_emoji = register_ret['realm_emoji'],
needs_tutorial = needs_tutorial,
first_in_realm = first_in_realm,
prompt_for_invites = prompt_for_invites,
notifications_stream = notifications_stream,
# Stream message notification settings:
stream_desktop_notifications_enabled =
user_profile.enable_stream_desktop_notifications,
stream_sounds_enabled = user_profile.enable_stream_sounds,
# Private message and @-mention notification settings:
desktop_notifications_enabled = desktop_notifications_enabled,
sounds_enabled =
user_profile.enable_sounds,
enable_offline_email_notifications =
user_profile.enable_offline_email_notifications,
enable_offline_push_notifications =
user_profile.enable_offline_push_notifications,
twenty_four_hour_time = register_ret['twenty_four_hour_time'],
enable_digest_emails = user_profile.enable_digest_emails,
event_queue_id = register_ret['queue_id'],
last_event_id = register_ret['last_event_id'],
max_message_id = register_ret['max_message_id'],
unread_count = approximate_unread_count(user_profile),
furthest_read_time = sent_time_in_epoch_seconds(latest_read),
staging = settings.ZULIP_COM_STAGING or settings.DEVELOPMENT,
alert_words = register_ret['alert_words'],
muted_topics = register_ret['muted_topics'],
realm_filters = register_ret['realm_filters'],
is_admin = user_profile.is_realm_admin,
can_create_streams = user_profile.can_create_streams(),
name_changes_disabled = name_changes_disabled(user_profile.realm),
has_mobile_devices = num_push_devices_for_user(user_profile) > 0,
autoscroll_forever = user_profile.autoscroll_forever,
default_desktop_notifications = user_profile.default_desktop_notifications,
avatar_url = avatar_url(user_profile),
mandatory_topics = user_profile.realm.mandatory_topics,
show_digest_email = user_profile.realm.show_digest_email,
)
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = get_recipient(Recipient.STREAM, narrow_stream.id)
try:
initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
except IndexError:
initial_pointer = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = initial_pointer
page_params["initial_pointer"] = initial_pointer
page_params["have_initial_messages"] = (initial_pointer != -1)
statsd.incr('views.home')
show_invites = True
# Some realms only allow admins to invite users
if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
show_invites = False
product_name = "Zulip"
page_params['product_name'] = product_name
request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
response = render_to_response('zerver/index.html',
{'user_profile': user_profile,
'page_params' : simplejson.encoder.JSONEncoderForHTML().encode(page_params),
'nofontface': is_buggy_ua(request.META.get("HTTP_USER_AGENT", "Unspecified")),
'avatar_url': avatar_url(user_profile),
'show_debug':
settings.DEBUG and ('show_debug' in request.GET),
'pipeline': settings.PIPELINE,
'show_invites': show_invites,
'is_admin': user_profile.is_realm_admin,
'show_webathena': user_profile.realm.domain == "mit.edu",
'enable_feedback': settings.ENABLE_FEEDBACK,
'embedded': narrow_stream is not None,
'product_name': product_name
},
context_instance=RequestContext(request))
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
return response
@zulip_login_required
def desktop_home(request):
return HttpResponseRedirect(reverse('zerver.views.home'))
def is_buggy_ua(agent):
    """Discriminate CSS served to clients based on User Agent
Due to QTBUG-3467, @font-face is not supported in QtWebKit.
This may get fixed in the future, but for right now we can
just serve the more conservative CSS to all our desktop apps.
"""
return ("Humbug Desktop/" in agent or "Zulip Desktop/" in agent or "ZulipDesktop/" in agent) and \
        "Mac" not in agent
def get_pointer_backend(request, user_profile):
return json_success({'pointer': user_profile.pointer})
@has_request_variables
def update_pointer_backend(request, user_profile,
pointer=REQ(converter=to_non_negative_int)):
if pointer <= user_profile.pointer:
return json_success()
try:
UserMessage.objects.get(
user_profile=user_profile,
message__id=pointer
)
except UserMessage.DoesNotExist:
raise JsonableError("Invalid message ID")
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', "zulipandroid"])
do_update_pointer(user_profile, pointer, update_flags=update_flags)
return json_success()
def generate_client_id():
return generate_random_token(32)
# The order of creation of the various dictionaries is important.
# We filter on {userprofile,stream,subscription_recipient}_ids.
@require_realm_admin
def export(request, user_profile):
if (Message.objects.filter(sender__realm=user_profile.realm).count() > 1000000 or
UserMessage.objects.filter(user_profile__realm=user_profile.realm).count() > 3000000):
return json_error("Realm has too much data for non-batched export.")
response = {}
response['zerver_realm'] = [model_to_dict(x)
for x in Realm.objects.select_related().filter(id=user_profile.realm.id)]
response['zerver_userprofile'] = [model_to_dict(x, exclude=["password", "api_key"])
for x in UserProfile.objects.select_related().filter(realm=user_profile.realm)]
userprofile_ids = set(userprofile["id"] for userprofile in response['zerver_userprofile'])
response['zerver_stream'] = [model_to_dict(x, exclude=["email_token"])
for x in Stream.objects.select_related().filter(realm=user_profile.realm, invite_only=False)]
stream_ids = set(x["id"] for x in response['zerver_stream'])
response['zerver_usermessage'] = [model_to_dict(x) for x in UserMessage.objects.select_related()
if x.user_profile_id in userprofile_ids]
user_recipients = [model_to_dict(x)
for x in Recipient.objects.select_related().filter(type=1)
if x.type_id in userprofile_ids]
stream_recipients = [model_to_dict(x)
for x in Recipient.objects.select_related().filter(type=2)
if x.type_id in stream_ids]
stream_recipient_ids = set(x["id"] for x in stream_recipients)
# only check for subscriptions to streams
response['zerver_subscription'] = [model_to_dict(x) for x in Subscription.objects.select_related()
if x.user_profile_id in userprofile_ids
and x.recipient_id in stream_recipient_ids]
subscription_recipient_ids = set(x["recipient"] for x in response['zerver_subscription'])
huddle_recipients = [model_to_dict(r)
for r in Recipient.objects.select_related().filter(type=3)
if r.type_id in subscription_recipient_ids]
huddle_ids = set(x["type_id"] for x in huddle_recipients)
response["zerver_recipient"] = user_recipients + stream_recipients + huddle_recipients
response['zerver_huddle'] = [model_to_dict(h)
for h in Huddle.objects.select_related()
if h.id in huddle_ids]
recipient_ids = set(x["id"] for x in response['zerver_recipient'])
response["zerver_message"] = [model_to_dict(m) for m in Message.objects.select_related()
if m.recipient_id in recipient_ids
and m.sender_id in userprofile_ids]
for (table, model) in [("defaultstream", DefaultStream),
("realmemoji", RealmEmoji),
("realmalias", RealmAlias),
("realmfilter", RealmFilter)]:
response["zerver_"+table] = [model_to_dict(x) for x in
model.objects.select_related().filter(realm_id=user_profile.realm.id)] # type: ignore
return json_success(response)
def get_profile_backend(request, user_profile):
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1)
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
@require_realm_admin
@has_request_variables
def update_realm(request, user_profile, name=REQ(validator=check_string, default=None),
restricted_to_domain=REQ(validator=check_bool, default=None),
invite_required=REQ(validator=check_bool, default=None),
invite_by_admins_only=REQ(validator=check_bool, default=None)):
realm = user_profile.realm
data = {}
if name is not None and realm.name != name:
do_set_realm_name(realm, name)
data['name'] = 'updated'
if restricted_to_domain is not None and realm.restricted_to_domain != restricted_to_domain:
do_set_realm_restricted_to_domain(realm, restricted_to_domain)
data['restricted_to_domain'] = restricted_to_domain
if invite_required is not None and realm.invite_required != invite_required:
do_set_realm_invite_required(realm, invite_required)
data['invite_required'] = invite_required
if invite_by_admins_only is not None and realm.invite_by_admins_only != invite_by_admins_only:
do_set_realm_invite_by_admins_only(realm, invite_by_admins_only)
data['invite_by_admins_only'] = invite_by_admins_only
return json_success(data)
@authenticated_json_post_view
@has_request_variables
def json_upload_file(request, user_profile):
if len(request.FILES) == 0:
return json_error("You must specify a file to upload")
if len(request.FILES) != 1:
return json_error("You may only upload one file at a time")
user_file = list(request.FILES.values())[0]
if ((settings.MAX_FILE_UPLOAD_SIZE * 1024 * 1024) < user_file._get_size()):
return json_error("File Upload is larger than allowed limit")
uri = upload_message_image_through_web_client(request, user_file, user_profile)
return json_success({'uri': uri})
@zulip_login_required
@has_request_variables
def get_uploaded_file(request, realm_id, filename,
redir=REQ(validator=check_bool, default=True)):
if settings.LOCAL_UPLOADS_DIR is not None:
return HttpResponseForbidden() # Should have been served by nginx
user_profile = request.user
url_path = "%s/%s" % (realm_id, filename)
if realm_id == "unk":
realm_id = get_realm_for_filename(url_path)
if realm_id is None:
# File does not exist
return json_error("That file does not exist.", status=404)
# Internal users can access all uploads so we can receive attachments in cross-realm messages
if user_profile.realm.id == int(realm_id) or user_profile.realm.domain == 'zulip.com':
uri = get_signed_upload_url(url_path)
if redir:
return redirect(uri)
else:
return json_success({'uri': uri})
else:
return HttpResponseForbidden()
@csrf_exempt
@require_post
@has_request_variables
def api_fetch_api_key(request, username=REQ, password=REQ):
# type: (Any, Any, Any) -> Any
return_data = {} # type: Dict[str, bool]
if username == "google-oauth2-token":
user_profile = authenticate(google_oauth2_token=password, return_data=return_data)
else:
user_profile = authenticate(username=username, password=password)
if user_profile is None:
        if return_data.get("valid_attestation") is True:
# We can leak that the user is unregistered iff they present a valid authentication string for the user.
return json_error("This user is not registered; do so from a browser.", data={"reason": "unregistered"}, status=403)
return json_error("Your username or password is incorrect.", data={"reason": "incorrect_creds"}, status=403)
if not user_profile.is_active:
return json_error("Your account has been disabled.", data={"reason": "disabled"}, status=403)
return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@authenticated_json_post_view
@has_request_variables
def json_fetch_api_key(request, user_profile, password=REQ(default='')):
if password_auth_enabled(user_profile.realm) and not user_profile.check_password(password):
return json_error("Your username or password is incorrect.")
return json_success({"api_key": user_profile.api_key})
@csrf_exempt
def api_fetch_google_client_id(request):
if not settings.GOOGLE_CLIENT_ID:
return json_error("GOOGLE_CLIENT_ID is not configured", status=400)
return json_success({"google_client_id": settings.GOOGLE_CLIENT_ID})
def get_status_list(requesting_user_profile):
return {'presences': get_status_dict(requesting_user_profile),
'server_timestamp': time.time()}
@has_request_variables
def update_active_status_backend(request, user_profile, status=REQ,
new_user_input=REQ(validator=check_bool, default=False)):
status_val = UserPresence.status_from_string(status)
if status_val is None:
raise JsonableError("Invalid presence status: %s" % (status,))
else:
update_user_presence(user_profile, request.client, now(), status_val,
new_user_input)
ret = get_status_list(user_profile)
if user_profile.realm.domain == "mit.edu":
try:
activity = UserActivity.objects.get(user_profile = user_profile,
query="get_events_backend",
client__name="zephyr_mirror")
ret['zephyr_mirror_active'] = \
(activity.last_visit.replace(tzinfo=None) >
datetime.datetime.utcnow() - datetime.timedelta(minutes=5))
except UserActivity.DoesNotExist:
ret['zephyr_mirror_active'] = False
return json_success(ret)
@authenticated_json_post_view
def json_get_active_statuses(request, user_profile):
return json_success(get_status_list(user_profile))
# Does not need to be authenticated because it's called from rest_dispatch
@has_request_variables
def api_events_register(request, user_profile,
apply_markdown=REQ(default=False, validator=check_bool),
all_public_streams=REQ(default=None, validator=check_bool)):
return events_register_backend(request, user_profile,
apply_markdown=apply_markdown,
all_public_streams=all_public_streams)
def _default_all_public_streams(user_profile, all_public_streams):
if all_public_streams is not None:
return all_public_streams
else:
return user_profile.default_all_public_streams
def _default_narrow(user_profile, narrow):
default_stream = user_profile.default_events_register_stream
if not narrow and user_profile.default_events_register_stream is not None:
narrow = [('stream', default_stream.name)]
return narrow
@has_request_variables
def events_register_backend(request, user_profile, apply_markdown=True,
all_public_streams=None,
event_types=REQ(validator=check_list(check_string), default=None),
narrow=REQ(validator=check_list(check_list(check_string, length=2)), default=[]),
queue_lifespan_secs=REQ(converter=int, default=0)):
all_public_streams = _default_all_public_streams(user_profile, all_public_streams)
narrow = _default_narrow(user_profile, narrow)
ret = do_events_register(user_profile, request.client, apply_markdown,
event_types, queue_lifespan_secs, all_public_streams,
narrow=narrow)
return json_success(ret)
@authenticated_json_post_view
@has_request_variables
def json_refer_friend(request, user_profile, email=REQ):
if not email:
return json_error("No email address specified")
if user_profile.invites_granted - user_profile.invites_used <= 0:
return json_error("Insufficient invites")
    do_refer_friend(user_profile, email)
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_set_muted_topics(request, user_profile,
muted_topics=REQ(validator=check_list(check_list(check_string, length=2)), default=[])):
do_set_muted_topics(user_profile, muted_topics)
return json_success()
def add_push_device_token(request, user_profile, token, kind, ios_app_id=None):
if token == '' or len(token) > 4096:
return json_error('Empty or invalid length token')
# If another user was previously logged in on the same device and didn't
# properly log out, the token will still be registered to the wrong account
PushDeviceToken.objects.filter(token=token).delete()
# Overwrite with the latest value
token, created = PushDeviceToken.objects.get_or_create(user=user_profile,
token=token,
kind=kind,
ios_app_id=ios_app_id)
if not created:
token.last_updated = now()
token.save(update_fields=['last_updated'])
return json_success()
@has_request_variables
def add_apns_device_token(request, user_profile, token=REQ, appid=REQ(default=settings.ZULIP_IOS_APP_ID)):
return add_push_device_token(request, user_profile, token, PushDeviceToken.APNS, ios_app_id=appid)
@has_request_variables
def add_android_reg_id(request, user_profile, token=REQ):
return add_push_device_token(request, user_profile, token, PushDeviceToken.GCM)
def remove_push_device_token(request, user_profile, token, kind):
if token == '' or len(token) > 4096:
return json_error('Empty or invalid length token')
try:
token = PushDeviceToken.objects.get(token=token, kind=kind)
token.delete()
except PushDeviceToken.DoesNotExist:
return json_error("Token does not exist")
return json_success()
@has_request_variables
def remove_apns_device_token(request, user_profile, token=REQ):
return remove_push_device_token(request, user_profile, token, PushDeviceToken.APNS)
@has_request_variables
def remove_android_reg_id(request, user_profile, token=REQ):
return remove_push_device_token(request, user_profile, token, PushDeviceToken.GCM)
def generate_204(request):
return HttpResponse(content=None, status=204)
def process_unsubscribe(token, type, unsubscribe_function):
try:
confirmation = Confirmation.objects.get(confirmation_key=token)
except Confirmation.DoesNotExist:
return render_to_response('zerver/unsubscribe_link_error.html')
user_profile = confirmation.content_object
unsubscribe_function(user_profile)
return render_to_response('zerver/unsubscribe_success.html',
{"subscription_type": type,
"external_host": settings.EXTERNAL_HOST})
# Email unsubscribe functions. All have the function signature
# processor(user_profile).
def do_missedmessage_unsubscribe(user_profile):
do_change_enable_offline_email_notifications(user_profile, False)
def do_welcome_unsubscribe(user_profile):
clear_followup_emails_queue(user_profile.email)
def do_digest_unsubscribe(user_profile):
do_change_enable_digest_emails(user_profile, False)
# The keys are part of the URL for the unsubscribe link and must be valid
# without encoding.
# The values are a tuple of (display name, unsubscribe function), where the
# display name is what we call this class of email in user-visible text.
email_unsubscribers = {
"missed_messages": ("missed messages", do_missedmessage_unsubscribe),
"welcome": ("welcome", do_welcome_unsubscribe),
"digest": ("digest", do_digest_unsubscribe)
}
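# Illustrative dispatch (the exact unsubscribe URL shape is an assumption, not
# taken from urls.py): a link carrying type="digest" and a valid Confirmation
# token resolves, via the table above, to ("digest", do_digest_unsubscribe),
# so process_unsubscribe() ends up calling
# do_change_enable_digest_emails(user_profile, False).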
# Login NOT required. These are for one-click unsubscribes.
def email_unsubscribe(request, type, token):
if type in email_unsubscribers:
display_name, unsubscribe_function = email_unsubscribers[type]
return process_unsubscribe(token, display_name, unsubscribe_function)
return render_to_response('zerver/unsubscribe_link_error.html', {},
context_instance=RequestContext(request))
|
|
# game.py
# -------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero ([email protected]) and Dan Klein ([email protected]).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
from util import *
from util import raiseNotDefined
import time, os
import traceback
try:
import boinc
_BOINC_ENABLED = True
except ImportError:
_BOINC_ENABLED = False
#######################
# Parts worth reading #
#######################
class Agent:
"""
An agent must define a getAction method, but may also define the
following methods which will be called if they exist:
def registerInitialState(self, state): # inspects the starting state
"""
def __init__(self, index=0):
self.index = index
def getAction(self, state):
"""
The Agent will receive a GameState (from either {pacman, capture, sonar}.py) and
must return an action from Directions.{North, South, East, West, Stop}
"""
raiseNotDefined()
class Directions:
NORTH = 'North'
SOUTH = 'South'
EAST = 'East'
WEST = 'West'
STOP = 'Stop'
LEFT = {NORTH: WEST,
SOUTH: EAST,
EAST: NORTH,
WEST: SOUTH,
STOP: STOP}
    RIGHT = {y: x for x, y in LEFT.items()}
REVERSE = {NORTH: SOUTH,
SOUTH: NORTH,
EAST: WEST,
WEST: EAST,
STOP: STOP}
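# A minimal illustrative agent (an assumption for this write-up, not part of
# the original project): it satisfies the Agent.getAction contract described
# above by always returning the Stop action.
class StopAgent(Agent):
    "A do-nothing agent, useful only as a usage sketch."
    def getAction(self, state):
        return Directions.STOP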
class Configuration:
"""
A Configuration holds the (x,y) coordinate of a character, along with its
traveling direction.
The convention for positions, like a graph, is that (0,0) is the lower left corner, x increases
horizontally and y increases vertically. Therefore, north is the direction of increasing y, or (0,1).
"""
def __init__(self, pos, direction):
self.pos = pos
self.direction = direction
def getPosition(self):
return (self.pos)
def getDirection(self):
return self.direction
def isInteger(self):
x,y = self.pos
return x == int(x) and y == int(y)
def __eq__(self, other):
        if other is None: return False
return (self.pos == other.pos and self.direction == other.direction)
def __hash__(self):
x = hash(self.pos)
y = hash(self.direction)
return hash(x + 13 * y)
def __str__(self):
return "(x,y)="+str(self.pos)+", "+str(self.direction)
def generateSuccessor(self, vector):
"""
Generates a new configuration reached by translating the current
configuration by the action vector. This is a low-level call and does
not attempt to respect the legality of the movement.
Actions are movement vectors.
"""
x, y= self.pos
dx, dy = vector
direction = Actions.vectorToDirection(vector)
if direction == Directions.STOP:
direction = self.direction # There is no stop direction
return Configuration((x + dx, y+dy), direction)
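# A small sketch of the conventions described above (illustrative values):
# starting at the origin and applying the unit vector for North moves the
# configuration to (0, 1) and turns it to face North.
#
#   >>> c = Configuration((0, 0), Directions.STOP)
#   >>> print(c.generateSuccessor((0, 1)))
#   (x,y)=(0, 1), North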
class AgentState:
"""
AgentStates hold the state of an agent (configuration, speed, scared, etc).
"""
def __init__( self, startConfiguration, isPacman ):
self.start = startConfiguration
self.configuration = startConfiguration
self.isPacman = isPacman
self.scaredTimer = 0
def __str__( self ):
if self.isPacman:
return "Pacman: " + str( self.configuration )
else:
return "Ghost: " + str( self.configuration )
def __eq__( self, other ):
        if other is None:
            return False
return self.configuration == other.configuration and self.scaredTimer == other.scaredTimer
def __hash__(self):
return hash(hash(self.configuration) + 13 * hash(self.scaredTimer))
def copy( self ):
state = AgentState( self.start, self.isPacman )
state.configuration = self.configuration
state.scaredTimer = self.scaredTimer
return state
def getPosition(self):
        if self.configuration is None: return None
return self.configuration.getPosition()
def getDirection(self):
return self.configuration.getDirection()
class Grid:
"""
A 2-dimensional array of objects backed by a list of lists. Data is accessed
via grid[x][y] where (x,y) are positions on a Pacman map with x horizontal,
y vertical and the origin (0,0) in the bottom left corner.
The __str__ method constructs an output that is oriented like a pacman board.
"""
def __init__(self, width, height, initialValue=False, bitRepresentation=None):
if initialValue not in [False, True]: raise Exception('Grids can only contain booleans')
self.CELLS_PER_INT = 30
self.width = width
self.height = height
self.data = [[initialValue for y in range(height)] for x in range(width)]
if bitRepresentation:
self._unpackBits(bitRepresentation)
def __getitem__(self, i):
return self.data[i]
def __setitem__(self, key, item):
self.data[key] = item
def __str__(self):
out = [[str(self.data[x][y])[0] for x in range(self.width)] for y in range(self.height)]
out.reverse()
return '\n'.join([''.join(x) for x in out])
def __eq__(self, other):
        if other is None: return False
return self.data == other.data
def __hash__(self):
# return hash(str(self))
base = 1
h = 0
for l in self.data:
for i in l:
if i:
h += base
base *= 2
return hash(h)
def copy(self):
g = Grid(self.width, self.height)
g.data = [x[:] for x in self.data]
return g
def deepCopy(self):
return self.copy()
def shallowCopy(self):
g = Grid(self.width, self.height)
g.data = self.data
return g
    def count(self, item=True):
return sum([x.count(item) for x in self.data])
    def asList(self, key=True):
        cells = []
        for x in range(self.width):
            for y in range(self.height):
                if self[x][y] == key: cells.append((x, y))
        return cells
def packBits(self):
"""
Returns an efficient int list representation
(width, height, bitPackedInts...)
"""
bits = [self.width, self.height]
currentInt = 0
for i in range(self.height * self.width):
bit = self.CELLS_PER_INT - (i % self.CELLS_PER_INT) - 1
x, y = self._cellIndexToPosition(i)
if self[x][y]:
currentInt += 2 ** bit
if (i + 1) % self.CELLS_PER_INT == 0:
bits.append(currentInt)
currentInt = 0
bits.append(currentInt)
return tuple(bits)
def _cellIndexToPosition(self, index):
        # Use integer division so grid indices stay ints under Python 3.
        x = index // self.height
y = index % self.height
return x, y
def _unpackBits(self, bits):
"""
Fills in data from a bit-level representation
"""
cell = 0
for packed in bits:
for bit in self._unpackInt(packed, self.CELLS_PER_INT):
if cell == self.width * self.height: break
x, y = self._cellIndexToPosition(cell)
self[x][y] = bit
cell += 1
def _unpackInt(self, packed, size):
bools = []
if packed < 0: raise ValueError("must be a positive integer")
for i in range(size):
n = 2 ** (self.CELLS_PER_INT - i - 1)
if packed >= n:
bools.append(True)
packed -= n
else:
bools.append(False)
return bools
def reconstituteGrid(bitRep):
    if not isinstance(bitRep, tuple):
return bitRep
width, height = bitRep[:2]
return Grid(width, height, bitRepresentation= bitRep[2:])
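# A round-trip sketch of the packed representation described in Grid.packBits
# (illustrative values): packBits() emits (width, height, packedInts...) and
# reconstituteGrid() rebuilds an equivalent Grid from that tuple.
#
#   >>> g = Grid(2, 2)
#   >>> g[0][0] = True
#   >>> bits = g.packBits()        # (2, 2, 2 ** 29) for this grid
#   >>> reconstituteGrid(bits) == g
#   True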
####################################
# Parts you shouldn't have to read #
####################################
class Actions:
"""
A collection of static methods for manipulating move actions.
"""
# Directions
_directions = {Directions.NORTH: (0, 1),
Directions.SOUTH: (0, -1),
Directions.EAST: (1, 0),
Directions.WEST: (-1, 0),
Directions.STOP: (0, 0)}
_directionsAsList = list(_directions.items())
TOLERANCE = .001
def reverseDirection(action):
if action == Directions.NORTH:
return Directions.SOUTH
if action == Directions.SOUTH:
return Directions.NORTH
if action == Directions.EAST:
return Directions.WEST
if action == Directions.WEST:
return Directions.EAST
return action
reverseDirection = staticmethod(reverseDirection)
def vectorToDirection(vector):
dx, dy = vector
if dy > 0:
return Directions.NORTH
if dy < 0:
return Directions.SOUTH
if dx < 0:
return Directions.WEST
if dx > 0:
return Directions.EAST
return Directions.STOP
vectorToDirection = staticmethod(vectorToDirection)
def directionToVector(direction, speed = 1.0):
dx, dy = Actions._directions[direction]
return (dx * speed, dy * speed)
directionToVector = staticmethod(directionToVector)
def getPossibleActions(config, walls):
possible = []
x, y = config.pos
x_int, y_int = int(x + 0.5), int(y + 0.5)
# In between grid points, all agents must continue straight
if (abs(x - x_int) + abs(y - y_int) > Actions.TOLERANCE):
return [config.getDirection()]
for dir, vec in Actions._directionsAsList:
dx, dy = vec
next_y = y_int + dy
next_x = x_int + dx
if not walls[next_x][next_y]: possible.append(dir)
return possible
getPossibleActions = staticmethod(getPossibleActions)
def getLegalNeighbors(position, walls):
x,y = position
x_int, y_int = int(x + 0.5), int(y + 0.5)
neighbors = []
for dir, vec in Actions._directionsAsList:
dx, dy = vec
next_x = x_int + dx
if next_x < 0 or next_x == walls.width: continue
next_y = y_int + dy
if next_y < 0 or next_y == walls.height: continue
if not walls[next_x][next_y]: neighbors.append((next_x, next_y))
return neighbors
getLegalNeighbors = staticmethod(getLegalNeighbors)
def getSuccessor(position, action):
dx, dy = Actions.directionToVector(action)
x, y = position
return (x + dx, y + dy)
getSuccessor = staticmethod(getSuccessor)
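# A quick sketch (illustrative values) of the helpers above: directionToVector
# and vectorToDirection are inverses for the unit vectors in _directions.
#
#   >>> Actions.directionToVector(Directions.NORTH)
#   (0.0, 1.0)
#   >>> Actions.vectorToDirection((0, 1))
#   'North'
#   >>> Actions.getSuccessor((3, 4), Directions.WEST)
#   (2.0, 4.0)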
class GameStateData:
"""
"""
def __init__( self, prevState = None ):
"""
Generates a new data packet by copying information from its predecessor.
"""
        if prevState is not None:
self.food = prevState.food.shallowCopy()
self.capsules = prevState.capsules[:]
self.agentStates = self.copyAgentStates( prevState.agentStates )
self.layout = prevState.layout
self._eaten = prevState._eaten
self.score = prevState.score
self._foodEaten = None
self._capsuleEaten = None
self._agentMoved = None
self._lose = False
self._win = False
self.scoreChange = 0
def deepCopy( self ):
state = GameStateData( self )
state.food = self.food.deepCopy()
state.layout = self.layout.deepCopy()
state._agentMoved = self._agentMoved
state._foodEaten = self._foodEaten
state._capsuleEaten = self._capsuleEaten
return state
def copyAgentStates( self, agentStates ):
copiedStates = []
for agentState in agentStates:
copiedStates.append( agentState.copy() )
return copiedStates
def __eq__( self, other ):
"""
Allows two states to be compared.
"""
        if other is None: return False
# TODO Check for type of other
if not self.agentStates == other.agentStates: return False
if not self.food == other.food: return False
if not self.capsules == other.capsules: return False
if not self.score == other.score: return False
return True
def __hash__( self ):
"""
Allows states to be keys of dictionaries.
"""
for i, state in enumerate( self.agentStates ):
try:
int(hash(state))
except TypeError as e:
print(e)
#hash(state)
return int((hash(tuple(self.agentStates)) + 13*hash(self.food) + 113* hash(tuple(self.capsules)) + 7 * hash(self.score)) % 1048575 )
def __str__( self ):
width, height = self.layout.width, self.layout.height
map = Grid(width, height)
        if isinstance(self.food, tuple):
self.food = reconstituteGrid(self.food)
for x in range(width):
for y in range(height):
food, walls = self.food, self.layout.walls
map[x][y] = self._foodWallStr(food[x][y], walls[x][y])
for agentState in self.agentStates:
            if agentState is None: continue
            if agentState.configuration is None: continue
x,y = [int( i ) for i in nearestPoint( agentState.configuration.pos )]
agent_dir = agentState.configuration.direction
if agentState.isPacman:
map[x][y] = self._pacStr( agent_dir )
else:
map[x][y] = self._ghostStr( agent_dir )
for x, y in self.capsules:
map[x][y] = 'o'
return str(map) + ("\nScore: %d\n" % self.score)
def _foodWallStr( self, hasFood, hasWall ):
if hasFood:
return '.'
elif hasWall:
return '%'
else:
return ' '
def _pacStr( self, dir ):
if dir == Directions.NORTH:
return 'v'
if dir == Directions.SOUTH:
return '^'
if dir == Directions.WEST:
return '>'
return '<'
def _ghostStr( self, dir ):
return 'G'
if dir == Directions.NORTH:
return 'M'
if dir == Directions.SOUTH:
return 'W'
if dir == Directions.WEST:
return '3'
return 'E'
def initialize( self, layout, numGhostAgents ):
"""
Creates an initial game state from a layout array (see layout.py).
"""
self.food = layout.food.copy()
self.capsules = layout.capsules[:]
self.layout = layout
self.score = 0
self.scoreChange = 0
self.agentStates = []
numGhosts = 0
for isPacman, pos in layout.agentPositions:
if not isPacman:
if numGhosts == numGhostAgents: continue # Max ghosts reached already
else: numGhosts += 1
self.agentStates.append( AgentState( Configuration( pos, Directions.STOP), isPacman) )
self._eaten = [False for a in self.agentStates]
class Game:
"""
The Game manages the control flow, soliciting actions from agents.
"""
def __init__( self, agents, display, rules, startingIndex=0, muteAgents=False, catchExceptions=False ):
self.agentCrashed = False
self.agents = agents
self.display = display
self.rules = rules
self.startingIndex = startingIndex
self.gameOver = False
self.muteAgents = muteAgents
self.catchExceptions = catchExceptions
self.moveHistory = []
self.totalAgentTimes = [0 for agent in agents]
self.totalAgentTimeWarnings = [0 for agent in agents]
self.agentTimeout = False
def getProgress(self):
if self.gameOver:
return 1.0
else:
return self.rules.getProgress(self)
def _agentCrash( self, agentIndex, quiet=False):
"Helper method for handling agent crashes"
if not quiet: traceback.print_exc()
self.gameOver = True
self.agentCrashed = True
self.rules.agentCrash(self, agentIndex)
OLD_STDOUT = None
OLD_STDERR = None
def mute(self):
if not self.muteAgents: return
global OLD_STDOUT, OLD_STDERR
import io
OLD_STDOUT = sys.stdout
OLD_STDERR = sys.stderr
sys.stdout = io.StringIO()
sys.stderr = io.StringIO()
def unmute(self):
if not self.muteAgents: return
global OLD_STDOUT, OLD_STDERR
sys.stdout.close()
sys.stderr.close()
# Revert stdout/stderr to originals
sys.stdout = OLD_STDOUT
sys.stderr = OLD_STDERR
def run( self ):
"""
Main control loop for game play.
"""
self.display.initialize(self.state.data)
self.numMoves = 0
###self.display.initialize(self.state.makeObservation(1).data)
# inform learning agents of the game start
for i in range(len(self.agents)):
agent = self.agents[i]
if not agent:
# this is a null agent, meaning it failed to load
# the other team wins
self._agentCrash(i, quiet=True)
return
if ("registerInitialState" in dir(agent)):
self.mute()
if self.catchExceptions:
try:
timed_func = TimeoutFunction(agent.registerInitialState, int(self.rules.getMaxStartupTime(i)))
try:
start_time = time.time()
timed_func(self.state.deepCopy())
time_taken = time.time() - start_time
self.totalAgentTimes[i] += time_taken
except TimeoutFunctionException:
print(("Agent %d ran out of time on startup!" % i))
self.unmute()
self.agentTimeout = True
self._agentCrash(i, quiet=True)
return
except Exception as data:
self.unmute()
self._agentCrash(i, quiet=True)
return
else:
agent.registerInitialState(self.state.deepCopy())
## TODO: could this exceed the total time
self.unmute()
agentIndex = self.startingIndex
numAgents = len( self.agents )
while not self.gameOver:
# Fetch the next agent
agent = self.agents[agentIndex]
move_time = 0
skip_action = False
# Generate an observation of the state
if 'observationFunction' in dir( agent ):
self.mute()
if self.catchExceptions:
try:
timed_func = TimeoutFunction(agent.observationFunction, int(self.rules.getMoveTimeout(agentIndex)))
try:
start_time = time.time()
observation = timed_func(self.state.deepCopy())
except TimeoutFunctionException:
skip_action = True
move_time += time.time() - start_time
self.unmute()
except Exception as data:
self.unmute()
self._agentCrash(agentIndex, quiet=True)
return
else:
observation = agent.observationFunction(self.state.deepCopy())
self.unmute()
else:
observation = self.state.deepCopy()
# Solicit an action
action = None
self.mute()
if self.catchExceptions:
try:
timed_func = TimeoutFunction(agent.getAction, int(self.rules.getMoveTimeout(agentIndex)) - int(move_time))
try:
start_time = time.time()
if skip_action:
raise TimeoutFunctionException()
action = timed_func( observation )
except TimeoutFunctionException:
print(("Agent %d timed out on a single move!" % agentIndex))
self.agentTimeout = True
self.unmute()
self._agentCrash(agentIndex, quiet=True)
return
move_time += time.time() - start_time
if move_time > self.rules.getMoveWarningTime(agentIndex):
self.totalAgentTimeWarnings[agentIndex] += 1
print(("Agent %d took too long to make a move! This is warning %d" % (agentIndex, self.totalAgentTimeWarnings[agentIndex])))
if self.totalAgentTimeWarnings[agentIndex] > self.rules.getMaxTimeWarnings(agentIndex):
print(("Agent %d exceeded the maximum number of warnings: %d" % (agentIndex, self.totalAgentTimeWarnings[agentIndex])))
self.agentTimeout = True
self.unmute()
self._agentCrash(agentIndex, quiet=True)
self.totalAgentTimes[agentIndex] += move_time
#print "Agent: %d, time: %f, total: %f" % (agentIndex, move_time, self.totalAgentTimes[agentIndex])
if self.totalAgentTimes[agentIndex] > self.rules.getMaxTotalTime(agentIndex):
print(("Agent %d ran out of time! (time: %1.2f)" % (agentIndex, self.totalAgentTimes[agentIndex])))
self.agentTimeout = True
self.unmute()
self._agentCrash(agentIndex, quiet=True)
return
self.unmute()
except Exception as data:
self.unmute()
self._agentCrash(agentIndex)
return
else:
action = agent.getAction(observation)
self.unmute()
# Execute the action
self.moveHistory.append( (agentIndex, action) )
if self.catchExceptions:
try:
self.state = self.state.generateSuccessor( agentIndex, action )
except Exception as data:
self._agentCrash(agentIndex)
return
else:
self.state = self.state.generateSuccessor( agentIndex, action )
# Change the display
self.display.update( self.state.data )
###idx = agentIndex - agentIndex % 2 + 1
###self.display.update( self.state.makeObservation(idx).data )
# Allow for game specific conditions (winning, losing, etc.)
self.rules.process(self.state, self)
# Track progress
if agentIndex == numAgents - 1: self.numMoves += 1  # one full round of moves completed
# Next agent
agentIndex = ( agentIndex + 1 ) % numAgents
if _BOINC_ENABLED:
boinc.set_fraction_done(self.getProgress())
# inform a learning agent of the game result
for agent in self.agents:
if "final" in dir( agent ) :
try:
self.mute()
agent.final( self.state )
self.unmute()
except Exception as data:
if not self.catchExceptions: raise
self.unmute()
print(("Exception",data))
self._agentCrash(agent.index)
return
self.display.finish()
|
|
from __future__ import unicode_literals
import re
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
from django.utils.encoding import force_text
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils import six
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
@deconstructible
class RegexValidator(object):
regex = ''
message = _('Enter a valid value.')
code = 'invalid'
inverse_match = False
flags = 0
def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if inverse_match is not None:
self.inverse_match = inverse_match
if flags is not None:
self.flags = flags
if self.flags and not isinstance(self.regex, six.string_types):
raise TypeError("If the flags are set, regex must be a regular expression string.")
# Compile the regex if it was not passed pre-compiled.
if isinstance(self.regex, six.string_types):
self.regex = re.compile(self.regex, self.flags)
def __call__(self, value):
"""
Validates that the input contains a match for the regular expression when
inverse_match is False (and no match when it is True); raises ValidationError otherwise.
"""
if not (self.inverse_match is not bool(self.regex.search(
force_text(value)))):
raise ValidationError(self.message, code=self.code)
def __eq__(self, other):
return (
isinstance(other, RegexValidator) and
self.regex.pattern == other.regex.pattern and
self.regex.flags == other.regex.flags and
(self.message == other.message) and
(self.code == other.code) and
(self.inverse_match == other.inverse_match)
)
def __ne__(self, other):
return not (self == other)
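# Illustrative usage (not part of the original module): a RegexValidator is a
# callable that raises ValidationError when the value does not satisfy the
# pattern, e.g.
#   validate_hex = RegexValidator(r'^[0-9a-f]+$', 'Enter a valid hex string.')
#   validate_hex('deadbeef')  # passes silently
#   validate_hex('nope!')     # raises ValidationError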
@deconstructible
class URLValidator(RegexValidator):
regex = re.compile(
r'^(?:[a-z0-9\.\-]*)://' # scheme is validated separately
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
message = _('Enter a valid URL.')
schemes = ['http', 'https', 'ftp', 'ftps']
def __init__(self, schemes=None, **kwargs):
super(URLValidator, self).__init__(**kwargs)
if schemes is not None:
self.schemes = schemes
def __call__(self, value):
value = force_text(value)
# Check first if the scheme is valid
scheme = value.split('://')[0].lower()
if scheme not in self.schemes:
raise ValidationError(self.message, code=self.code)
# Then check full URL
try:
super(URLValidator, self).__call__(value)
except ValidationError as e:
# Trivial case failed. Try for possible IDN domain
if value:
scheme, netloc, path, query, fragment = urlsplit(value)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlunsplit((scheme, netloc, path, query, fragment))
super(URLValidator, self).__call__(url)
else:
raise
else:
url = value
def validate_integer(value):
try:
int(value)
except (ValueError, TypeError):
raise ValidationError(_('Enter a valid integer.'), code='invalid')
@deconstructible
class EmailValidator(object):
message = _('Enter a valid email address.')
code = 'invalid'
user_regex = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"$)', # quoted-string
re.IGNORECASE)
domain_regex = re.compile(
# max length of the domain is 249: 254 (max email length) minus one
# period, two characters for the TLD, @ sign, & one character before @.
r'(?:[A-Z0-9](?:[A-Z0-9-]{0,247}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))$',
re.IGNORECASE)
literal_regex = re.compile(
# literal form, ipv4 or ipv6 address (SMTP 4.1.3)
r'\[([A-f0-9:\.]+)\]$',
re.IGNORECASE)
domain_whitelist = ['localhost']
def __init__(self, message=None, code=None, whitelist=None):
if message is not None:
self.message = message
if code is not None:
self.code = code
if whitelist is not None:
self.domain_whitelist = whitelist
def __call__(self, value):
value = force_text(value)
if not value or '@' not in value:
raise ValidationError(self.message, code=self.code)
user_part, domain_part = value.rsplit('@', 1)
if not self.user_regex.match(user_part):
raise ValidationError(self.message, code=self.code)
if (domain_part not in self.domain_whitelist and
not self.validate_domain_part(domain_part)):
# Try for possible IDN domain-part
try:
domain_part = domain_part.encode('idna').decode('ascii')
if self.validate_domain_part(domain_part):
return
except UnicodeError:
pass
raise ValidationError(self.message, code=self.code)
def validate_domain_part(self, domain_part):
if self.domain_regex.match(domain_part):
return True
literal_match = self.literal_regex.match(domain_part)
if literal_match:
ip_address = literal_match.group(1)
try:
validate_ipv46_address(ip_address)
return True
except ValidationError:
pass
return False
def __eq__(self, other):
return (
isinstance(other, EmailValidator) and
(self.domain_whitelist == other.domain_whitelist) and
(self.message == other.message) and
(self.code == other.code)
)
validate_email = EmailValidator()
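# Illustrative usage (not part of the original module):
#   validate_email('user@example.com')  # passes
#   validate_email('user@localhost')    # passes via domain_whitelist
#   validate_email('not-an-email')      # raises ValidationError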
slug_re = re.compile(r'^[-a-zA-Z0-9_]+$')
validate_slug = RegexValidator(
slug_re,
_("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."),
'invalid'
)
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
validate_ipv4_address = RegexValidator(ipv4_re, _('Enter a valid IPv4 address.'), 'invalid')
def validate_ipv6_address(value):
if not is_valid_ipv6_address(value):
raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid')
def validate_ipv46_address(value):
try:
validate_ipv4_address(value)
except ValidationError:
try:
validate_ipv6_address(value)
except ValidationError:
raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid')
ip_address_validator_map = {
'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')),
'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')),
'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')),
}
def ip_address_validators(protocol, unpack_ipv4):
"""
Depending on the given parameters returns the appropriate validators for
the GenericIPAddressField.
This code is here, because it is exactly the same for the model and the form field.
"""
if protocol != 'both' and unpack_ipv4:
raise ValueError(
"You can only use `unpack_ipv4` if `protocol` is set to 'both'")
try:
return ip_address_validator_map[protocol.lower()]
except KeyError:
raise ValueError("The protocol '%s' is unknown. Supported: %s"
% (protocol, list(ip_address_validator_map)))
comma_separated_int_list_re = re.compile(r'^[\d,]+$')
validate_comma_separated_integer_list = RegexValidator(
comma_separated_int_list_re,
_('Enter only digits separated by commas.'),
'invalid'
)
@deconstructible
class BaseValidator(object):
compare = lambda self, a, b: a is not b
clean = lambda self, x: x
message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).')
code = 'limit_value'
def __init__(self, limit_value, message=None):
self.limit_value = limit_value
if message:
self.message = message
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': self.limit_value, 'show_value': cleaned, 'value': value}
if self.compare(cleaned, self.limit_value):
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
(self.limit_value == other.limit_value)
and (self.message == other.message)
and (self.code == other.code)
)
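# The subclasses below only override `compare` (the failure condition) and,
# for the length validators, `clean` (how the compared value is derived from
# the input); the error message interpolation and ValidationError raising all
# come from BaseValidator.__call__.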
@deconstructible
class MaxValueValidator(BaseValidator):
compare = lambda self, a, b: a > b
message = _('Ensure this value is less than or equal to %(limit_value)s.')
code = 'max_value'
@deconstructible
class MinValueValidator(BaseValidator):
compare = lambda self, a, b: a < b
message = _('Ensure this value is greater than or equal to %(limit_value)s.')
code = 'min_value'
@deconstructible
class MinLengthValidator(BaseValidator):
compare = lambda self, a, b: a < b
clean = lambda self, x: len(x)
message = ungettext_lazy(
'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'min_length'
@deconstructible
class MaxLengthValidator(BaseValidator):
compare = lambda self, a, b: a > b
clean = lambda self, x: len(x)
message = ungettext_lazy(
'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'max_length'
|
|
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import json
import os
import subprocess
import sys
import unittest
import mock
import merge_results
import merge_steps
import merge_lib as merger
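# These tests exercise the merge front-ends without touching the filesystem or
# invoking llvm-profdata: os.walk, os.remove and subprocess.check_call are
# mocked, and the assertions verify the exact command lines the merger would
# have executed.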
class MergeProfilesTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(MergeProfilesTest, self).__init__(*args, **kwargs)
self.maxDiff = None
def test_merge_script_api_parameters(self):
"""Test the step-level merge front-end."""
build_properties = json.dumps({
'some': {
'complicated': ['nested', {
'json': None,
'object': 'thing',
}]
}
})
task_output_dir = 'some/task/output/dir'
profdata_dir = '/some/different/path/to/profdata/default.profdata'
profdata_file = os.path.join(profdata_dir, 'base_unittests.profdata')
args = [
'script_name', '--output-json', 'output.json', '--build-properties',
build_properties, '--summary-json', 'summary.json', '--task-output-dir',
task_output_dir, '--profdata-dir', profdata_dir, '--llvm-profdata',
'llvm-profdata', 'a.json', 'b.json', 'c.json', '--test-target-name',
'base_unittests', '--sparse'
]
with mock.patch.object(merger, 'merge_profiles') as mock_merge:
mock_merge.return_value = None, None
with mock.patch.object(sys, 'argv', args):
merge_results.main()
self.assertEqual(
mock_merge.call_args,
mock.call(task_output_dir, profdata_file, '.profraw',
'llvm-profdata', sparse=True,
skip_validation=False), None)
def test_merge_steps_parameters(self):
"""Test the build-level merge front-end."""
input_dir = 'some/task/output/dir'
output_file = '/some/different/path/to/profdata/merged.profdata'
args = [
'script_name',
'--input-dir',
input_dir,
'--output-file',
output_file,
'--llvm-profdata',
'llvm-profdata',
'--profdata-filename-pattern',
'.*'
]
with mock.patch.object(merger, 'merge_profiles') as mock_merge:
mock_merge.return_value = None
with mock.patch.object(sys, 'argv', args):
merge_steps.main()
self.assertEqual(
mock_merge.call_args,
mock.call(input_dir, output_file, '.profdata', 'llvm-profdata',
'.*', sparse=False))
@mock.patch.object(merger, '_validate_and_convert_profraws')
def test_merge_profraw(self, mock_validate_and_convert_profraws):
mock_input_dir_walk = [
('/b/some/path', ['0', '1', '2', '3'], ['summary.json']),
('/b/some/path/0', [],
['output.json', 'default-1.profraw', 'default-2.profraw']),
('/b/some/path/1', [],
['output.json', 'default-1.profraw', 'default-2.profraw']),
]
mock_validate_and_convert_profraws.return_value = [
'/b/some/path/0/default-1.profdata',
'/b/some/path/1/default-2.profdata',
], [
'/b/some/path/0/default-2.profraw',
'/b/some/path/1/default-1.profraw',
], [
'/b/some/path/1/default-1.profraw',
]
with mock.patch.object(os, 'walk') as mock_walk:
with mock.patch.object(os, 'remove'):
mock_walk.return_value = mock_input_dir_walk
with mock.patch.object(subprocess, 'check_call') as mock_exec_cmd:
merger.merge_profiles('/b/some/path', 'output/dir/default.profdata',
'.profraw', 'llvm-profdata')
self.assertEqual(
mock.call(
[
'llvm-profdata',
'merge',
'-o',
'output/dir/default.profdata',
'/b/some/path/0/default-1.profdata',
'/b/some/path/1/default-2.profdata',
],
stderr=-2,
), mock_exec_cmd.call_args)
self.assertTrue(mock_validate_and_convert_profraws.called)
@mock.patch.object(merger, '_validate_and_convert_profraws')
def test_profraw_skip_validation(self, mock_validate_and_convert_profraws):
mock_input_dir_walk = [
('/b/some/path', ['0', '1', '2', '3'], ['summary.json']),
('/b/some/path/0', [],
['output.json', 'default-1.profraw', 'default-2.profraw']),
('/b/some/path/1', [],
['output.json', 'default-1.profraw', 'default-2.profraw']),
]
with mock.patch.object(os, 'walk') as mock_walk:
with mock.patch.object(os, 'remove'):
mock_walk.return_value = mock_input_dir_walk
with mock.patch.object(subprocess, 'check_call') as mock_exec_cmd:
merger.merge_profiles('/b/some/path',
'output/dir/default.profdata',
'.profraw',
'llvm-profdata',
skip_validation=True)
self.assertEqual(
mock.call(
[
'llvm-profdata',
'merge',
'-o',
'output/dir/default.profdata',
'/b/some/path/0/default-1.profraw',
'/b/some/path/0/default-2.profraw',
'/b/some/path/1/default-1.profraw',
'/b/some/path/1/default-2.profraw'
],
stderr=-2,
), mock_exec_cmd.call_args)
# Skip validation should've passed all profraw files directly, and
# this validate call should not have been invoked.
self.assertFalse(mock_validate_and_convert_profraws.called)
def test_merge_profraw_skip_if_there_is_no_file(self):
mock_input_dir_walk = [
('/b/some/path', ['0', '1', '2', '3'], ['summary.json']),
]
with mock.patch.object(os, 'walk') as mock_walk:
mock_walk.return_value = mock_input_dir_walk
with mock.patch.object(subprocess, 'check_call') as mock_exec_cmd:
merger.merge_profiles('/b/some/path', 'output/dir/default.profdata',
'.profraw', 'llvm-profdata')
self.assertFalse(mock_exec_cmd.called)
@mock.patch.object(merger, '_validate_and_convert_profraws')
def test_merge_profdata(self, mock_validate_and_convert_profraws):
mock_input_dir_walk = [
('/b/some/path', ['base_unittests', 'url_unittests'], ['summary.json']),
('/b/some/path/base_unittests', [], ['output.json',
'default.profdata']),
('/b/some/path/url_unittests', [], ['output.json', 'default.profdata']),
]
with mock.patch.object(os, 'walk') as mock_walk:
with mock.patch.object(os, 'remove'):
mock_walk.return_value = mock_input_dir_walk
with mock.patch.object(subprocess, 'check_call') as mock_exec_cmd:
merger.merge_profiles('/b/some/path', 'output/dir/default.profdata',
'.profdata', 'llvm-profdata')
self.assertEqual(
mock.call(
[
'llvm-profdata',
'merge',
'-o',
'output/dir/default.profdata',
'/b/some/path/base_unittests/default.profdata',
'/b/some/path/url_unittests/default.profdata',
],
stderr=-2,
), mock_exec_cmd.call_args)
# The mock method should only apply when merging .profraw files.
self.assertFalse(mock_validate_and_convert_profraws.called)
@mock.patch.object(merger, '_validate_and_convert_profraws')
def test_merge_profdata_pattern(self, mock_validate_and_convert_profraws):
mock_input_dir_walk = [
('/b/some/path', ['base_unittests', 'url_unittests'], ['summary.json']),
('/b/some/path/base_unittests', [], ['output.json',
'base_unittests.profdata']),
('/b/some/path/url_unittests', [], ['output.json',
'url_unittests.profdata'],),
('/b/some/path/ios_chrome_smoke_eg2tests',
[], ['output.json','ios_chrome_smoke_eg2tests.profdata'],),
]
with mock.patch.object(os, 'walk') as mock_walk:
with mock.patch.object(os, 'remove'):
mock_walk.return_value = mock_input_dir_walk
with mock.patch.object(subprocess, 'check_call') as mock_exec_cmd:
input_profdata_filename_pattern = r'.+_unittests\.profdata'
merger.merge_profiles('/b/some/path',
'output/dir/default.profdata',
'.profdata',
'llvm-profdata',
input_profdata_filename_pattern)
self.assertEqual(
mock.call(
[
'llvm-profdata',
'merge',
'-o',
'output/dir/default.profdata',
'/b/some/path/base_unittests/base_unittests.profdata',
'/b/some/path/url_unittests/url_unittests.profdata',
],
stderr=-2,
), mock_exec_cmd.call_args)
# The mock method should only apply when merging .profraw files.
self.assertFalse(mock_validate_and_convert_profraws.called)
@mock.patch('merge_lib._JAVA_PATH', 'java')
def test_merge_java_exec_files(self):
mock_input_dir_walk = [
('/b/some/path', ['0', '1', '2', '3'], ['summary.json']),
('/b/some/path/0', [],
['output.json', 'default-1.exec', 'default-2.exec']),
('/b/some/path/1', [],
['output.json', 'default-3.exec', 'default-4.exec']),
]
with mock.patch.object(os, 'walk') as mock_walk:
mock_walk.return_value = mock_input_dir_walk
with mock.patch.object(subprocess, 'check_call') as mock_exec_cmd:
merger.merge_java_exec_files(
'/b/some/path', 'output/path', 'path/to/jacococli.jar')
self.assertEqual(
mock.call(
[
'java',
'-jar',
'path/to/jacococli.jar',
'merge',
'/b/some/path/0/default-1.exec',
'/b/some/path/0/default-2.exec',
'/b/some/path/1/default-3.exec',
'/b/some/path/1/default-4.exec',
'--destfile',
'output/path',
],
stderr=-2,
), mock_exec_cmd.call_args)
def test_merge_java_exec_files_if_there_is_no_file(self):
mock_input_dir_walk = [
('/b/some/path', ['0', '1', '2', '3'], ['summary.json']),
]
with mock.patch.object(os, 'walk') as mock_walk:
mock_walk.return_value = mock_input_dir_walk
with mock.patch.object(subprocess, 'check_call') as mock_exec_cmd:
merger.merge_java_exec_files(
'/b/some/path', 'output/path', 'path/to/jacococli.jar')
self.assertFalse(mock_exec_cmd.called)
def test_argparse_sparse(self):
"""Ensure that sparse flag defaults to true, and is set to correct value"""
# Basic required args
build_properties = json.dumps({
'some': {
'complicated': ['nested', {
'json': None,
'object': 'thing',
}]
}
})
task_output_dir = 'some/task/output/dir'
profdata_dir = '/some/different/path/to/profdata/default.profdata'
profdata_file = os.path.join(profdata_dir, 'base_unittests.profdata')
args = [
'script_name', '--output-json', 'output.json', '--build-properties',
build_properties, '--summary-json', 'summary.json', '--task-output-dir',
task_output_dir, '--profdata-dir', profdata_dir, '--llvm-profdata',
'llvm-profdata', 'a.json', 'b.json', 'c.json', '--test-target-name',
'base_unittests'
]
test_scenarios = [
{
# Base set of args should set --sparse to false by default
'args': None,
'expected_outcome': False,
},
{
# Sparse should parse True when only --sparse is specified
'args': ['--sparse'],
'expected_outcome': True,
}
]
for scenario in test_scenarios:
args = copy.deepcopy(args)
additional_args = scenario['args']
if additional_args:
args.extend(additional_args)
expected_outcome = scenario['expected_outcome']
with mock.patch.object(merger, 'merge_profiles') as mock_merge:
mock_merge.return_value = None, None
with mock.patch.object(sys, 'argv', args):
merge_results.main()
self.assertEqual(
mock_merge.call_args,
mock.call(task_output_dir, profdata_file, '.profraw',
'llvm-profdata', sparse=expected_outcome,
skip_validation=False), None)
if __name__ == '__main__':
unittest.main()
|
|
from direct.task.Task import Task
from pandac.PandaModules import VBase4, PandaNode
from direct.distributed.ClockDelta import globalClockDelta
from toontown.margins.MarginVisible import MarginVisible
from toontown.nametag import NametagGlobals
from toontown.nametag.Nametag2d import Nametag2d
from toontown.nametag.Nametag3d import Nametag3d
class NametagGroup:
CHAT_TIMEOUT_MIN = 4.0
CHAT_TIMEOUT_MAX = 12.0
CHAT_STOMP_DELAY = 0.2
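# Chat text stays visible for roughly 0.5 seconds per character, clamped to
# [CHAT_TIMEOUT_MIN, CHAT_TIMEOUT_MAX] (see setChatText). When new chat text
# arrives while old text is still showing, the old text is cleared first and
# the new text is "stomped" in after CHAT_STOMP_DELAY seconds.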
def __init__(self):
self.avatar = None
self.active = True
self.objectCode = None
self.chatButton = NametagGlobals.noButton
self.chatReversed = False
self.font = None
self.chatFont = None
self.shadow = None
self.marginManager = None
self.visible3d = True
self.chatType = NametagGlobals.CHAT
self.chatBalloonType = NametagGlobals.CHAT_BALLOON
self.nametagColor = NametagGlobals.NametagColors[NametagGlobals.CCNormal]
self.chatColor = NametagGlobals.ChatColors[NametagGlobals.CCNormal]
self.speedChatColor = VBase4(1, 1, 1, 1)
self.wordWrap = 8
self.chatWordWrap = 12
self.text = ''
self.chatPages = []
self.chatPageIndex = 0
self.chatTimeoutTask = None
self.chatTimeoutTaskName = self.getUniqueName() + '-timeout'
self.stompChatText = ''
self.stompTask = None
self.stompTaskName = self.getUniqueName() + '-stomp'
self.icon = PandaNode('icon')
self.nametag2d = Nametag2d()
self.nametag3d = Nametag3d()
self.nametags = set()
self.add(self.nametag2d)
self.add(self.nametag3d)
# Add the tick task:
self.tickTaskName = self.getUniqueName() + '-tick'
self.tickTask = taskMgr.add(self.tick, self.tickTaskName, sort=45)
def destroy(self):
if self.marginManager is not None:
self.unmanage(self.marginManager)
if self.tickTask is not None:
taskMgr.remove(self.tickTask)
self.tickTask = None
self.clearChatText()
for nametag in list(self.nametags):
self.remove(nametag)
self.nametag2d = None
self.nametag3d = None
if self.icon is not None:
self.icon.removeAllChildren()
self.icon = None
self.chatFont = None
self.font = None
self.chatButton = NametagGlobals.noButton
self.avatar = None
def getUniqueName(self):
return 'NametagGroup-' + str(id(self))
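# tick() runs every frame (added with sort=45 in __init__) and decides whether
# the 3D in-world nametag or the 2D margin nametag should be shown, based on
# forced-2D/forced-onscreen-chat settings, whether the avatar is in view of
# the camera, and whether a chat balloon is currently displayed.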
def tick(self, task):
if (self.avatar is None) or (self.avatar.isEmpty()):
return Task.cont
chatText = self.getChatText()
if (NametagGlobals.forceOnscreenChat and
chatText and
self.chatBalloonType == NametagGlobals.CHAT_BALLOON):
visible3d = False
elif self.avatar == NametagGlobals.me:
if (chatText and
self.chatBalloonType == NametagGlobals.CHAT_BALLOON and
not base.cam.node().isInView(self.avatar.getPos(base.cam))):
visible3d = False
else:
visible3d = True
elif NametagGlobals.force2dNametags:
visible3d = False
elif (not NametagGlobals.want2dNametags and
((not chatText) or (self.chatBalloonType != NametagGlobals.CHAT_BALLOON))):
visible3d = True
elif self.avatar.isHidden():
visible3d = False
else:
visible3d = base.cam.node().isInView(self.avatar.getPos(base.cam))
if visible3d != self.visible3d:
self.visible3d = visible3d
if self.nametag2d is not None:
self.nametag2d.setVisible(not visible3d)
return Task.cont
def setAvatar(self, avatar):
self.avatar = avatar
for nametag in self.nametags:
nametag.setAvatar(self.avatar)
def getAvatar(self):
return self.avatar
def setActive(self, active):
self.active = active
for nametag in self.nametags:
nametag.setActive(self.active)
def getActive(self):
return self.active
def setObjectCode(self, objectCode):
self.objectCode = objectCode
def getObjectCode(self):
return self.objectCode
def setChatButton(self, chatButton):
self.chatButton = chatButton
for nametag in self.nametags:
nametag.setChatButton(self.chatButton)
def getChatButton(self):
return self.chatButton
def hasChatButton(self):
return self.chatButton != NametagGlobals.noButton
def setChatReversed(self, reversed):
self.chatReversed = reversed
for nametag in self.nametags:
nametag.setChatReversed(reversed)
def getChatReversed(self):
return self.chatReversed
def setFont(self, font):
self.font = font
for nametag in self.nametags:
nametag.setFont(self.font)
def getFont(self):
return self.font
def setChatFont(self, chatFont):
self.chatFont = chatFont
for nametag in self.nametags:
nametag.setChatFont(self.chatFont)
def getChatFont(self):
return self.chatFont
def setShadow(self, shadow):
self.shadow = shadow
for nametag in self.nametags:
nametag.setShadow(self.shadow)
def getShadow(self):
return self.shadow
def clearShadow(self):
self.shadow = None
for nametag in self.nametags:
nametag.clearShadow()
def setChatType(self, chatType):
self.chatType = chatType
for nametag in self.nametags:
nametag.setChatType(self.chatType)
def getChatType(self):
return self.chatType
def setChatBalloonType(self, chatBalloonType):
self.chatBalloonType = chatBalloonType
for nametag in self.nametags:
nametag.setChatBalloonType(self.chatBalloonType)
def getChatBalloonType(self):
return self.chatBalloonType
def setNametagColor(self, nametagColor):
self.nametagColor = nametagColor
for nametag in self.nametags:
nametag.setNametagColor(self.nametagColor)
def getNametagColor(self):
return self.nametagColor
def setChatColor(self, chatColor):
self.chatColor = chatColor
for nametag in self.nametags:
nametag.setChatColor(self.chatColor)
def getChatColor(self):
return self.chatColor
def setSpeedChatColor(self, speedChatColor):
self.speedChatColor = speedChatColor
for nametag in self.nametags:
nametag.setSpeedChatColor(self.speedChatColor)
def getSpeedChatColor(self):
return self.speedChatColor
def setWordWrap(self, wordWrap):
self.wordWrap = wordWrap
for nametag in self.nametags:
nametag.setWordWrap(self.wordWrap)
def getWordWrap(self):
return self.wordWrap
def setChatWordWrap(self, chatWordWrap):
self.chatWordWrap = chatWordWrap
for nametag in self.nametags:
nametag.setChatWordWrap(self.chatWordWrap)
def getChatWordWrap(self):
return self.chatWordWrap
def setText(self, text):
self.text = text
for nametag in self.nametags:
nametag.setText(self.text)
nametag.update()
def getText(self):
return self.text
def getNumChatPages(self):
return len(self.chatPages)
def setChatPageIndex(self, chatPageIndex):
if chatPageIndex >= self.getNumChatPages():
return
self.chatPageIndex = chatPageIndex
chatText = self.chatPages[chatPageIndex]
for nametag in self.nametags:
if chatText and isinstance(nametag, Nametag3d):
nametag.contents.hide()
nametag.setChatText(chatText)
nametag.update()
if chatText and isinstance(nametag, Nametag3d):
nametag.animateChatBalloon()
def getChatPageIndex(self):
return self.chatPageIndex
def setChatText(self, chatText, timeout=False):
# If we are currently displaying chat text, we need to "stomp" it. In
# other words, we need to clear the current chat text, pause for a
# brief moment, and then display the new chat text:
if self.getChatText():
self.clearChatText()
self.stompChatText = chatText
self.stompTask = taskMgr.doMethodLater(
self.CHAT_STOMP_DELAY, self.__chatStomp, self.stompTaskName,
extraArgs=[timeout])
return
self.clearChatText()
self.chatPages = chatText.split('\x07')
self.setChatPageIndex(0)
if timeout:
delay = len(self.getChatText()) * 0.5
if delay < self.CHAT_TIMEOUT_MIN:
delay = self.CHAT_TIMEOUT_MIN
elif delay > self.CHAT_TIMEOUT_MAX:
delay = self.CHAT_TIMEOUT_MAX
self.chatTimeoutTask = taskMgr.doMethodLater(
delay, self.clearChatText, self.chatTimeoutTaskName)
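# Illustrative call (hypothetical text): setChatText('Hi!\x07Second page',
# timeout=True) splits the text into two chat pages on the bell character and
# schedules clearChatText once the computed delay elapses.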
def getChatText(self):
if self.chatPageIndex >= self.getNumChatPages():
return ''
return self.chatPages[self.chatPageIndex]
def clearChatText(self, task=None):
if self.stompTask is not None:
taskMgr.remove(self.stompTask)
self.stompTask = None
self.stompChatText = ''
if self.chatTimeoutTask is not None:
taskMgr.remove(self.chatTimeoutTask)
self.chatTimeoutTask = None
self.chatPages = []
self.chatPageIndex = 0
for nametag in self.nametags:
nametag.setChatText('')
nametag.update()
if task is not None:
return Task.done
def getStompChatText(self):
return self.stompChatText
def setIcon(self, icon):
self.icon = icon
for nametag in self.nametags:
nametag.setIcon(self.icon)
def getIcon(self):
return self.icon
def setNametag2d(self, nametag2d):
if self.nametag2d is not None:
self.remove(self.nametag2d)
self.nametag2d = None
if nametag2d is None:
return
self.nametag2d = nametag2d
self.add(self.nametag2d)
def getNametag2d(self):
return self.nametag2d
def setNametag3d(self, nametag3d):
if self.nametag3d is not None:
self.remove(self.nametag3d)
self.nametag3d = None
if nametag3d is None:
return
self.nametag3d = nametag3d
self.add(self.nametag3d)
def getNametag3d(self):
return self.nametag3d
def add(self, nametag):
self.nametags.add(nametag)
nametag.setAvatar(self.avatar)
nametag.setActive(self.active)
nametag.setClickEvent(self.getUniqueName())
nametag.setChatButton(self.chatButton)
nametag.setFont(self.font)
nametag.setChatFont(self.chatFont)
nametag.setChatType(self.chatType)
nametag.setChatBalloonType(self.chatBalloonType)
nametag.setNametagColor(self.nametagColor)
nametag.setChatColor(self.chatColor)
nametag.setSpeedChatColor(self.speedChatColor)
nametag.setWordWrap(self.wordWrap)
nametag.setChatWordWrap(self.chatWordWrap)
nametag.setText(self.text)
nametag.setChatText(self.getChatText())
nametag.setIcon(self.icon)
nametag.update()
def remove(self, nametag):
nametag.destroy()
self.nametags.remove(nametag)
def updateAll(self):
for nametag in self.nametags:
nametag.update()
def manage(self, marginManager):
if self.marginManager is not None:
self.unmanage(self.marginManager)
self.marginManager = marginManager
for nametag in self.nametags:
if isinstance(nametag, MarginVisible):
nametag.manage(self.marginManager)
def unmanage(self, marginManager):
if marginManager != self.marginManager:
return
if self.marginManager is None:
return
self.marginManager = None
for nametag in self.nametags:
if isinstance(nametag, MarginVisible):
nametag.unmanage(marginManager)
def hideNametag(self):
for nametag in self.nametags:
nametag.hideNametag()
def showNametag(self):
for nametag in self.nametags:
nametag.showNametag()
def hideChat(self):
for nametag in self.nametags:
nametag.hideChat()
def showChat(self):
for nametag in self.nametags:
nametag.showChat()
def hideThought(self):
for nametag in self.nametags:
nametag.hideThought()
def showThought(self):
for nametag in self.nametags:
nametag.showThought()
def __chatStomp(self, timeout=False):
self.setChatText(self.stompChatText, timeout=timeout)
self.stompChatText = ''
|
|
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import timeutils
import pytz
import six
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import objects as test_objects
from cinder.tests.unit import utils
fake_backup = {
'id': fake.BACKUP_ID,
'volume_id': fake.VOLUME_ID,
'status': fields.BackupStatus.CREATING,
'size': 1,
'display_name': 'fake_name',
'display_description': 'fake_description',
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'temp_volume_id': None,
'temp_snapshot_id': None,
'snapshot_id': None,
'data_timestamp': None,
'restore_volume_id': None,
'backup_metadata': {},
}
vol_props = {'status': 'available', 'size': 1}
fake_vol = fake_volume.fake_db_volume(**vol_props)
snap_props = {'status': fields.BackupStatus.AVAILABLE,
'volume_id': fake_vol['id'],
'expected_attrs': ['metadata']}
fake_snap = fake_snapshot.fake_db_snapshot(**snap_props)
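# Module-level fixtures shared by the test cases below: a fake backup record,
# plus a fake volume and snapshot used by the BackupDeviceInfo tests.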
class TestBackup(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.get_by_id', return_value=fake_backup)
def test_get_by_id(self, backup_get):
backup = objects.Backup.get_by_id(self.context, fake.USER_ID)
self._compare(self, fake_backup, backup)
backup_get.assert_called_once_with(self.context, models.Backup,
fake.USER_ID)
@mock.patch('cinder.db.sqlalchemy.api.model_query')
def test_get_by_id_no_existing_id(self, model_query):
query = mock.Mock()
filter_by = mock.Mock()
query_options = mock.Mock()
filter_by.first.return_value = None
query_options.filter_by.return_value = filter_by
query.options.return_value = query_options
model_query.return_value = query
self.assertRaises(exception.BackupNotFound, objects.Backup.get_by_id,
self.context, 123)
@mock.patch('cinder.db.backup_create', return_value=fake_backup)
def test_create(self, backup_create):
backup = objects.Backup(context=self.context)
backup.create()
self.assertEqual(fake_backup['id'], backup.id)
self.assertEqual(fake_backup['volume_id'], backup.volume_id)
@mock.patch('cinder.db.backup_update')
def test_save(self, backup_update):
backup = objects.Backup._from_db_object(
self.context, objects.Backup(), fake_backup)
backup.display_name = 'foobar'
backup.save()
backup_update.assert_called_once_with(self.context, backup.id,
{'display_name': 'foobar'})
@mock.patch('cinder.db.backup_metadata_update',
return_value={'key1': 'value1'})
@mock.patch('cinder.db.backup_update')
def test_save_with_metadata(self, backup_update, metadata_update):
backup = objects.Backup._from_db_object(
self.context, objects.Backup(), fake_backup)
backup.metadata = {'key1': 'value1'}
self.assertEqual({'metadata': {'key1': 'value1'}},
backup.obj_get_changes())
backup.save()
metadata_update.assert_called_once_with(self.context, backup.id,
{'key1': 'value1'}, True)
@mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow())
@mock.patch('cinder.db.sqlalchemy.api.backup_destroy')
def test_destroy(self, backup_destroy, utcnow_mock):
backup_destroy.return_value = {
'status': fields.BackupStatus.DELETED,
'deleted': True,
'deleted_at': utcnow_mock.return_value}
backup = objects.Backup(context=self.context, id=fake.BACKUP_ID)
backup.destroy()
self.assertTrue(backup_destroy.called)
admin_context = backup_destroy.call_args[0][0]
self.assertTrue(admin_context.is_admin)
self.assertTrue(backup.deleted)
self.assertEqual(fields.BackupStatus.DELETED, backup.status)
self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC),
backup.deleted_at)
def test_obj_field_temp_volume_snapshot_id(self):
backup = objects.Backup(context=self.context,
temp_volume_id='2',
temp_snapshot_id='3')
self.assertEqual('2', backup.temp_volume_id)
self.assertEqual('3', backup.temp_snapshot_id)
def test_obj_field_snapshot_id(self):
backup = objects.Backup(context=self.context,
snapshot_id='2')
self.assertEqual('2', backup.snapshot_id)
def test_obj_field_restore_volume_id(self):
backup = objects.Backup(context=self.context,
restore_volume_id='2')
self.assertEqual('2', backup.restore_volume_id)
def test_obj_field_metadata(self):
backup = objects.Backup(context=self.context,
metadata={'test_key': 'test_value'})
self.assertEqual({'test_key': 'test_value'}, backup.metadata)
def test_import_record(self):
utils.replace_obj_loader(self, objects.Backup)
backup = objects.Backup(context=self.context, id=fake.BACKUP_ID,
parent_id=None,
num_dependent_backups=0)
export_string = backup.encode_record()
imported_backup = objects.Backup.decode_record(export_string)
# Make sure we don't lose data when converting from string
self.assertDictEqual(self._expected_backup(backup), imported_backup)
def test_import_record_additional_info(self):
utils.replace_obj_loader(self, objects.Backup)
backup = objects.Backup(context=self.context, id=fake.BACKUP_ID,
parent_id=None,
num_dependent_backups=0)
extra_info = {'driver': {'key1': 'value1', 'key2': 'value2'}}
extra_info_copy = extra_info.copy()
export_string = backup.encode_record(extra_info=extra_info)
imported_backup = objects.Backup.decode_record(export_string)
# Dictionary passed should not be modified
self.assertDictEqual(extra_info_copy, extra_info)
# Make sure we don't lose data when converting from string and that
# extra info is still there
expected = self._expected_backup(backup)
expected['extra_info'] = extra_info
self.assertDictEqual(expected, imported_backup)
def _expected_backup(self, backup):
record = {name: field.to_primitive(backup, name, getattr(backup, name))
for name, field in backup.fields.items()}
return record
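# _expected_backup() converts every field of the backup object to its
# primitive form, which is what decode_record() returns, so the encode/decode
# round-trip tests can compare plain dictionaries.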
def test_import_record_additional_info_cant_overwrite(self):
utils.replace_obj_loader(self, objects.Backup)
backup = objects.Backup(context=self.context, id=fake.BACKUP_ID,
parent_id=None,
num_dependent_backups=0)
export_string = backup.encode_record(id='fake_id')
imported_backup = objects.Backup.decode_record(export_string)
# Make sure the extra_info can't overwrite basic data
self.assertDictEqual(self._expected_backup(backup), imported_backup)
def test_import_record_decoding_error(self):
export_string = '123456'
self.assertRaises(exception.InvalidInput,
objects.Backup.decode_record,
export_string)
def test_import_record_parsing_error(self):
export_string = ''
self.assertRaises(exception.InvalidInput,
objects.Backup.decode_record,
export_string)
@mock.patch('cinder.db.sqlalchemy.api.backup_get')
def test_refresh(self, backup_get):
db_backup1 = fake_backup.copy()
db_backup2 = db_backup1.copy()
db_backup2['display_name'] = 'foobar'
# On the second backup_get, return the backup with an updated
# display_name
backup_get.side_effect = [db_backup1, db_backup2]
backup = objects.Backup.get_by_id(self.context, fake.BACKUP_ID)
self._compare(self, db_backup1, backup)
# display_name was updated, so a backup refresh should have a new value
# for that field
backup.refresh()
self._compare(self, db_backup2, backup)
if six.PY3:
call_bool = mock.call.__bool__()
else:
call_bool = mock.call.__nonzero__()
backup_get.assert_has_calls([mock.call(self.context, fake.BACKUP_ID),
call_bool,
mock.call(self.context, fake.BACKUP_ID)])
class TestBackupList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.backup_get_all', return_value=[fake_backup])
def test_get_all(self, backup_get_all):
backups = objects.BackupList.get_all(self.context)
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])
@mock.patch('cinder.db.backup_get_all_by_project',
return_value=[fake_backup])
def test_get_all_by_project(self, get_all_by_project):
backups = objects.BackupList.get_all_by_project(
self.context, self.project_id)
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])
@mock.patch('cinder.db.backup_get_all_by_host',
return_value=[fake_backup])
def test_get_all_by_host(self, get_all_by_host):
backups = objects.BackupList.get_all_by_host(self.context, "fake_host")
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])
@mock.patch('cinder.db.backup_get_all', return_value=[fake_backup])
def test_get_all_tenants(self, backup_get_all):
search_opts = {'all_tenants': 1}
backups = objects.BackupList.get_all(self.context, search_opts)
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])
@mock.patch('cinder.db.backup_get_all_by_volume',
return_value=[fake_backup])
def test_get_all_by_volume(self, get_all_by_volume):
backups = objects.BackupList.get_all_by_volume(self.context,
fake.VOLUME_ID)
self.assertEqual(1, len(backups))
get_all_by_volume.assert_called_once_with(self.context,
fake.VOLUME_ID, None)
TestBackup._compare(self, fake_backup, backups[0])
class BackupDeviceInfoTestCase(test_objects.BaseObjectsTestCase):
def setUp(self):
super(BackupDeviceInfoTestCase, self).setUp()
self.vol_obj = fake_volume.fake_volume_obj(self.context, **vol_props)
self.snap_obj = fake_snapshot.fake_snapshot_obj(self.context,
**snap_props)
self.backup_device_dict = {'secure_enabled': False,
'is_snapshot': False, }
@mock.patch('cinder.db.volume_get', return_value=fake_vol)
def test_from_primitive_with_volume(self, mock_fake_vol):
vol_obj = self.vol_obj
self.backup_device_dict['backup_device'] = vol_obj
backup_device_info = objects.BackupDeviceInfo.from_primitive(
self.backup_device_dict, self.context)
self.assertFalse(backup_device_info.is_snapshot)
self.assertEqual(self.backup_device_dict['secure_enabled'],
backup_device_info.secure_enabled)
self.assertEqual(vol_obj, backup_device_info.volume)
self.backup_device_dict['backup_device'] = fake_vol
backup_device_info = objects.BackupDeviceInfo.from_primitive(
self.backup_device_dict, self.context)
vol_obj_from_db = objects.Volume._from_db_object(self.context,
objects.Volume(),
fake_vol)
self.assertEqual(vol_obj_from_db, backup_device_info.volume)
@mock.patch('cinder.db.snapshot_get', return_value=fake_snap)
def test_from_primitive_with_snapshot(self, mock_fake_snap):
snap_obj = self.snap_obj
self.backup_device_dict['is_snapshot'] = True
self.backup_device_dict['backup_device'] = snap_obj
backup_device_info = objects.BackupDeviceInfo.from_primitive(
self.backup_device_dict, self.context, expected_attrs=['metadata'])
self.assertTrue(backup_device_info.is_snapshot)
self.assertEqual(self.backup_device_dict['secure_enabled'],
backup_device_info.secure_enabled)
self.assertEqual(snap_obj, backup_device_info.snapshot)
self.backup_device_dict['backup_device'] = fake_snap
backup_device_info = objects.BackupDeviceInfo.from_primitive(
self.backup_device_dict, self.context, expected_attrs=['metadata'])
self.assertEqual(snap_obj, backup_device_info.snapshot)
@mock.patch('cinder.db.volume_get', return_value=fake_vol)
def test_to_primitive_with_volume(self, mock_fake_vol):
vol_obj = self.vol_obj
self.backup_device_dict['backup_device'] = fake_vol
backup_device_info = objects.BackupDeviceInfo()
backup_device_info.volume = vol_obj
backup_device_info.secure_enabled = (
self.backup_device_dict['secure_enabled'])
backup_device_ret_dict = backup_device_info.to_primitive(self.context)
self.assertEqual(self.backup_device_dict['secure_enabled'],
backup_device_ret_dict['secure_enabled'])
self.assertFalse(backup_device_ret_dict['is_snapshot'])
self.assertEqual(self.backup_device_dict['backup_device'],
backup_device_ret_dict['backup_device'])
@mock.patch('cinder.db.snapshot_get', return_value=fake_snap)
def test_to_primitive_with_snapshot(self, mock_fake_snap):
snap_obj = self.snap_obj
backup_device_info = objects.BackupDeviceInfo()
backup_device_info.snapshot = snap_obj
backup_device_info.secure_enabled = (
self.backup_device_dict['secure_enabled'])
backup_device_ret_dict = backup_device_info.to_primitive(self.context)
self.assertEqual(self.backup_device_dict['secure_enabled'],
backup_device_ret_dict['secure_enabled'])
self.assertTrue(backup_device_ret_dict['is_snapshot'])
# NOTE(sborkows): since volume in sqlalchemy snapshot is a sqlalchemy
# object too, to compare snapshots we need to convert their volumes to
# dicts.
snap_actual_dict = fake_snap
snap_ref_dict = backup_device_ret_dict['backup_device']
snap_actual_dict['volume'] = self.vol_obj.obj_to_primitive()
snap_ref_dict['volume'] = snap_ref_dict['volume']
self.assertEqual(snap_actual_dict, snap_ref_dict)
def test_is_snapshot_both_volume_and_snapshot_raises_error(self):
snap = self.snap_obj
vol = self.vol_obj
backup_device_info = objects.BackupDeviceInfo()
backup_device_info.snapshot = snap
backup_device_info.volume = vol
backup_device_info.secure_enabled = (
self.backup_device_dict['secure_enabled'])
self.assertRaises(exception.ProgrammingError, getattr,
backup_device_info, 'is_snapshot')
def test_is_snapshot_neither_volume_nor_snapshot_raises_error(self):
backup_device_info = objects.BackupDeviceInfo()
backup_device_info.secure_enabled = (
self.backup_device_dict['secure_enabled'])
self.assertRaises(exception.ProgrammingError, getattr,
backup_device_info, 'is_snapshot')
def test_device_obj_with_volume(self):
vol = self.vol_obj
backup_device_info = objects.BackupDeviceInfo()
backup_device_info.volume = vol
backup_device_info.secure_enabled = (
self.backup_device_dict['secure_enabled'])
backup_device_obj = backup_device_info.device_obj
self.assertIsInstance(backup_device_obj, objects.Volume)
self.assertEqual(vol, backup_device_obj)
def test_device_obj_with_snapshot(self):
snap = self.snap_obj
backup_device_info = objects.BackupDeviceInfo()
backup_device_info.snapshot = snap
backup_device_info.secure_enabled = (
self.backup_device_dict['secure_enabled'])
backup_device_obj = backup_device_info.device_obj
self.assertIsInstance(backup_device_obj, objects.Snapshot)
self.assertEqual(snap, backup_device_obj)
|
|
# Created By: Virgil Dupras
# Created On: 2009-09-19
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from PyQt4.QtCore import Qt, SIGNAL, QMimeData, QByteArray
from PyQt4.QtGui import QPixmap
from hscommon.conflict import is_conflicted
from hscommon.util import dedupe, format_size, format_time
from hscommon.path import Path
from qtlib.tree_model import TreeNode, TreeModel
from core.fs_utils import smart_move
MIME_PATHS = 'application/musicguru.paths'
DESIGN_BOARD_NAME = '<design board>'
IGNORE_BOX_NAME = '<ignore box>'
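# Drag-and-drop between trees uses the custom MIME_PATHS format: a
# newline-separated list of paths whose first component is the source model's
# name (design board or ignore box), which lets dropMimeData resolve each path
# back to the correct tree (see mimeData/dropMimeData below).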
class FSNode(TreeNode):
def __init__(self, model, parent, ref, row):
TreeNode.__init__(self, model, parent, row)
self.ref = ref
self._data = None
self._imageName = None
def __repr__(self):
return "<FSNode %s>" % self.ref.name
def _getData(self):
raise NotImplementedError()
def _getImageName(self):
raise NotImplementedError()
def invalidate(self, with_subnodes=False):
if with_subnodes:
for node in self.subnodes:
node.invalidate(with_subnodes=True)
self._data = None
self._imageName = None
TreeNode.invalidate(self)
@property
def data(self):
if self._data is None:
self._data = self._getData()
return self._data
@property
def imageName(self):
if self._imageName is None:
self._imageName = self._getImageName()
return self._imageName
class SongNode(FSNode):
def _getData(self):
song = self.ref
return [
song.name,
song.original.parent_volume.name,
0,
format_size(song.size, 2, 2, False),
format_time(song.duration, with_hours=False),
]
def _getImageName(self):
return 'song_conflict' if is_conflicted(self.ref.name) else 'song'
def _getChildren(self):
return []
class FolderNode(FSNode):
def _getData(self):
folder = self.ref
parent_volumes = dedupe(song.original.parent_volume for song in folder.iterallfiles())
return [
folder.name,
','.join(l.name for l in parent_volumes),
folder.get_stat('filecount'),
format_size(folder.get_stat('size'), 2, 2, False),
format_time(folder.get_stat('duration')),
]
def _getImageName(self):
return 'folder_conflict' if self.ref.allconflicts else 'folder'
def _createNode(self, ref, row):
if ref.is_container:
return FolderNode(self.model, self, ref, row)
else:
return SongNode(self.model, self, ref, row)
def _getChildren(self):
return self.ref.dirs + self.ref.files
class DummyNode(FSNode):
def _getData(self):
return [''] * 5
def _getImageName(self):
return ''
def _getChildren(self):
return []
class FSModel(TreeModel):
HEADER = ['Name', 'Location', 'Songs', 'Size (MB)', 'Time']
def __init__(self, app, ref, name):
self.app = app
self.ref = ref
self.name = name # the name is going to be the first item in the paths passed around in d&d
TreeModel.__init__(self)
def _createDummyNode(self, parent, row):
return DummyNode(self, parent, None, row)
def _createNode(self, ref, row):
if ref.is_container:
return FolderNode(self, None, ref, row)
else:
return SongNode(self, None, ref, row)
def _getChildren(self):
return self.ref.dirs
def columnCount(self, parent):
return len(self.HEADER)
def data(self, index, role):
if not index.isValid():
return None
node = index.internalPointer()
if role == Qt.DisplayRole:
return node.data[index.column()]
elif role == Qt.DecorationRole:
if index.column() == 0:
return QPixmap(":/{0}".format(node.imageName))
elif role == Qt.EditRole:
if index.column() == 0:
return node.data[index.column()]
return None
def dropMimeData(self, mimeData, action, row, column, parentIndex):
# In the tests I have run, the row and column args always seem to be -1/-1, except when
# parentIndex is invalid (which means that the drop destination is the root node).
def find_path(path):
if path[0] == DESIGN_BOARD_NAME:
return self.app.board.find_path(path[1:])
elif path[0] == IGNORE_BOX_NAME:
return self.app.board.ignore_box.find_path(path[1:])
if not mimeData.hasFormat(MIME_PATHS):
return False
if parentIndex.isValid():
destNode = parentIndex.internalPointer()
else:
destNode = self
paths = str(mimeData.data(MIME_PATHS), 'utf-8').split('\n')
sourceItems = set(find_path(Path(path)) for path in paths)
sourceItems = set(item for item in sourceItems if item.parent not in sourceItems | set([destNode.ref]))
if not sourceItems:
return False
smart_move(sourceItems, destNode.ref, allow_merge=True)
destNode.invalidate()
# InsertRow calls have to be made at correct indexes or else the subsequent removeRows call
# will be made at incorrect indexes. To do so, we just go through every subitem of destNode.ref
# and if it's in sourceItems, we call insertRow.
for index, node in enumerate(destNode.subnodes):
if node.ref in sourceItems:
self.insertRow(index, parentIndex)
return True
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled | Qt.ItemIsDropEnabled
flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled | Qt.ItemIsDropEnabled
if index.column() == 0:
flags |= Qt.ItemIsEditable
return flags
def headerData(self, section, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole and section < len(self.HEADER):
return self.HEADER[section]
return None
def insertRows(self, row, count, parentIndex):
node = parentIndex.internalPointer() if parentIndex.isValid() else self
self.beginInsertRows(parentIndex, row, row + count - 1)
node.invalidate()
self.endInsertRows()
return True
def mimeData(self, indexes):
nodes = dedupe(index.internalPointer() for index in indexes)
paths = [str(self.name + node.ref.path) for node in nodes]
data = '\n'.join(paths).encode('utf-8')
mimeData = QMimeData()
mimeData.setData(MIME_PATHS, QByteArray(data))
return mimeData
def mimeTypes(self):
return [MIME_PATHS]
def removeRows(self, row, count, parentIndex):
node = parentIndex.internalPointer() if parentIndex.isValid() else self
self.beginRemoveRows(parentIndex, row, row + count - 1)
node.invalidate()
self.endRemoveRows()
return True
def refreshNode(self, node):
if node is None:
self.invalidate()
return
node.invalidate(with_subnodes=True)
self.emit(SIGNAL('layoutChanged()'))
def supportedDropActions(self):
return Qt.MoveAction
|
|
class Changes(object):
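# Each assertion below compares rows of the form
# (id, lft, rgt, level, parent_id, tree_id) against the nested-sets diagrams
# drawn in the docstrings; lft/rgt are the left/right boundaries of a node's
# subtree and level is its depth offset from get_default_level().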
def test_update_wo_move(self):
""" Update node w/o move
initial state of the tree :mod:`sqlalchemy_mptt.tests.add_mptt_tree`
.. code::
level Nested sets example
1 1(1)22
_______________|___________________
| | |
2 2(2)5 6(4)11 12(7)21
| ^ ^
3 3(3)4 7(5)8 9(6)10 13(8)16 17(10)20
| |
4 14(9)15 18(11)19
"""
node = self.session.query(self.model)\
.filter(self.model.get_pk_column() == 4).one()
node.visible = True
self.session.add(node)
_level = node.get_default_level()
self.assertEqual(
[
# id lft rgt lvl parent tree
(1, 1, 22, _level + 0, None, 1),
(2, 2, 5, _level + 1, 1, 1),
(3, 3, 4, _level + 2, 2, 1),
(4, 6, 11, _level + 1, 1, 1),
(5, 7, 8, _level + 2, 4, 1),
(6, 9, 10, _level + 2, 4, 1),
(7, 12, 21, _level + 1, 1, 1),
(8, 13, 16, _level + 2, 7, 1),
(9, 14, 15, _level + 3, 8, 1),
(10, 17, 20, _level + 2, 7, 1),
(11, 18, 19, _level + 3, 10, 1),
(12, 1, 22, _level + 0, None, 2),
(13, 2, 5, _level + 1, 12, 2),
(14, 3, 4, _level + 2, 13, 2),
(15, 6, 11, _level + 1, 12, 2),
(16, 7, 8, _level + 2, 15, 2),
(17, 9, 10, _level + 2, 15, 2),
(18, 12, 21, _level + 1, 12, 2),
(19, 13, 16, _level + 2, 18, 2),
(20, 14, 15, _level + 3, 19, 2),
(21, 17, 20, _level + 2, 18, 2),
(22, 18, 19, _level + 3, 21, 2)
],
self.result.all()) # flake8: noqa
def test_update_wo_move_like_sacrud_save(self):
""" Just change attr from node w/o move
initial state of the tree :mod:`sqlalchemy_mptt.tests.add_mptt_tree`
.. code::
level Nested sets example
1 1(1)22
_______________|___________________
| | |
2 2(2)5 6(4)11 12(7)21
| ^ ^
3 3(3)4 7(5)8 9(6)10 13(8)16 17(10)20
| |
4 14(9)15 18(11)19
"""
node = self.session.query(self.model)\
.filter(self.model.get_pk_column() == 4).one()
node.parent_id = '1'
node.visible = True
self.session.add(node)
_level = node.get_default_level()
# id lft rgt lvl parent tree
self.assertEqual([(1, 1, 22, _level + 0, None, 1),
(2, 2, 5, _level + 1, 1, 1),
(3, 3, 4, _level + 2, 2, 1),
(4, 6, 11, _level + 1, 1, 1),
(5, 7, 8, _level + 2, 4, 1),
(6, 9, 10, _level + 2, 4, 1),
(7, 12, 21, _level + 1, 1, 1),
(8, 13, 16, _level + 2, 7, 1),
(9, 14, 15, _level + 3, 8, 1),
(10, 17, 20, _level + 2, 7, 1),
(11, 18, 19, _level + 3, 10, 1),
(12, 1, 22, _level + 0, None, 2),
(13, 2, 5, _level + 1, 12, 2),
(14, 3, 4, _level + 2, 13, 2),
(15, 6, 11, _level + 1, 12, 2),
(16, 7, 8, _level + 2, 15, 2),
(17, 9, 10, _level + 2, 15, 2),
(18, 12, 21, _level + 1, 12, 2),
(19, 13, 16, _level + 2, 18, 2),
(20, 14, 15, _level + 3, 19, 2),
(21, 17, 20, _level + 2, 18, 2),
(22, 18, 19, _level + 3, 21, 2)], self.result.all())
def test_insert_node(self):
""" Insert node with parent==6
initial state of the tree :mod:`sqlalchemy_mptt.tests.add_mptt_tree`
.. code::
level Nested sets example
1 1(1)22
_______________|___________________
| | |
2 2(2)5 6(4)11 12(7)21
| ^ ^
3 3(3)4 7(5)8 9(6)10 13(8)16 17(10)20
| |
4 14(9)15 18(11)19
level Insert node with parent_id == 6
1 1(1)24
_______________|_________________
| | |
2 2(2)5 6(4)13 14(7)23
| ____|____ ___|____
| | | | |
3 3(3)4 7(5)8 9(6)12 15(8)18 19(10)22
| | |
4 10(23)11 16(9)17 20(11)21
"""
node = self.model(parent_id=6)
self.session.add(node)
_level = node.get_default_level()
self.assertEqual(
[
# id lft rgt lvl parent tree
(1, 1, 24, _level + 0, None, 1),
(2, 2, 5, _level + 1, 1, 1),
(3, 3, 4, _level + 2, 2, 1),
(4, 6, 13, _level + 1, 1, 1),
(5, 7, 8, _level + 2, 4, 1),
(6, 9, 12, _level + 2, 4, 1),
(7, 14, 23, _level + 1, 1, 1),
(8, 15, 18, _level + 2, 7, 1),
(9, 16, 17, _level + 3, 8, 1),
(10, 19, 22, _level + 2, 7, 1),
(11, 20, 21, _level + 3, 10, 1),
(12, 1, 22, _level + 0, None, 2),
(13, 2, 5, _level + 1, 12, 2),
(14, 3, 4, _level + 2, 13, 2),
(15, 6, 11, _level + 1, 12, 2),
(16, 7, 8, _level + 2, 15, 2),
(17, 9, 10, _level + 2, 15, 2),
(18, 12, 21, _level + 1, 12, 2),
(19, 13, 16, _level + 2, 18, 2),
(20, 14, 15, _level + 3, 19, 2),
(21, 17, 20, _level + 2, 18, 2),
(22, 18, 19, _level + 3, 21, 2),
(23, 10, 11, _level + 3, 6, 1)
],
self.result.all())
def test_insert_node_near_subtree(self):
""" Insert node with parent==4
initial state of the tree :mod:`sqlalchemy_mptt.tests.add_mptt_tree`
.. code::
level Nested sets example
1 1(1)22
_______________|___________________
| | |
2 2(2)5 6(4)11 12(7)21
| ^ ^
3 3(3)4 7(5)8 9(6)10 13(8)16 17(10)20
| |
4 14(9)15 18(11)19
level Insert node with parent_id == 4
1 1(1)24
_______________|_____________________
| | |
2 2(2)5 6(4)13 14(7)23
| ______|________ __|______
| | | | | |
3 3(3)4 7(5)8 9(6)10 11(23)12 15(8)18 19(10)22
| |
4 16(9)17 20(11)21
"""
node = self.model(parent_id=4)
self.session.add(node)
_level = node.get_default_level()
self.assertEqual(
[
# id lft rgt lvl parent tree
(1, 1, 24, _level + 0, None, 1),
(2, 2, 5, _level + 1, 1, 1),
(3, 3, 4, _level + 2, 2, 1),
(4, 6, 13, _level + 1, 1, 1),
(5, 7, 8, _level + 2, 4, 1),
(6, 9, 10, _level + 2, 4, 1),
(7, 14, 23, _level + 1, 1, 1),
(8, 15, 18, _level + 2, 7, 1),
(9, 16, 17, _level + 3, 8, 1),
(10, 19, 22, _level + 2, 7, 1),
(11, 20, 21, _level + 3, 10, 1),
(12, 1, 22, _level + 0, None, 2),
(13, 2, 5, _level + 1, 12, 2),
(14, 3, 4, _level + 2, 13, 2),
(15, 6, 11, _level + 1, 12, 2),
(16, 7, 8, _level + 2, 15, 2),
(17, 9, 10, _level + 2, 15, 2),
(18, 12, 21, _level + 1, 12, 2),
(19, 13, 16, _level + 2, 18, 2),
(20, 14, 15, _level + 3, 19, 2),
(21, 17, 20, _level + 2, 18, 2),
(22, 18, 19, _level + 3, 21, 2),
(23, 11, 12, _level + 2, 4, 1)
],
self.result.all())
def test_insert_after_node(self):
pass
def test_delete_node(self):
""" Delete node(4)
initial state of the tree :mod:`sqlalchemy_mptt.tests.add_mptt_tree`
.. code::
level Test delete node
1 1(1)22
_______________|___________________
| | |
2 2(2)5 6(4)11 12(7)21
| ^ ^
3 3(3)4 7(5)8 9(6)10 13(8)16 17(10)20
| |
4 14(9)15 18(11)19
level Delete node == 4
1 1(1)16
_______________|_____
| |
2 2(2)5 6(7)15
| ^
3 3(3)4 7(8)10 11(10)14
| |
4 8(9)9 12(11)13
"""
node = self.session.query(self.model)\
.filter(self.model.get_pk_column() == 4).one()
self.session.delete(node)
_level = node.get_default_level()
self.assertEqual(
[
# id lft rgt lvl parent tree
(1, 1, 16, _level + 0, None, 1),
(2, 2, 5, _level + 1, 1, 1),
(3, 3, 4, _level + 2, 2, 1),
(7, 6, 15, _level + 1, 1, 1),
(8, 7, 10, _level + 2, 7, 1),
(9, 8, 9, _level + 3, 8, 1),
(10, 11, 14, _level + 2, 7, 1),
(11, 12, 13, _level + 3, 10, 1),
(12, 1, 22, _level + 0, None, 2),
(13, 2, 5, _level + 1, 12, 2),
(14, 3, 4, _level + 2, 13, 2),
(15, 6, 11, _level + 1, 12, 2),
(16, 7, 8, _level + 2, 15, 2),
(17, 9, 10, _level + 2, 15, 2),
(18, 12, 21, _level + 1, 12, 2),
(19, 13, 16, _level + 2, 18, 2),
(20, 14, 15, _level + 3, 19, 2),
(21, 17, 20, _level + 2, 18, 2),
(22, 18, 19, _level + 3, 21, 2)
],
self.result.all())
def test_update_node(self):
""" Set parent_id==5 for node(8)
initial state of the tree :mod:`sqlalchemy_mptt.tests.add_mptt_tree`
.. code::
level Test update node
1 1(1)22
_______________|___________________
| | |
2 2(2)5 6(4)11 12(7)21
| ^ ^
3 3(3)4 7(5)8 9(6)10 13(8)16 17(10)20
| |
4 14(9)15 18(11)19
level Move 8 - > 5
1 1(1)22
_______________|__________________
| | |
2 2(2)5 6(4)15 16(7)21
| ^ |
3 3(3)4 7(5)12 13(6)14 17(10)20
| |
4 8(8)11 18(11)19
|
5 9(9)10
"""
node = self.session.query(self.model)\
.filter(self.model.get_pk_column() == 8).one()
node.parent_id = 5
self.session.add(node)
_level = node.get_default_level()
self.assertEqual(
[
# id lft rgt lvl parent tree
(1, 1, 22, _level + 0, None, 1),
(2, 2, 5, _level + 1, 1, 1),
(3, 3, 4, _level + 2, 2, 1),
(4, 6, 15, _level + 1, 1, 1),
(5, 7, 12, _level + 2, 4, 1),
(6, 13, 14, _level + 2, 4, 1),
(7, 16, 21, _level + 1, 1, 1),
(8, 8, 11, _level + 3, 5, 1),
(9, 9, 10, _level + 4, 8, 1),
(10, 17, 20, _level + 2, 7, 1),
(11, 18, 19, _level + 3, 10, 1),
(12, 1, 22, _level + 0, None, 2),
(13, 2, 5, _level + 1, 12, 2),
(14, 3, 4, _level + 2, 13, 2),
(15, 6, 11, _level + 1, 12, 2),
(16, 7, 8, _level + 2, 15, 2),
(17, 9, 10, _level + 2, 15, 2),
(18, 12, 21, _level + 1, 12, 2),
(19, 13, 16, _level + 2, 18, 2),
(20, 14, 15, _level + 3, 19, 2),
(21, 17, 20, _level + 2, 18, 2),
(22, 18, 19, _level + 3, 21, 2)
],
self.result.all())
""" level Move 8 - > 5
1 1(1)22
_______________|__________________
| | |
2 2(2)5 6(4)15 16(7)21
| ^ |
3 3(3)4 7(5)12 13(6)14 17(10)20
| |
4 8(8)11 18(11)19
|
5 9(9)10
level Move 4 - > 2
1 1(1)22
________|_____________
| |
2 2(2)15 16(7)21
____|_____ |
| | |
3 3(4)12 13(3)14 17(10)20
^ |
4 4(5)9 10(6)11 18(11)19
|
5 5(8)8
|
6 6(9)7
"""
node = self.session.query(self.model)\
.filter(self.model.get_pk_column() == 4).one()
node.parent_id = 2
self.session.add(node)
self.assertEqual(
[
# id lft rgt lvl parent tree
(1, 1, 22, _level + 0, None, 1),
(2, 2, 15, _level + 1, 1, 1),
(3, 13, 14, _level + 2, 2, 1),
(4, 3, 12, _level + 2, 2, 1),
(5, 4, 9, _level + 3, 4, 1),
(6, 10, 11, _level + 3, 4, 1),
(7, 16, 21, _level + 1, 1, 1),
(8, 5, 8, _level + 4, 5, 1),
(9, 6, 7, _level + 5, 8, 1),
(10, 17, 20, _level + 2, 7, 1),
(11, 18, 19, _level + 3, 10, 1),
(12, 1, 22, _level + 0, None, 2),
(13, 2, 5, _level + 1, 12, 2),
(14, 3, 4, _level + 2, 13, 2),
(15, 6, 11, _level + 1, 12, 2),
(16, 7, 8, _level + 2, 15, 2),
(17, 9, 10, _level + 2, 15, 2),
(18, 12, 21, _level + 1, 12, 2),
(19, 13, 16, _level + 2, 18, 2),
(20, 14, 15, _level + 3, 19, 2),
(21, 17, 20, _level + 2, 18, 2),
(22, 18, 19, _level + 3, 21, 2)
],
self.result.all())
""" level Move 4 - > 2
1 1(1)22
________|_____________
| |
2 2(2)15 16(7)21
____|_____ |
| | |
3 3(4)12 13(3)14 17(10)20
^ |
4 4(5)9 10(6)11 18(11)19
|
5 5(8)8
|
6 6(9)7
level Move 8 - > 10
1 1(1)22
________|_____________
| |
2 2(2)11 12(7)21
______|_____ |
| | |
3 3(4)8 9(3)10 13(10)20
__|____ _|______
| | | |
4 4(5)5 6(6)7 14(8)17 18(11)19
|
5 15(9)16
"""
node = self.session.query(self.model)\
.filter(self.model.get_pk_column() == 8).one()
node.parent_id = 10
self.session.add(node)
self.assertEqual(
[
# id lft rgt lvl parent tree
(1, 1, 22, _level + 0, None, 1),
(2, 2, 11, _level + 1, 1, 1),
(3, 9, 10, _level + 2, 2, 1),
(4, 3, 8, _level + 2, 2, 1),
(5, 4, 5, _level + 3, 4, 1),
(6, 6, 7, _level + 3, 4, 1),
(7, 12, 21, _level + 1, 1, 1),
(8, 14, 17, _level + 3, 10, 1),
(9, 15, 16, _level + 4, 8, 1),
(10, 13, 20, _level + 2, 7, 1),
(11, 18, 19, _level + 3, 10, 1),
(12, 1, 22, _level + 0, None, 2),
(13, 2, 5, _level + 1, 12, 2),
(14, 3, 4, _level + 2, 13, 2),
(15, 6, 11, _level + 1, 12, 2),
(16, 7, 8, _level + 2, 15, 2),
(17, 9, 10, _level + 2, 15, 2),
(18, 12, 21, _level + 1, 12, 2),
(19, 13, 16, _level + 2, 18, 2),
(20, 14, 15, _level + 3, 19, 2),
(21, 17, 20, _level + 2, 18, 2),
(22, 18, 19, _level + 3, 21, 2)
],
self.result.all())
def test_rebuild(self):
""" Rebuild tree with tree_id==1
.. code::
level Nested sets w/o left & right (or broken left & right)
1 (1)
_______________|___________________
| | |
2 (2) (4) (7)
| ^ ^
3 (3) (5) (6) (8) (10)
| |
4 (9) (11)
level Nested sets after rebuild
1 1(1)22
_______________|___________________
| | |
2 2(2)5 6(4)11 12(7)21
| ^ ^
3 3(3)4 7(5)8 9(6)10 13(8)16 17(10)20
| |
4 14(9)15 18(11)19
"""
self.session.query(self.model).update({
self.model.left: 0,
self.model.right: 0,
self.model.level: 0
})
self.model.rebuild(self.session, 1)
_level = self.model.get_default_level()
self.assertEqual(
self.result.all(),
[
# id lft rgt lvl parent tree
(1, 1, 22, _level + 0, None, 1),
(2, 2, 5, _level + 1, 1, 1),
(3, 3, 4, _level + 2, 2, 1),
(4, 6, 11, _level + 1, 1, 1),
(5, 7, 8, _level + 2, 4, 1),
(6, 9, 10, _level + 2, 4, 1),
(7, 12, 21, _level + 1, 1, 1),
(8, 13, 16, _level + 2, 7, 1),
(9, 14, 15, _level + 3, 8, 1),
(10, 17, 20, _level + 2, 7, 1),
(11, 18, 19, _level + 3, 10, 1),
(12, 0, 0, 0, None, 2),
(13, 0, 0, 0, 12, 2),
(14, 0, 0, 0, 13, 2),
(15, 0, 0, 0, 12, 2),
(16, 0, 0, 0, 15, 2),
(17, 0, 0, 0, 15, 2),
(18, 0, 0, 0, 12, 2),
(19, 0, 0, 0, 18, 2),
(20, 0, 0, 0, 19, 2),
(21, 0, 0, 0, 18, 2),
(22, 0, 0, 0, 21, 2)
]
)
self.model.rebuild(self.session)
self.assertEqual(
self.result.all(),
[
# id lft rgt lvl parent tree
(1, 1, 22, _level + 0, None, 1),
(2, 2, 5, _level + 1, 1, 1),
(3, 3, 4, _level + 2, 2, 1),
(4, 6, 11, _level + 1, 1, 1),
(5, 7, 8, _level + 2, 4, 1),
(6, 9, 10, _level + 2, 4, 1),
(7, 12, 21, _level + 1, 1, 1),
(8, 13, 16, _level + 2, 7, 1),
(9, 14, 15, _level + 3, 8, 1),
(10, 17, 20, _level + 2, 7, 1),
(11, 18, 19, _level + 3, 10, 1),
(12, 1, 22, _level + 0, None, 2),
(13, 2, 5, _level + 1, 12, 2),
(14, 3, 4, _level + 2, 13, 2),
(15, 6, 11, _level + 1, 12, 2),
(16, 7, 8, _level + 2, 15, 2),
(17, 9, 10, _level + 2, 15, 2),
(18, 12, 21, _level + 1, 12, 2),
(19, 13, 16, _level + 2, 18, 2),
(20, 14, 15, _level + 3, 19, 2),
(21, 17, 20, _level + 2, 18, 2),
(22, 18, 19, _level + 3, 21, 2)
]
)
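# The expected tuples above follow classic nested-sets (MPTT) numbering: a
# depth-first walk hands each node its left value on the way down and its
# right value on the way back up, with the level column tracking recursion
# depth (offset by the model's default level in the assertions above).
# A minimal, hypothetical sketch of that numbering -- not part of
# sqlalchemy_mptt, only an illustration of where the (lft, rgt, level)
# triples come from:
def _nested_sets_numbering(tree, counter=None, level=1):
    """Assign lft/rgt/level to a dict tree of {'id': ..., 'children': [...]}."""
    if counter is None:
        counter = [0]
    counter[0] += 1  # entering the node assigns its left value
    numbered = {'id': tree['id'], 'lft': counter[0], 'level': level,
                'children': [_nested_sets_numbering(child, counter, level + 1)
                             for child in tree.get('children', [])]}
    counter[0] += 1  # leaving the node assigns its right value
    numbered['rgt'] = counter[0]
    return numbered
# For example, a root with one child that has one grandchild yields the
# (lft, rgt) pairs (1, 6), (2, 5) and (3, 4), matching the pattern above.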
|
|
"""
Tests for Constrain Locator To Vertex module
Offline tests run in mayapy in a terminal, skipping tests that require the GUI.
For offline testing run:
    mayapy constrainLoc2vtx_tests.py
"""
import os
from os.path import abspath, dirname
import unittest
import pymel.core
import constrainLoc2vtx
reload(constrainLoc2vtx)
gui = False if __name__ == '__main__' else True
### skipping tests:
# @unittest.skip('--> Passed...')
# @unittest.skipIf(not gui, '--> Not in Maya GUI...')
class ContrsainLoc2vtxUnittest(unittest.TestCase):
@classmethod
def setUp(cls):
pymel.core.newFile(force=True)
transform, shape = pymel.core.polyCube()
pymel.core.select(clear=True)
transform.rename('test_cube')
pymel.core.setKeyframe(transform)
transform.setTranslation((1, 2, 3))
transform.setRotation((35, 148, 323))
transform.setScale((1, 2, 1))
pymel.core.setKeyframe(transform, time=120)
# @unittest.skip('--> Passed...')
def test__test_is_running(self):
self.assertTrue(True)
class ContrsainLoc2vtxTest(unittest.TestCase):
def setUp(self):
ContrsainLoc2vtxUnittest.setUp()
def test__functional_test(self):
pymel.core.select('test_cubeShape.vtx[1]')
constrainLoc2vtx.constrain_loc_to_vtx()
"""
creates locator with name:
locator_vtx_constrain_test_cube
FIX THIS
"""
self.assertTrue(pymel.core.PyNode('locator_vertexConstrained'))
"""
creates expression node with name:
expression_vtx_constrain_test_cube
FIX THIS
"""
self.assertTrue(pymel.core.PyNode('locator_vertexConstrained1'))
"""
expression is:
"""
expression = ("""float $BBoxSize = test_cube.boundingBoxMinX;"""
"""\n\n$vertexWorldPos = `pointPosition -world test_cubeShape.vtx[1]`;"""
"""\nlocator_vertexConstrained.translateX = $vertexWorldPos[0];"""
"""\nlocator_vertexConstrained.translateY = $vertexWorldPos[1];"""
"""\nlocator_vertexConstrained.translateZ = $vertexWorldPos[2];""")
self.assertEqual(
pymel.core.PyNode('locator_vertexConstrained1').getExpression(),
expression
)
"""
Locator position equals
vertex position
"""
loc = pymel.core.PyNode('locator_vertexConstrained')
vtx = pymel.core.PyNode('test_cubeShape.vtx[1]')
loc_x, loc_y, loc_z = loc.getTranslation(space='world')
vtx_x, vtx_y, vtx_z = vtx.getPosition(space='world')
pymel.core.currentTime(2)
self.assertEqual(loc_x, vtx_x)
self.assertEqual(loc_y, vtx_y)
self.assertEqual(loc_z, vtx_z)
pymel.core.currentTime(50)
self.assertEqual(loc_x, vtx_x)
self.assertEqual(loc_y, vtx_y)
self.assertEqual(loc_z, vtx_z)
def test__functional_test_2(self):
ContrsainLoc2vtxUnittest.setUp()
pymel.core.select('test_cubeShape.vtx[2]')
constrainLoc2vtx.constrain_loc_to_vtx()
"""
creates locator with name:
locator_vtx_constrain_test_cube
FIX THIS
"""
self.assertTrue(pymel.core.PyNode('locator_vertexConstrained'))
"""
creates expression node with name:
expression_vtx_constrain_test_cube
FIX THIS
"""
self.assertTrue(pymel.core.PyNode('locator_vertexConstrained1'))
"""
expression is:
"""
expression = ("""float $BBoxSize = test_cube.boundingBoxMinX;"""
"""\n\n$vertexWorldPos = `pointPosition -world test_cubeShape.vtx[2]`;"""
"""\nlocator_vertexConstrained.translateX = $vertexWorldPos[0];"""
"""\nlocator_vertexConstrained.translateY = $vertexWorldPos[1];"""
"""\nlocator_vertexConstrained.translateZ = $vertexWorldPos[2];""")
self.assertEqual(
pymel.core.PyNode('locator_vertexConstrained1').getExpression(),
expression
)
"""
Locator position equals
vertex position
"""
loc = pymel.core.PyNode('locator_vertexConstrained')
vtx = pymel.core.PyNode('test_cubeShape.vtx[2]')
loc_x, loc_y, loc_z = loc.getTranslation(space='world')
vtx_x, vtx_y, vtx_z = vtx.getPosition(space='world')
pymel.core.currentTime(2)
self.assertEqual(loc_x, vtx_x)
self.assertEqual(loc_y, vtx_y)
self.assertEqual(loc_z, vtx_z)
pymel.core.currentTime(50)
self.assertEqual(loc_x, vtx_x)
self.assertEqual(loc_y, vtx_y)
self.assertEqual(loc_z, vtx_z)
### PART 2
pymel.core.select('test_cubeShape.vtx[5]')
constrainLoc2vtx.constrain_loc_to_vtx()
"""
creates locator with name:
locator_vtx_constrain_test_cube
FIX THIS
"""
self.assertTrue(pymel.core.PyNode('locator_vertexConstrained'))
"""
creates expression node with name:
expression_vtx_constrain_test_cube
FIX THIS
"""
self.assertTrue(pymel.core.PyNode('locator_vertexConstrained1'))
"""
expression is:
"""
expression = ("""float $BBoxSize = test_cube.boundingBoxMinX;"""
"""\n\n$vertexWorldPos = `pointPosition -world test_cubeShape.vtx[5]`;"""
"""\nlocator_vertexConstrained2.translateX = $vertexWorldPos[0];"""
"""\nlocator_vertexConstrained2.translateY = $vertexWorldPos[1];"""
"""\nlocator_vertexConstrained2.translateZ = $vertexWorldPos[2];""")
self.assertEqual(
pymel.core.PyNode('locator_vertexConstrained3').getExpression(),
expression
)
"""
Locator position equals
vertex position
"""
loc = pymel.core.PyNode('locator_vertexConstrained2')
vtx = pymel.core.PyNode('test_cubeShape.vtx[5]')
loc_x, loc_y, loc_z = loc.getTranslation(space='world')
vtx_x, vtx_y, vtx_z = vtx.getPosition(space='world')
pymel.core.currentTime(2)
self.assertEqual(loc_x, vtx_x)
self.assertEqual(loc_y, vtx_y)
self.assertEqual(loc_z, vtx_z)
pymel.core.currentTime(50)
self.assertEqual(loc_x, vtx_x)
self.assertEqual(loc_y, vtx_y)
self.assertEqual(loc_z, vtx_z)
# def run():
# print('//'*25)
# print('\\\\'*25)
# print('//'*25)
# unittest.main(module=__name__, exit=False, verbosity=1)
# if __name__ == '__main__':
# run()
def main():
unittest.main(module=__name__, exit=False)
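# For orientation only: a minimal, hypothetical sketch of how a locator can be
# constrained to a vertex with a runtime expression via pymel, matching the
# expression strings asserted above. The real
# constrainLoc2vtx.constrain_loc_to_vtx() is not shown in this file and may
# differ in detail (node naming, the bounding-box trigger line, etc.).
def _sketch_constrain_loc_to_vtx(vtx='test_cubeShape.vtx[1]'):
    # Locator that will follow the vertex.
    loc = pymel.core.spaceLocator(name='locator_vertexConstrained')
    # Re-evaluate the vertex world position on every evaluation and copy it
    # onto the locator's translation.
    expr = ('$vertexWorldPos = `pointPosition -world {vtx}`;\n'
            '{loc}.translateX = $vertexWorldPos[0];\n'
            '{loc}.translateY = $vertexWorldPos[1];\n'
            '{loc}.translateZ = $vertexWorldPos[2];').format(vtx=vtx,
                                                             loc=loc.name())
    pymel.core.expression(s=expr, alwaysEvaluate=True)
    return loc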
|
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
import mock
import six
from testtools import matchers
import webob
import webob.exc
import webob.multidict
from nova.api.openstack import common
from nova.compute import task_states
from nova.compute import vm_states
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import utils
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
class LimiterTest(test.NoDBTestCase):
"""Unit tests for the `nova.api.openstack.common.limited` method which
takes in a list of items and, depending on the 'offset' and 'limit' GET
params, returns a subset or complete set of the given items.
"""
def setUp(self):
"""Run before each test."""
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
self.medium = range(1000)
self.large = range(10000)
def test_limiter_offset_zero(self):
# Test offset key works with 0.
req = webob.Request.blank('/?offset=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
# Test offset key works with a medium sized number.
req = webob.Request.blank('/?offset=10')
self.assertEqual(0, len(common.limited(self.tiny, req)))
self.assertEqual(common.limited(self.small, req), self.small[10:])
self.assertEqual(common.limited(self.medium, req), self.medium[10:])
self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
# Test offset key works with a number over 1000 (max_limit).
req = webob.Request.blank('/?offset=1001')
self.assertEqual(0, len(common.limited(self.tiny, req)))
self.assertEqual(0, len(common.limited(self.small, req)))
self.assertEqual(0, len(common.limited(self.medium, req)))
self.assertEqual(
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
# Test offset key works with a blank offset.
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
# Test offset key works with a BAD offset.
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
# Test request with no offset or limit.
req = webob.Request.blank('/')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
# Test limit of zero.
req = webob.Request.blank('/?limit=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_medium(self):
# Test limit of 10.
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium[:10])
self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
# Test limit of 3000.
req = webob.Request.blank('/?limit=3000')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
# Test request with both limit and offset.
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(common.limited(items, req), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3&limit=1500')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(0, len(common.limited(items, req)))
def test_limiter_custom_max_limit(self):
# Test a max_limit other than 1000.
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3&limit=2500')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(0, len(common.limited(items, req, max_limit=2000)))
def test_limiter_negative_limit(self):
# Test a negative limit.
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
# Test a negative offset.
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
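# The LimiterTest cases above pin down the contract of common.limited(): both
# 'offset' and 'limit' come from the query string, must parse as non-negative
# integers, and the returned slice is capped at max_limit (1000 by default).
# A standalone sketch of that behaviour under those assumptions (the real
# implementation lives in nova.api.openstack.common and may differ in detail):
def _limited_sketch(items, req, max_limit=1000):
    try:
        offset = int(req.GET.get('offset', 0))
        limit = int(req.GET.get('limit', max_limit))
    except ValueError:
        raise webob.exc.HTTPBadRequest()
    if offset < 0 or limit < 0:
        raise webob.exc.HTTPBadRequest()
    # limit=0 means "no explicit limit"; the page size is still capped.
    limit = min(max_limit, limit or max_limit)
    return items[offset:offset + limit]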
class SortParamUtilsTest(test.NoDBTestCase):
def test_get_sort_params_defaults(self):
'''Verifies the default sort key and direction.'''
sort_keys, sort_dirs = common.get_sort_params({})
self.assertEqual(['created_at'], sort_keys)
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_override_defaults(self):
        '''Verifies that the defaults can be overridden.'''
sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1',
default_dir='dir1')
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
sort_keys, sort_dirs = common.get_sort_params({}, default_key=None,
default_dir=None)
self.assertEqual([], sort_keys)
self.assertEqual([], sort_dirs)
def test_get_sort_params_single_value(self):
'''Verifies a single sort key and direction.'''
params = webob.multidict.MultiDict()
params.add('sort_key', 'key1')
params.add('sort_dir', 'dir1')
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_with_default(self):
'''Verifies a single sort value with a default.'''
params = webob.multidict.MultiDict()
params.add('sort_key', 'key1')
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
# sort_key was supplied, sort_dir should be defaulted
self.assertEqual(['desc'], sort_dirs)
params = webob.multidict.MultiDict()
params.add('sort_dir', 'dir1')
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['created_at'], sort_keys)
# sort_dir was supplied, sort_key should be defaulted
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_multiple_values(self):
'''Verifies multiple sort parameter values.'''
params = webob.multidict.MultiDict()
params.add('sort_key', 'key1')
params.add('sort_key', 'key2')
params.add('sort_key', 'key3')
params.add('sort_dir', 'dir1')
params.add('sort_dir', 'dir2')
params.add('sort_dir', 'dir3')
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs)
# Also ensure that the input parameters are not modified
sort_key_vals = []
sort_dir_vals = []
while 'sort_key' in params:
sort_key_vals.append(params.pop('sort_key'))
while 'sort_dir' in params:
sort_dir_vals.append(params.pop('sort_dir'))
self.assertEqual(['key1', 'key2', 'key3'], sort_key_vals)
self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dir_vals)
self.assertEqual(0, len(params))
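# SortParamUtilsTest documents the shape of common.get_sort_params(): repeated
# 'sort_key'/'sort_dir' query parameters are collected in order, the defaults
# ('created_at'/'desc') fill in missing values, and the caller's params object
# is left untouched. A hypothetical sketch under those assumptions:
def _get_sort_params_sketch(params, default_key='created_at',
                            default_dir='desc'):
    # MultiDict.getall() preserves repeated values; a plain dict never reaches
    # it because the membership test fails first.
    sort_keys = list(params.getall('sort_key')) if 'sort_key' in params else []
    sort_dirs = list(params.getall('sort_dir')) if 'sort_dir' in params else []
    if not sort_keys and default_key is not None:
        sort_keys = [default_key]
    if not sort_dirs and default_dir is not None:
        sort_dirs = [default_dir]
    return sort_keys, sort_dirs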
class PaginationParamsTest(test.NoDBTestCase):
"""Unit tests for the `nova.api.openstack.common.get_pagination_params`
method which takes in a request object and returns 'marker' and 'limit'
GET params.
"""
def test_no_params(self):
# Test no params.
req = webob.Request.blank('/')
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
# Test valid marker param.
req = webob.Request.blank(
'/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def test_valid_limit(self):
# Test valid limit param.
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
# Test invalid limit param.
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
# Test valid limit and marker parameters.
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
self.assertEqual(common.get_pagination_params(req),
{'marker': marker, 'limit': 20})
def test_valid_page_size(self):
# Test valid page_size param.
req = webob.Request.blank('/?page_size=10')
self.assertEqual(common.get_pagination_params(req),
{'page_size': 10})
def test_invalid_page_size(self):
# Test invalid page_size param.
req = webob.Request.blank('/?page_size=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_page_size(self):
# Test valid limit and page_size parameters.
req = webob.Request.blank('/?limit=20&page_size=5')
self.assertEqual(common.get_pagination_params(req),
{'page_size': 5, 'limit': 20})
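# PaginationParamsTest pins down get_pagination_params(): only 'marker',
# 'limit' and 'page_size' are read from the query string, the integer params
# must be positive, and absent keys are simply left out of the result.
# A hypothetical sketch of that behaviour (the real helper may validate
# differently):
def _get_pagination_params_sketch(req):
    params = {}
    if 'marker' in req.GET:
        params['marker'] = req.GET['marker']
    for key in ('limit', 'page_size'):
        if key in req.GET:
            try:
                value = int(req.GET[key])
            except ValueError:
                raise webob.exc.HTTPBadRequest()
            if value <= 0:
                raise webob.exc.HTTPBadRequest()
            params[key] = value
    return params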
class MiscFunctionsTest(test.TestCase):
def test_remove_trailing_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1'
expected = 'http://www.testsite.com'
actual = common.remove_trailing_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_trailing_version_from_href_2(self):
fixture = 'http://www.testsite.com/compute/v1.1'
expected = 'http://www.testsite.com/compute'
actual = common.remove_trailing_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_trailing_version_from_href_3(self):
fixture = 'http://www.testsite.com/v1.1/images/v10.5'
expected = 'http://www.testsite.com/v1.1/images'
actual = common.remove_trailing_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_trailing_version_from_href_bad_request(self):
fixture = 'http://www.testsite.com/v1.1/images'
self.assertRaises(ValueError,
common.remove_trailing_version_from_href,
fixture)
def test_remove_trailing_version_from_href_bad_request_2(self):
fixture = 'http://www.testsite.com/images/v'
self.assertRaises(ValueError,
common.remove_trailing_version_from_href,
fixture)
def test_remove_trailing_version_from_href_bad_request_3(self):
fixture = 'http://www.testsite.com/v1.1images'
self.assertRaises(ValueError,
common.remove_trailing_version_from_href,
fixture)
def test_get_id_from_href_with_int_url(self):
fixture = 'http://www.testsite.com/dir/45'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_int(self):
fixture = '45'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_int_url_query(self):
fixture = 'http://www.testsite.com/dir/45?asdf=jkl'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid_url(self):
fixture = 'http://www.testsite.com/dir/abc123'
actual = common.get_id_from_href(fixture)
expected = "abc123"
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid_url_query(self):
fixture = 'http://www.testsite.com/dir/abc123?asdf=jkl'
actual = common.get_id_from_href(fixture)
expected = "abc123"
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid(self):
fixture = 'abc123'
actual = common.get_id_from_href(fixture)
expected = 'abc123'
self.assertEqual(actual, expected)
def test_raise_http_conflict_for_instance_invalid_state(self):
exc = exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
try:
common.raise_http_conflict_for_instance_invalid_state(exc,
'meow', 'fake_server_id')
except webob.exc.HTTPConflict as e:
self.assertEqual(six.text_type(e),
"Cannot 'meow' instance fake_server_id while it is in "
"fake_attr fake_state")
else:
self.fail("webob.exc.HTTPConflict was not raised")
def test_check_img_metadata_properties_quota_valid_metadata(self):
ctxt = utils.get_test_admin_context()
metadata1 = {"key": "value"}
actual = common.check_img_metadata_properties_quota(ctxt, metadata1)
self.assertIsNone(actual)
metadata2 = {"key": "v" * 260}
actual = common.check_img_metadata_properties_quota(ctxt, metadata2)
self.assertIsNone(actual)
metadata3 = {"key": ""}
actual = common.check_img_metadata_properties_quota(ctxt, metadata3)
self.assertIsNone(actual)
def test_check_img_metadata_properties_quota_inv_metadata(self):
ctxt = utils.get_test_admin_context()
metadata1 = {"a" * 260: "value"}
self.assertRaises(webob.exc.HTTPBadRequest,
common.check_img_metadata_properties_quota, ctxt, metadata1)
metadata2 = {"": "value"}
self.assertRaises(webob.exc.HTTPBadRequest,
common.check_img_metadata_properties_quota, ctxt, metadata2)
metadata3 = "invalid metadata"
self.assertRaises(webob.exc.HTTPBadRequest,
common.check_img_metadata_properties_quota, ctxt, metadata3)
metadata4 = None
self.assertIsNone(common.check_img_metadata_properties_quota(ctxt,
metadata4))
metadata5 = {}
self.assertIsNone(common.check_img_metadata_properties_quota(ctxt,
metadata5))
def test_status_from_state(self):
for vm_state in (vm_states.ACTIVE, vm_states.STOPPED):
for task_state in (task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
actual = common.status_from_state(vm_state, task_state)
expected = 'RESIZE'
self.assertEqual(expected, actual)
def test_status_rebuild_from_state(self):
for vm_state in (vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR):
for task_state in (task_states.REBUILDING,
task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILD_SPAWNING):
actual = common.status_from_state(vm_state, task_state)
expected = 'REBUILD'
self.assertEqual(expected, actual)
def test_status_migrating_from_state(self):
for vm_state in (vm_states.ACTIVE, vm_states.PAUSED):
task_state = task_states.MIGRATING
actual = common.status_from_state(vm_state, task_state)
expected = 'MIGRATING'
self.assertEqual(expected, actual)
def test_task_and_vm_state_from_status(self):
fixture1 = ['reboot']
actual = common.task_and_vm_state_from_status(fixture1)
expected = [vm_states.ACTIVE], [task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING]
self.assertEqual(expected, actual)
fixture2 = ['resize']
actual = common.task_and_vm_state_from_status(fixture2)
expected = ([vm_states.ACTIVE, vm_states.STOPPED],
[task_states.RESIZE_FINISH,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP])
self.assertEqual(expected, actual)
fixture3 = ['resize', 'reboot']
actual = common.task_and_vm_state_from_status(fixture3)
expected = ([vm_states.ACTIVE, vm_states.STOPPED],
[task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING,
task_states.RESIZE_FINISH,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP])
self.assertEqual(expected, actual)
def test_is_all_tenants_true(self):
for value in ('', '1', 'true', 'True'):
search_opts = {'all_tenants': value}
self.assertTrue(common.is_all_tenants(search_opts))
self.assertIn('all_tenants', search_opts)
def test_is_all_tenants_false(self):
for value in ('0', 'false', 'False'):
search_opts = {'all_tenants': value}
self.assertFalse(common.is_all_tenants(search_opts))
self.assertIn('all_tenants', search_opts)
def test_is_all_tenants_missing(self):
self.assertFalse(common.is_all_tenants({}))
def test_is_all_tenants_invalid(self):
search_opts = {'all_tenants': 'wonk'}
self.assertRaises(exception.InvalidInput, common.is_all_tenants,
search_opts)
class TestCollectionLinks(test.NoDBTestCase):
"""Tests the _get_collection_links method."""
@mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
def test_items_less_than_limit(self, href_link_mock):
items = [
{"uuid": "123"}
]
req = mock.MagicMock()
params = mock.PropertyMock(return_value=dict(limit=10))
type(req).params = params
builder = common.ViewBuilder()
results = builder._get_collection_links(req, items, "ignored", "uuid")
self.assertFalse(href_link_mock.called)
self.assertThat(results, matchers.HasLength(0))
@mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
def test_items_equals_given_limit(self, href_link_mock):
items = [
{"uuid": "123"}
]
req = mock.MagicMock()
params = mock.PropertyMock(return_value=dict(limit=1))
type(req).params = params
builder = common.ViewBuilder()
results = builder._get_collection_links(req, items,
mock.sentinel.coll_key,
"uuid")
href_link_mock.assert_called_once_with(req, "123",
mock.sentinel.coll_key)
self.assertThat(results, matchers.HasLength(1))
@mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
def test_items_equals_default_limit(self, href_link_mock):
items = [
{"uuid": "123"}
]
req = mock.MagicMock()
params = mock.PropertyMock(return_value=dict())
type(req).params = params
self.flags(osapi_max_limit=1)
builder = common.ViewBuilder()
results = builder._get_collection_links(req, items,
mock.sentinel.coll_key,
"uuid")
href_link_mock.assert_called_once_with(req, "123",
mock.sentinel.coll_key)
self.assertThat(results, matchers.HasLength(1))
@mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
def test_items_equals_default_limit_with_given(self, href_link_mock):
items = [
{"uuid": "123"}
]
req = mock.MagicMock()
# Given limit is greater than default max, only return default max
params = mock.PropertyMock(return_value=dict(limit=2))
type(req).params = params
self.flags(osapi_max_limit=1)
builder = common.ViewBuilder()
results = builder._get_collection_links(req, items,
mock.sentinel.coll_key,
"uuid")
href_link_mock.assert_called_once_with(req, "123",
mock.sentinel.coll_key)
self.assertThat(results, matchers.HasLength(1))
class LinkPrefixTest(test.NoDBTestCase):
def test_update_link_prefix(self):
vb = common.ViewBuilder()
result = vb._update_link_prefix("http://192.168.0.243:24/",
"http://127.0.0.1/compute")
self.assertEqual("http://127.0.0.1/compute", result)
result = vb._update_link_prefix("http://foo.x.com/v1",
"http://new.prefix.com")
self.assertEqual("http://new.prefix.com/v1", result)
result = vb._update_link_prefix(
"http://foo.x.com/v1",
"http://new.prefix.com:20455/new_extra_prefix")
self.assertEqual("http://new.prefix.com:20455/new_extra_prefix/v1",
result)
class UrlJoinTest(test.NoDBTestCase):
def test_url_join(self):
pieces = ["one", "two", "three"]
joined = common.url_join(*pieces)
self.assertEqual("one/two/three", joined)
def test_url_join_extra_slashes(self):
pieces = ["one/", "/two//", "/three/"]
joined = common.url_join(*pieces)
self.assertEqual("one/two/three", joined)
def test_url_join_trailing_slash(self):
pieces = ["one", "two", "three", ""]
joined = common.url_join(*pieces)
self.assertEqual("one/two/three/", joined)
def test_url_join_empty_list(self):
pieces = []
joined = common.url_join(*pieces)
self.assertEqual("", joined)
def test_url_join_single_empty_string(self):
pieces = [""]
joined = common.url_join(*pieces)
self.assertEqual("", joined)
def test_url_join_single_slash(self):
pieces = ["/"]
joined = common.url_join(*pieces)
self.assertEqual("", joined)
class ViewBuilderLinkTest(test.NoDBTestCase):
project_id = "fake"
api_version = "2.1"
def setUp(self):
super(ViewBuilderLinkTest, self).setUp()
self.request = self.req("/%s" % self.project_id)
self.vb = common.ViewBuilder()
def req(self, url, use_admin_context=False):
return fakes.HTTPRequest.blank(url,
use_admin_context=use_admin_context, version=self.api_version)
def test_get_project_id(self):
proj_id = self.vb._get_project_id(self.request)
self.assertEqual(self.project_id, proj_id)
def test_get_next_link(self):
identifier = "identifier"
collection = "collection"
next_link = self.vb._get_next_link(self.request, identifier,
collection)
expected = "/".join((self.request.url,
"%s?marker=%s" % (collection, identifier)))
self.assertEqual(expected, next_link)
def test_get_href_link(self):
identifier = "identifier"
collection = "collection"
href_link = self.vb._get_href_link(self.request, identifier,
collection)
expected = "/".join((self.request.url, collection, identifier))
self.assertEqual(expected, href_link)
def test_get_bookmark_link(self):
identifier = "identifier"
collection = "collection"
bookmark_link = self.vb._get_bookmark_link(self.request, identifier,
collection)
bmk_url = common.remove_trailing_version_from_href(
self.request.application_url)
expected = "/".join((bmk_url, self.project_id, collection, identifier))
self.assertEqual(expected, bookmark_link)
|
|
"""
Unit tests for refactor.py.
"""
from __future__ import with_statement
import sys
import os
import codecs
import operator
import io
import tempfile
import shutil
import unittest
import warnings
from lib2to3 import refactor, pygram, fixer_base
from lib2to3.pgen2 import token
from . import support
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
FIXER_DIR = os.path.join(TEST_DATA_DIR, "fixers")
sys.path.append(FIXER_DIR)
try:
_DEFAULT_FIXERS = refactor.get_fixers_from_package("myfixes")
finally:
sys.path.pop()
_2TO3_FIXERS = refactor.get_fixers_from_package("lib2to3.fixes")
class TestRefactoringTool(unittest.TestCase):
def setUp(self):
sys.path.append(FIXER_DIR)
def tearDown(self):
sys.path.pop()
def check_instances(self, instances, classes):
for inst, cls in zip(instances, classes):
if not isinstance(inst, cls):
self.fail("%s are not instances of %s" % instances, classes)
def rt(self, options=None, fixers=_DEFAULT_FIXERS, explicit=None):
return refactor.RefactoringTool(fixers, options, explicit)
def test_print_function_option(self):
rt = self.rt({"print_function" : True})
self.assertIs(rt.grammar, pygram.python_grammar_no_print_statement)
self.assertIs(rt.driver.grammar,
pygram.python_grammar_no_print_statement)
def test_write_unchanged_files_option(self):
rt = self.rt()
self.assertFalse(rt.write_unchanged_files)
rt = self.rt({"write_unchanged_files" : True})
self.assertTrue(rt.write_unchanged_files)
def test_fixer_loading_helpers(self):
contents = ["explicit", "first", "last", "parrot", "preorder"]
non_prefixed = refactor.get_all_fix_names("myfixes")
prefixed = refactor.get_all_fix_names("myfixes", False)
full_names = refactor.get_fixers_from_package("myfixes")
self.assertEqual(prefixed, ["fix_" + name for name in contents])
self.assertEqual(non_prefixed, contents)
self.assertEqual(full_names,
["myfixes.fix_" + name for name in contents])
def test_detect_future_features(self):
run = refactor._detect_future_features
fs = frozenset
empty = fs()
self.assertEqual(run(""), empty)
self.assertEqual(run("from __future__ import print_function"),
fs(("print_function",)))
self.assertEqual(run("from __future__ import generators"),
fs(("generators",)))
self.assertEqual(run("from __future__ import generators, feature"),
fs(("generators", "feature")))
inp = "from __future__ import generators, print_function"
self.assertEqual(run(inp), fs(("generators", "print_function")))
inp ="from __future__ import print_function, generators"
self.assertEqual(run(inp), fs(("print_function", "generators")))
inp = "from __future__ import (print_function,)"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "from __future__ import (generators, print_function)"
self.assertEqual(run(inp), fs(("generators", "print_function")))
inp = "from __future__ import (generators, nested_scopes)"
self.assertEqual(run(inp), fs(("generators", "nested_scopes")))
inp = """from __future__ import generators
from __future__ import print_function"""
self.assertEqual(run(inp), fs(("generators", "print_function")))
invalid = ("from",
"from 4",
"from x",
"from x 5",
"from x im",
"from x import",
"from x import 4",
)
for inp in invalid:
self.assertEqual(run(inp), empty)
inp = "'docstring'\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "'docstring'\n'somng'\nfrom __future__ import print_function"
self.assertEqual(run(inp), empty)
inp = "# comment\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "# comment\n'doc'\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "class x: pass\nfrom __future__ import print_function"
self.assertEqual(run(inp), empty)
def test_get_headnode_dict(self):
class NoneFix(fixer_base.BaseFix):
pass
class FileInputFix(fixer_base.BaseFix):
PATTERN = "file_input< any * >"
class SimpleFix(fixer_base.BaseFix):
PATTERN = "'name'"
no_head = NoneFix({}, [])
with_head = FileInputFix({}, [])
simple = SimpleFix({}, [])
d = refactor._get_headnode_dict([no_head, with_head, simple])
top_fixes = d.pop(pygram.python_symbols.file_input)
self.assertEqual(top_fixes, [with_head, no_head])
name_fixes = d.pop(token.NAME)
self.assertEqual(name_fixes, [simple, no_head])
for fixes in d.values():
self.assertEqual(fixes, [no_head])
def test_fixer_loading(self):
from myfixes.fix_first import FixFirst
from myfixes.fix_last import FixLast
from myfixes.fix_parrot import FixParrot
from myfixes.fix_preorder import FixPreorder
rt = self.rt()
pre, post = rt.get_fixers()
self.check_instances(pre, [FixPreorder])
self.check_instances(post, [FixFirst, FixParrot, FixLast])
def test_naughty_fixers(self):
self.assertRaises(ImportError, self.rt, fixers=["not_here"])
self.assertRaises(refactor.FixerError, self.rt, fixers=["no_fixer_cls"])
self.assertRaises(refactor.FixerError, self.rt, fixers=["bad_order"])
def test_refactor_string(self):
rt = self.rt()
input = "def parrot(): pass\n\n"
tree = rt.refactor_string(input, "<test>")
self.assertNotEqual(str(tree), input)
input = "def f(): pass\n\n"
tree = rt.refactor_string(input, "<test>")
self.assertEqual(str(tree), input)
def test_refactor_stdin(self):
class MyRT(refactor.RefactoringTool):
def print_output(self, old_text, new_text, filename, equal):
results.extend([old_text, new_text, filename, equal])
results = []
rt = MyRT(_DEFAULT_FIXERS)
save = sys.stdin
sys.stdin = io.StringIO("def parrot(): pass\n\n")
try:
rt.refactor_stdin()
finally:
sys.stdin = save
expected = ["def parrot(): pass\n\n",
"def cheese(): pass\n\n",
"<stdin>", False]
self.assertEqual(results, expected)
def check_file_refactoring(self, test_file, fixers=_2TO3_FIXERS,
options=None, mock_log_debug=None,
actually_write=True):
tmpdir = tempfile.mkdtemp(prefix="2to3-test_refactor")
self.addCleanup(shutil.rmtree, tmpdir)
# make a copy of the tested file that we can write to
shutil.copy(test_file, tmpdir)
test_file = os.path.join(tmpdir, os.path.basename(test_file))
os.chmod(test_file, 0o644)
def read_file():
with open(test_file, "rb") as fp:
return fp.read()
old_contents = read_file()
rt = self.rt(fixers=fixers, options=options)
if mock_log_debug:
rt.log_debug = mock_log_debug
rt.refactor_file(test_file)
self.assertEqual(old_contents, read_file())
if not actually_write:
return
rt.refactor_file(test_file, True)
new_contents = read_file()
self.assertNotEqual(old_contents, new_contents)
return new_contents
def test_refactor_file(self):
test_file = os.path.join(FIXER_DIR, "parrot_example.py")
self.check_file_refactoring(test_file, _DEFAULT_FIXERS)
def test_refactor_file_write_unchanged_file(self):
test_file = os.path.join(FIXER_DIR, "parrot_example.py")
debug_messages = []
def recording_log_debug(msg, *args):
debug_messages.append(msg % args)
self.check_file_refactoring(test_file, fixers=(),
options={"write_unchanged_files": True},
mock_log_debug=recording_log_debug,
actually_write=False)
# Testing that it logged this message when write=False was passed is
# sufficient to see that it did not bail early after "No changes".
message_regex = r"Not writing changes to .*%s%s" % (
os.sep, os.path.basename(test_file))
for message in debug_messages:
if "Not writing changes" in message:
self.assertRegex(message, message_regex)
break
else:
self.fail("%r not matched in %r" % (message_regex, debug_messages))
def test_refactor_dir(self):
def check(structure, expected):
def mock_refactor_file(self, f, *args):
got.append(f)
save_func = refactor.RefactoringTool.refactor_file
refactor.RefactoringTool.refactor_file = mock_refactor_file
rt = self.rt()
got = []
dir = tempfile.mkdtemp(prefix="2to3-test_refactor")
try:
os.mkdir(os.path.join(dir, "a_dir"))
for fn in structure:
open(os.path.join(dir, fn), "wb").close()
rt.refactor_dir(dir)
finally:
refactor.RefactoringTool.refactor_file = save_func
shutil.rmtree(dir)
self.assertEqual(got,
[os.path.join(dir, path) for path in expected])
check([], [])
tree = ["nothing",
"hi.py",
".dumb",
".after.py",
"notpy.npy",
"sappy"]
expected = ["hi.py"]
check(tree, expected)
tree = ["hi.py",
os.path.join("a_dir", "stuff.py")]
check(tree, tree)
def test_file_encoding(self):
fn = os.path.join(TEST_DATA_DIR, "different_encoding.py")
self.check_file_refactoring(fn)
def test_false_file_encoding(self):
fn = os.path.join(TEST_DATA_DIR, "false_encoding.py")
data = self.check_file_refactoring(fn)
def test_bom(self):
fn = os.path.join(TEST_DATA_DIR, "bom.py")
data = self.check_file_refactoring(fn)
self.assertTrue(data.startswith(codecs.BOM_UTF8))
def test_crlf_newlines(self):
old_sep = os.linesep
os.linesep = "\r\n"
try:
fn = os.path.join(TEST_DATA_DIR, "crlf.py")
fixes = refactor.get_fixers_from_package("lib2to3.fixes")
self.check_file_refactoring(fn, fixes)
finally:
os.linesep = old_sep
def test_refactor_docstring(self):
rt = self.rt()
doc = """
>>> example()
42
"""
out = rt.refactor_docstring(doc, "<test>")
self.assertEqual(out, doc)
doc = """
>>> def parrot():
... return 43
"""
out = rt.refactor_docstring(doc, "<test>")
self.assertNotEqual(out, doc)
def test_explicit(self):
from myfixes.fix_explicit import FixExplicit
rt = self.rt(fixers=["myfixes.fix_explicit"])
self.assertEqual(len(rt.post_order), 0)
rt = self.rt(explicit=["myfixes.fix_explicit"])
for fix in rt.post_order:
if isinstance(fix, FixExplicit):
break
else:
self.fail("explicit fixer not loaded")
|
|
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
import time
from rally.common import logging
from rally import exceptions
from rally.task import utils
from glanceclient import exc as glance_exc
from oslo_config import cfg
import requests
import six
LOG = logging.getLogger(__name__)
GLANCE_BENCHMARK_OPTS = [
cfg.FloatOpt("glance_image_create_prepoll_delay",
default=2.0,
help="Time to sleep after creating a resource before "
"polling for it status"),
cfg.FloatOpt("glance_image_create_timeout",
default=120.0,
help="Time to wait for glance image to be created."),
cfg.FloatOpt("glance_image_create_poll_interval",
default=1.0,
help="Interval between checks when waiting for image "
"creation.")
]
CONF = cfg.CONF
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(GLANCE_BENCHMARK_OPTS, group=benchmark_group)
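# NOTE: the options above are registered in the "benchmark" group, so they can
# be overridden through the regular oslo.config mechanisms. A minimal sketch of
# a rally.conf override (the values below are illustrative assumptions, not
# recommendations):
#
#     [benchmark]
#     glance_image_create_prepoll_delay = 5.0
#     glance_image_create_timeout = 300.0
#     glance_image_create_poll_interval = 2.0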
@six.add_metaclass(abc.ABCMeta)
class GlanceWrapper(object):
def __init__(self, client, owner):
self.owner = owner
self.client = client
def get_image(self, image):
"""Gets image.
This serves to fetch the latest data on the image for the
various wait_for_*() functions.
Must raise rally.exceptions.GetResourceNotFound if the
resource is not found or deleted.
"""
# NOTE(stpierre): This function actually has a single
# implementation that works for both Glance v1 and Glance v2,
# but since we need to use this function in both wrappers, it
# gets implemented here.
try:
return self.client.images.get(image.id)
except glance_exc.HTTPNotFound:
raise exceptions.GetResourceNotFound(resource=image)
@abc.abstractmethod
def create_image(self, container_format, image_location, disk_format):
"""Creates new image.
Accepts all Glance v2 parameters.
"""
@abc.abstractmethod
def set_visibility(self, image, visibility="public"):
"""Set an existing image to public or private."""
@abc.abstractmethod
def list_images(self, **filters):
"""List images.
Accepts all Glance v2 filters.
"""
class GlanceV1Wrapper(GlanceWrapper):
def create_image(self, container_format, image_location,
disk_format, **kwargs):
kw = {
"container_format": container_format,
"disk_format": disk_format,
}
kw.update(kwargs)
if "name" not in kw:
kw["name"] = self.owner.generate_random_name()
if "visibility" in kw:
kw["is_public"] = kw.pop("visibility") == "public"
image_location = os.path.expanduser(image_location)
try:
if os.path.isfile(image_location):
kw["data"] = open(image_location)
else:
kw["copy_from"] = image_location
image = self.client.images.create(**kw)
time.sleep(CONF.benchmark.glance_image_create_prepoll_delay)
image = utils.wait_for_status(
image, ["active"],
update_resource=self.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.
glance_image_create_poll_interval)
finally:
if "data" in kw:
kw["data"].close()
return image
def set_visibility(self, image, visibility="public"):
self.client.images.update(image.id, is_public=(visibility == "public"))
def list_images(self, **filters):
kwargs = {"filters": filters}
if "owner" in filters:
# NOTE(stpierre): in glance v1, "owner" is not a filter,
# so we need to handle it separately.
kwargs["owner"] = kwargs["filters"].pop("owner")
visibility = kwargs["filters"].pop("visibility", None)
images = self.client.images.list(**kwargs)
# NOTE(stpierre): Glance v1 isn't smart enough to filter on
# public/private images, so we have to do it manually.
if visibility is not None:
is_public = visibility == "public"
return [i for i in images if i.is_public is is_public]
return images
class GlanceV2Wrapper(GlanceWrapper):
def create_image(self, container_format, image_location,
disk_format, **kwargs):
kw = {
"container_format": container_format,
"disk_format": disk_format,
}
kw.update(kwargs)
if "name" not in kw:
kw["name"] = self.owner.generate_random_name()
image_location = os.path.expanduser(image_location)
image = self.client.images.create(**kw)
time.sleep(CONF.benchmark.glance_image_create_prepoll_delay)
start = time.time()
image = utils.wait_for_status(
image, ["queued"],
update_resource=self.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.
glance_image_create_poll_interval)
        # spend whatever is left of the overall create timeout on the final
        # wait for the image to become "active"
        timeout = (CONF.benchmark.glance_image_create_timeout -
                   (time.time() - start))
image_data = None
response = None
try:
if os.path.isfile(image_location):
                # open the image in binary mode so the raw bytes are uploaded
                image_data = open(image_location, "rb")
else:
response = requests.get(image_location, stream=True)
image_data = response.raw
self.client.images.upload(image.id, image_data)
finally:
if image_data is not None:
image_data.close()
if response is not None:
response.close()
return utils.wait_for_status(
image, ["active"],
update_resource=self.get_image,
timeout=timeout,
check_interval=CONF.benchmark.
glance_image_create_poll_interval)
def set_visibility(self, image, visibility="public"):
self.client.images.update(image.id, visibility=visibility)
def list_images(self, **filters):
return self.client.images.list(filters=filters)
def wrap(client, owner):
"""Returns glanceclient wrapper based on glance client version."""
version = client.choose_version()
if version == "1":
return GlanceV1Wrapper(client(), owner)
elif version == "2":
return GlanceV2Wrapper(client(), owner)
else:
msg = "Version %s of the glance API could not be identified." % version
LOG.warning(msg)
raise exceptions.InvalidArgumentsException(msg)
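# NOTE: a minimal usage sketch, assuming ``client`` is a callable client
# factory exposing ``choose_version()`` and ``owner`` provides
# ``generate_random_name()``; the names below are hypothetical:
#
#     glance = wrap(clients.glance, scenario)
#     image = glance.create_image("bare", "~/cirros.img", "qcow2")
#     glance.set_visibility(image, "public")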
|