# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.plugins.v3 import flavors_extraspecs
import nova.db
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
def return_create_flavor_extra_specs(context, flavor_id, extra_specs):
return stub_flavor_extra_specs()
def return_flavor_extra_specs(context, flavor_id):
return stub_flavor_extra_specs()
def return_flavor_extra_specs_item(context, flavor_id, key):
return {key: stub_flavor_extra_specs()[key]}
def return_empty_flavor_extra_specs(context, flavor_id):
return {}
def delete_flavor_extra_specs(context, flavor_id, key):
pass
def stub_flavor_extra_specs():
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
return specs
class FlavorsExtraSpecsTest(test.TestCase):
def setUp(self):
super(FlavorsExtraSpecsTest, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
self.controller = flavors_extraspecs.FlavorExtraSpecsController()
def test_index(self):
self.stubs.Set(nova.db, 'instance_type_extra_specs_get',
return_flavor_extra_specs)
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs')
res_dict = self.controller.index(req, 1)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_index_no_data(self):
self.stubs.Set(nova.db, 'instance_type_extra_specs_get',
return_empty_flavor_extra_specs)
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs')
res_dict = self.controller.index(req, 1)
self.assertEqual(0, len(res_dict['extra_specs']))
def test_show(self):
self.stubs.Set(nova.db, 'instance_type_extra_specs_get_item',
return_flavor_extra_specs_item)
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs/key5')
res_dict = self.controller.show(req, 1, 'key5')
self.assertEqual('value5', res_dict['key5'])
def test_show_spec_not_found(self):
self.stubs.Set(nova.db, 'instance_type_extra_specs_get',
return_empty_flavor_extra_specs)
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs/key6')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1, 'key6')
def test_delete(self):
self.stubs.Set(nova.db, 'instance_type_extra_specs_delete',
delete_flavor_extra_specs)
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs/key5',
use_admin_context=True)
self.controller.delete(req, 1, 'key5')
def test_delete_no_admin(self):
self.stubs.Set(nova.db, 'instance_type_extra_specs_delete',
delete_flavor_extra_specs)
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs/key5')
self.assertRaises(exception.NotAuthorized, self.controller.delete,
                          req, 1, 'key5')
def test_delete_spec_not_found(self):
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs/key6',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1, 'key6')
def test_create(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"extra_specs": {"key1": "value1"}}
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs',
use_admin_context=True)
res_dict = self.controller.create(req, 1, body)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
self.assertEqual(self.controller.create.wsgi_code, 201)
def test_create_no_admin(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"extra_specs": {"key1": "value1"}}
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs')
self.assertRaises(exception.NotAuthorized, self.controller.create,
req, 1, body)
def test_create_empty_body(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, 1, '')
def test_update_item(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"key1": "value1"}
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs/key1',
use_admin_context=True)
res_dict = self.controller.update(req, 1, 'key1', body)
self.assertEqual('value1', res_dict['key1'])
def test_update_item_no_admin(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"key1": "value1"}
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs/key1')
self.assertRaises(exception.NotAuthorized, self.controller.update,
req, 1, 'key1', body)
def test_update_item_empty_body(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs/key1',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'key1', '')
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"key1": "value1", "key2": "value2"}
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs/key1',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'key1', body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"key1": "value1"}
req = fakes.HTTPRequest.blank('/v3/flavors/1/extra-specs/bad',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'bad', body)
class FlavorsExtraSpecsXMLSerializerTest(test.TestCase):
def test_serializer(self):
serializer = flavors_extraspecs.ExtraSpecsTemplate()
expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<extra_specs><key1>value1</key1></extra_specs>')
text = serializer.serialize(dict(extra_specs={"key1": "value1"}))
self.assertEqual(text, expected)
def test_show_update_serializer(self):
serializer = flavors_extraspecs.ExtraSpecTemplate()
expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<extra_spec key="key1">value1</extra_spec>')
text = serializer.serialize(dict({"key1": "value1"}))
self.assertEqual(text, expected)
def test_serializer_with_colon_tagname(self):
# Our test object to serialize
obj = {'extra_specs': {'foo:bar': '999'}}
serializer = flavors_extraspecs.ExtraSpecsTemplate()
expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
'<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
'</extra_specs>'))
result = serializer.serialize(obj)
self.assertEqual(expected_xml, result)
#!/usr/bin/env python3
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Convert the ASCII download_file_types.asciipb proto into a binary resource.
We generate a separate variant of the binary proto for each platform,
each of which contains only the values that platform needs.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import re
import sys
# Import the binary proto generator. Walks up to the root of the source tree
# which is five directories above, and then finds the protobufs directory from
# there.
proto_generator_path = os.path.normpath(os.path.join(os.path.abspath(__file__),
*[os.path.pardir] * 5 + ['components/resources/protobufs']))
sys.path.insert(0, proto_generator_path)
from binary_proto_generator import BinaryProtoGenerator
# Map of platforms for which we can generate binary protos.
# This must be run after the custom imports.
# key: type-name
# value: proto-platform_type (int)
def PlatformTypes():
return {
"android": download_file_types_pb2.DownloadFileType.PLATFORM_ANDROID,
"chromeos":
download_file_types_pb2.DownloadFileType.PLATFORM_CHROME_OS,
"fuchsia": download_file_types_pb2.DownloadFileType.PLATFORM_FUCHSIA,
"linux": download_file_types_pb2.DownloadFileType.PLATFORM_LINUX,
"mac": download_file_types_pb2.DownloadFileType.PLATFORM_MAC,
"win": download_file_types_pb2.DownloadFileType.PLATFORM_WINDOWS,
}
def PrunePlatformSettings(file_type, default_settings, platform_type):
    # Modify this file_type's platform_settings by keeping only the
# best one for this platform_type. In order of preference:
# * Exact match to platform_type
# * PLATFORM_ANY entry
# * or copy from the default file type.
last_platform = -1
setting_match = None
for s in file_type.platform_settings:
# Enforce: sorted and no dups (signs of mistakes).
assert last_platform < s.platform, (
"Extension '%s' has duplicate or out of order platform: '%s'" %
(file_type.extension, s.platform))
last_platform = s.platform
# Pick the most specific match.
if ((s.platform == platform_type) or
(s.platform == \
download_file_types_pb2.DownloadFileType.PLATFORM_ANY and \
setting_match is None)):
setting_match = s
# If platform_settings was empty, we'll fill in from the default
if setting_match is None:
assert default_settings is not None, (
"Missing default settings for platform %d" % platform_type)
setting_match = default_settings
# Now clear out the full list and replace it with 1 entry.
del file_type.platform_settings[:]
new_setting = file_type.platform_settings.add()
new_setting.CopyFrom(setting_match)
new_setting.ClearField('platform')
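# Illustrative pruning (settings assumed, not taken from the real config): if a
# file type lists platform_settings for PLATFORM_ANY and PLATFORM_WINDOWS and
# platform_type is PLATFORM_WINDOWS, the exact WINDOWS entry wins; the settings
# list is then collapsed to that single entry and its 'platform' field is
# cleared, so the per-platform proto carries no platform tag.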
def FilterPbForPlatform(full_pb, platform_type):
""" Return a filtered protobuf for this platform_type """
assert type(platform_type) is int, "Bad platform_type type"
new_pb = download_file_types_pb2.DownloadFileTypeConfig()
new_pb.CopyFrom(full_pb)
# Ensure there's only one platform_settings for the default.
PrunePlatformSettings(new_pb.default_file_type, None, platform_type)
# This can be extended if we want to match weird extensions.
# Just no dots, non-UTF8, or uppercase chars.
invalid_char_re = re.compile('[^a-z0-9_-]')
# Filter platform_settings for each type.
uma_values_used = set()
extensions_used = set()
for file_type in new_pb.file_types:
assert not invalid_char_re.search(file_type.extension), (
"File extension '%s' contains non alpha-num-dash chars" %
(file_type.extension))
assert file_type.extension not in extensions_used, (
"Duplicate extension '%s'" % file_type.extension)
extensions_used.add(file_type.extension)
assert file_type.uma_value not in uma_values_used, (
"Extension '%s' reused UMA value %d." %
(file_type.extension, file_type.uma_value))
uma_values_used.add(file_type.uma_value)
# Modify file_type to include only the best match platform_setting.
PrunePlatformSettings(
file_type, new_pb.default_file_type.platform_settings[0], \
platform_type)
return new_pb
def FilterForPlatformAndWrite(full_pb, platform_type, outfile):
""" Filter and write out a file for this platform """
# Filter it
filtered_pb = FilterPbForPlatform(full_pb, platform_type)
# Serialize it
binary_pb_str = filtered_pb.SerializeToString()
# Write it to disk
    with open(outfile, 'wb') as out:
        out.write(binary_pb_str)
def MakeSubDirs(outfile):
""" Make the subdirectories needed to create file |outfile| """
dirname = os.path.dirname(outfile)
if not os.path.exists(dirname):
os.makedirs(dirname)
class DownloadFileTypeProtoGenerator(BinaryProtoGenerator):
def ImportProtoModule(self):
import download_file_types_pb2
globals()['download_file_types_pb2'] = download_file_types_pb2
def EmptyProtoInstance(self):
return download_file_types_pb2.DownloadFileTypeConfig()
def ValidatePb(self, opts, pb):
""" Validate the basic values of the protobuf. The
file_type_policies_unittest.cc will also validate it by platform,
but this will catch errors earlier.
"""
assert pb.version_id > 0
assert pb.sampled_ping_probability >= 0.0
assert pb.sampled_ping_probability <= 1.0
assert len(pb.default_file_type.platform_settings) >= 1
assert len(pb.file_types) > 1
def ProcessPb(self, opts, pb):
""" Generate one or more binary protos using the parsed proto. """
if opts.type is not None:
# Just one platform type
platform_enum = PlatformTypes()[opts.type]
outfile = os.path.join(opts.outdir, opts.outbasename)
FilterForPlatformAndWrite(pb, platform_enum, outfile)
else:
# Make a separate file for each platform
for platform_type, platform_enum in PlatformTypes().items():
# e.g. .../all/77/chromeos/download_file_types.pb
outfile = os.path.join(opts.outdir, str(pb.version_id),
platform_type, opts.outbasename)
MakeSubDirs(outfile)
FilterForPlatformAndWrite(pb, platform_enum, outfile)
def AddCommandLineOptions(self, parser):
parser.add_option('-a',
'--all',
action="store_true",
default=False,
help='Write a separate file for every platform. '
'Outfile must have a %d for version and %s for '
'platform.')
parser.add_option(
'-t',
'--type',
        help='The platform type. One of android, chromeos, ' +
        'fuchsia, linux, mac, win')
def AddExtraCommandLineArgsForVirtualEnvRun(self, opts, command):
if opts.type is not None:
command += ['-t', opts.type]
if opts.all:
command += ['-a']
def VerifyArgs(self, opts):
if (not opts.all and opts.type not in PlatformTypes()):
print("ERROR: Unknown platform type '%s'" % opts.type)
self.opt_parser.print_help()
return False
if (bool(opts.all) == bool(opts.type)):
print("ERROR: Need exactly one of --type or --all")
self.opt_parser.print_help()
return False
return True
def main():
return DownloadFileTypeProtoGenerator().Run()
if __name__ == '__main__':
sys.exit(main())
import re
import unicodedata
import yaml
from yaml.constructor import ConstructorError
from yaml.parser import ParserError
from yaml.scanner import ScannerError
from .sql import SQLValidator
import logging
logger = logging.getLogger(__name__)
CREATE_TEMPLATE = """CREATE TABLE {0}(
fid INTEGER PRIMARY KEY AUTOINCREMENT,
geom {1},
{2}
);
INSERT INTO {0}(geom, {3}) select geom, {3} from {4} WHERE ({5});
"""
INDEX_TEMPLATE = """
INSERT INTO gpkg_contents (table_name, data_type,identifier,srs_id) VALUES ('{0}','features','{0}','4326');
INSERT INTO gpkg_geometry_columns VALUES ('{0}', 'geom', '{1}', '4326', '0', '0');
UPDATE '{0}' SET geom=GeomFromGPB(geom);
SELECT gpkgAddSpatialIndex('{0}', 'geom');
UPDATE '{0}' SET geom=AsGPB(geom);
"""
WKT_TYPE_MAP = {
"points": "POINT",
"lines": "MULTILINESTRING",
"polygons": "MULTIPOLYGON",
}
OSM_ID_TAGS = {
"points": ["osm_id"],
"lines": ["osm_id"],
"polygons": ["osm_id", "osm_way_id"],
}
OGR2OGR_TABLENAMES = {"points": "points", "lines": "lines", "polygons": "multipolygons"}
ZIP_README = """
This thematic file was generated by EventKit.
This theme includes features matching the filter:
{criteria}
clipped to the area defined by the included boundary.geojson.
This theme includes the following OpenStreetMap keys:
{columns}
(c) OpenStreetMap contributors.
This file is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/.
Any rights in individual contents of the database are licensed under the Database Contents
License: http://opendatacommons.org/licenses/dbcl/1.0/
"""
BANNED_THEME_NAMES = [
"points",
"lines",
"multipolygons",
"boundary",
"multilinestrings",
"other_relations",
]
# adapted from https://github.com/django/django/blob/92053acbb9160862c3e743a99ed8ccff8d4f8fd6/django/utils/text.py#L417
def slugify(s):
slug = unicodedata.normalize("NFKD", str(s))
slug = slug.encode("ascii", "ignore").lower().decode()
slug = re.sub(r"[^a-z0-9]+", "_", slug).strip("_")
slug = re.sub(r"[_]+", "_", slug)
return slug
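# Illustrative examples (inputs assumed):
#   slugify("Café & Bars!")        -> "cafe_bars"
#   slugify("Points of Interest")  -> "points_of_interest"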
# FeatureSelection serializes as YAML.
# It describes a set of tables (themes)
# to create in a Spatialite database.
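# Example document (illustrative theme and keys, not a schema requirement):
#
#   buildings:
#     types:
#       - polygons
#     select:
#       - building
#       - height
#     where:
#       - building IS NOT NULL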
class FeatureSelection(object):
@staticmethod
def example(config):
f = FeatureSelection(config)
assert f.valid
return f
def __init__(self, raw_doc):
self._raw_doc = raw_doc
self._doc = None
self._errors = []
self.keys_from_sql = {}
@property
def doc(self):
def validate_schema(loaded_doc):
if not isinstance(loaded_doc, dict):
self._errors.append("YAML must be dict, not list")
return False
for theme, theme_dict in loaded_doc.items():
if theme in BANNED_THEME_NAMES or theme.startswith("gpkg_") or theme.startswith("rtree_"):
self._errors.append("Theme name reserved: {0}".format(theme))
return False
if not re.match("^[a-zA-Z0-9_ ]+$", theme):
self._errors.append(
"Each theme must be named using only characters, numbers, underscores and spaces"
)
return False
if "select" not in theme_dict:
self._errors.append("Each theme must have a 'select' key")
return False
for key in theme_dict["select"]:
if not key:
self._errors.append("Missing OSM key")
return False
if not re.match("[a-zA-Z0-9 _\:]+$", key):
self._errors.append("Invalid OSM key: {0}".format(key))
return False
if not isinstance(theme_dict["select"], list):
self._errors.append("'select' children must be list elements (e.g. '- amenity')")
return False
self.keys_from_sql[theme] = set()
if "where" in theme_dict:
if not isinstance(theme_dict["where"], list):
clauses = [theme_dict["where"]]
else:
clauses = theme_dict["where"]
for clause in clauses:
s = SQLValidator(clause)
if not s.valid:
self._errors.append("SQL WHERE Invalid: " + ";".join(s.errors))
return False
# also add the keys to keys_from_sql
for k in s.column_names:
self.keys_from_sql[theme].add(k)
return True
if self._doc:
return self._doc
try:
loaded_doc = yaml.safe_load(self._raw_doc)
if validate_schema(loaded_doc):
self._doc = loaded_doc
return self._doc
except (ConstructorError, ScannerError, ParserError) as e:
self._errors.append(e.problem)
@property
def valid(self):
return self.doc is not None
@property
def errors(self):
return self._errors
@property
def themes(self):
if self.doc:
return list(self.doc.keys())
return []
@property
def slug_themes(self):
return [slugify(x) for x in self.themes]
def geom_types(self, theme):
if "types" in self.doc[theme]:
return self.doc[theme]["types"]
return ["points", "lines", "polygons"]
def key_selections(self, theme):
return self.doc[theme]["select"]
def filter_clause(self, theme):
theme = self.doc[theme]
if "where" in theme:
if isinstance(theme["where"], list):
return " OR ".join(theme["where"])
return theme["where"]
return " OR ".join(['"' + x + '" IS NOT NULL' for x in theme["select"]])
def zip_readme(self, theme):
columns = []
for key in self.key_selections(theme):
columns.append("{0} http://wiki.openstreetmap.org/wiki/Key:{0}".format(key))
columns = "\n".join(columns)
criteria = self.filter_clause(theme)
return ZIP_README.format(columns=columns, criteria=criteria)
def __str__(self):
return str(self.doc)
def key_union(self, geom_type=None):
s = set()
for t in self.themes:
if geom_type is None or (geom_type in self.geom_types(t)):
for key in self.key_selections(t):
s.add(key)
for key in self.keys_from_sql[t]:
s.add(key)
return sorted(list(s))
@property
def tables(self):
retval = []
for theme in self.themes:
for geom_type in self.geom_types(theme):
retval.append(slugify(theme) + "_" + geom_type)
return retval
def col_type(self, col_name):
if col_name == "z_index":
return " INTEGER(4) DEFAULT 0"
return " TEXT"
def create_sql(self, theme, geom_type):
key_selections = ['"{0}"'.format(key) for key in self.key_selections(theme)]
cols = OSM_ID_TAGS[geom_type] + key_selections
table_name = slugify(theme) + "_" + geom_type
sqls = []
sqls.append(
CREATE_TEMPLATE.format(
table_name,
WKT_TYPE_MAP[geom_type],
",".join([col + self.col_type(col) for col in cols]),
",".join(cols),
"geopackage." + table_name,
"1",
)
)
sqls.append(
"INSERT INTO gpkg_contents VALUES "
"('{0}', 'features', '{0}', '', '2017-04-08T01:35:16.576Z', "
"null, null, null, null, '4326')".format(table_name)
)
sqls.append(
"\nINSERT INTO gpkg_geometry_columns VALUES ('{0}', 'geom', '{1}', '4326', '0', '0')".format(
table_name, WKT_TYPE_MAP[geom_type]
)
)
return sqls
@property
def sqls(self):
create_sqls = []
index_sqls = []
for theme in self.themes:
key_selections = ['"{0}"'.format(key) for key in self.key_selections(theme)]
            # if any of these 5 keys is in the selection, add z_index
if any([x in self.key_selections(theme) for x in ["highway", "railway", "bridge", "tunnel", "layer"]]):
key_selections.append('"z_index"')
filter_clause = self.filter_clause(theme)
for geom_type in self.geom_types(theme):
dst_tablename = slugify(theme) + "_" + geom_type
src_tablename = OGR2OGR_TABLENAMES[geom_type]
cols = OSM_ID_TAGS[geom_type] + key_selections
create_sqls.append(
CREATE_TEMPLATE.format(
dst_tablename,
WKT_TYPE_MAP[geom_type],
",".join([col + self.col_type(col) for col in cols]),
",".join(cols),
src_tablename,
filter_clause,
)
)
index_sqls.append(INDEX_TEMPLATE.format(dst_tablename, WKT_TYPE_MAP[geom_type]))
return create_sqls, index_sqls
from collections import OrderedDict
from functools import partial
from numbers import Number
from pprint import pprint as pp
import f5_os_test
from f5_os_test.polling_clients import NeutronClientPollingManager
from neutronclient.common.exceptions import BadRequest
from pytest import symbols as symbols_data
nclient_config = {
'username': symbols_data.tenant_username,
'password': symbols_data.tenant_password,
'tenant_name': symbols_data.tenant_name,
'auth_url': symbols_data.auth_url
}
class UnexpectedTypeFromJson(TypeError):
pass
class UpdateScanner(object):
def __init__(self, component_config):
self.ordered_config = OrderedDict(component_config.copy())
self.keytuple = tuple(self.ordered_config.keys())
self.param_vector = self._build_param_vector()
def call_factory(self, ncpm, update_method, update_target):
def call_method(**kwargs):
cm = getattr(ncpm, update_method)
pp(cm.__name__)
partial_fixed = partial(cm, update_target)
return partial_fixed(**kwargs)
self.call_method = call_method
def _build_param_vector(self):
param_vector = []
for k, v in self.ordered_config.iteritems():
param_vector.append((k, self._toggle_state(v)))
return param_vector
def _toggle_state(self, config_value):
# Note this doesn't handle reference cycles.
if isinstance(config_value, basestring):
return config_value+'_test'
elif isinstance(config_value, bool):
return not config_value
elif isinstance(config_value, Number):
return config_value+1
elif isinstance(config_value, list):
if config_value:
return [self._toggle_state(i) for i in config_value]
else:
return [0]
elif isinstance(config_value, dict):
if config_value:
t = config_value.copy()
for k, v in t.iteritems():
t[k] = self._toggle_state(v)
return t
else:
return {'test_key': 'test_val'}
elif isinstance(config_value, type(None)):
return 1
else:
raise UnexpectedTypeFromJson(config_value)
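# Illustrative toggles (config values assumed): _toggle_state('lb1') -> 'lb1_test',
# _toggle_state(True) -> False, _toggle_state(None) -> 1, so param_vector ends up
# holding one (key, mutated value) pair per key of the original config.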
def pytest_generate_tests(metafunc):
if 'update_lb_key' in metafunc.fixturenames:
metafunc.parametrize('update_lb_key,update_value',
metafunc.cls.lb_param_vector)
elif 'update_listener_key' in metafunc.fixturenames:
metafunc.parametrize('update_listener_key,update_value',
metafunc.cls.listener_param_vector)
elif 'update_pool_key' in metafunc.fixturenames:
metafunc.parametrize('update_pool_key,update_value',
metafunc.cls.pool_param_vector)
elif 'update_member_key' in metafunc.fixturenames:
metafunc.parametrize('update_member_key,update_value',
metafunc.cls.member_param_vector)
elif 'update_healthmonitor_key' in metafunc.fixturenames:
metafunc.parametrize('update_healthmonitor_key,update_value',
metafunc.cls.healthmonitor_param_vector)
class UpdateScenarioBase(object):
lb_name = f5_os_test.random_name('test_lb_', 6)
ncpm = NeutronClientPollingManager(**nclient_config)
subnets = ncpm.list_subnets()['subnets']
for sn in subnets:
if 'client-v4' in sn['name']:
lbconf = {'vip_subnet_id': sn['id'],
'tenant_id': sn['tenant_id'],
'name': lb_name}
# loadbalancer setup
activelb =\
ncpm.create_loadbalancer({'loadbalancer': lbconf})
active_loadbalancer_config = activelb['loadbalancer']
loadbalancer_updater = UpdateScanner(active_loadbalancer_config)
lb_param_vector = loadbalancer_updater.param_vector
# listener setup
listener_name = f5_os_test.random_name('test_listener_', 6)
listener_config = {'listener':
{'name': listener_name,
'loadbalancer_id': activelb['loadbalancer']['id'],
'protocol': 'HTTP',
'protocol_port': 80}}
active_listener = ncpm.create_listener(listener_config)
active_listener_config = active_listener['listener']
listener_updater = UpdateScanner(active_listener_config)
listener_param_vector = listener_updater.param_vector
# pool setup
pool_name = f5_os_test.random_name('test_pool_', 6)
pool_config = {'pool': {
'name': pool_name,
'lb_algorithm': 'ROUND_ROBIN',
'listener_id': active_listener['listener']['id'],
'protocol': 'HTTP'}}
active_pool = ncpm.create_lbaas_pool(pool_config)
active_pool_config = active_pool['pool']
pool_updater = UpdateScanner(active_pool_config)
pool_param_vector = pool_updater.param_vector
# pool member setup
for sn in ncpm.list_subnets()['subnets']:
if 'server-v4' in sn['name']:
address = sn['allocation_pools'][0]['start']
subnet_id = sn['id']
break
member_config = {'member': {
'subnet_id': subnet_id,
'address': address,
'protocol_port': 80}}
pool_id = active_pool_config['id']
active_member = ncpm.create_lbaas_member(pool_id, member_config)
active_member_config = active_member['member']
member_updater = UpdateScanner(active_member_config)
member_param_vector = member_updater.param_vector
# healthmonitor setup
monitor_config = {'healthmonitor': {
'delay': 3,
'pool_id': pool_id,
'type': 'HTTP',
'timeout': 13,
'max_retries': 7}}
healthmonitor = ncpm.create_lbaas_healthmonitor(monitor_config)
healthmonitorconfig = healthmonitor['healthmonitor']
healthmonitor_updater = UpdateScanner(healthmonitorconfig)
healthmonitor_param_vector = healthmonitor_updater.param_vector
class TestLoadBalancerUpdateScenarios(UpdateScenarioBase):
def test_loadbalancer_update_configs(self,
update_lb_key,
update_value,
setup_with_loadbalancer):
ncpm, active_loadbalancer = setup_with_loadbalancer
active_loadbalancer_id = active_loadbalancer['loadbalancer']['id']
self.loadbalancer_updater.call_factory(
ncpm, 'update_loadbalancer', active_loadbalancer_id
)
update_dict = {update_lb_key: update_value}
try:
updated = self.loadbalancer_updater.call_method(
lbconf={'loadbalancer': update_dict}
)
except BadRequest as exc:
exc_message_first_line = exc.message.split('\n')[0]
expected_first_line =\
'Cannot update read-only attribute %s' % update_lb_key
assert exc_message_first_line == expected_first_line
return
assert updated['loadbalancer'][update_lb_key] == update_value
class TestListenerUpdateScenarios(UpdateScenarioBase):
def test_listener_update_configs(self,
update_listener_key,
update_value,
setup_with_listener):
ncpm, active_listener = setup_with_listener
active_listener_id = active_listener['listener']['id']
self.listener_updater.call_factory(
ncpm, 'update_listener', active_listener_id
)
if update_listener_key == 'default_tls_container_ref':
update_value = 'string_for_read_only_fail'
elif update_listener_key == 'sni_container_refs':
# NOTE: THIS TEST WILL ALWAYS SUCCEED
return NotImplemented
update_dict = {update_listener_key: update_value}
try:
updated = self.listener_updater.call_method(
listener_conf={'listener': update_dict}
)
except BadRequest as exc:
exc_message_first_line = exc.message.split('\n')[0]
expected_first_line =\
'Cannot update read-only attribute %s' % update_listener_key
assert exc_message_first_line == expected_first_line
return
assert updated['listener'][update_listener_key] == update_value
class TestPoolUpdateScenarios(UpdateScenarioBase):
def test_pool_update_configs(self,
update_pool_key,
update_value,
setup_with_pool):
ncpm, active_pool = setup_with_pool
active_pool_id = active_pool['pool']['id']
self.pool_updater.call_factory(
ncpm, 'update_lbaas_pool', active_pool_id
)
if update_pool_key == 'lb_algorithm':
if 'ROUND_ROBIN' in update_value:
update_value = 'SOURCE_IP'
else:
update_value = 'ROUND_ROBIN'
elif update_pool_key == 'session_persistence':
update_value = None
update_dict = {update_pool_key: update_value}
try:
updated = self.pool_updater.call_method(
lbaas_pool_conf={'pool': update_dict}
)
except BadRequest as exc:
exc_message_first_line = exc.message.split('\n')[0]
expected_first_line =\
'Cannot update read-only attribute %s' % update_pool_key
assert exc_message_first_line == expected_first_line
return
assert updated['pool'][update_pool_key] == update_value
class TestMemberUpdateScenarios(UpdateScenarioBase):
def test_member_update_configs(self,
update_member_key,
update_value,
setup_with_pool_member):
ncpm, active_pool, active_member = setup_with_pool_member
active_member_id = active_member['member']['id']
self.member_updater.call_factory(
ncpm, 'update_lbaas_member', active_member_id
)
if update_member_key == 'lb_algorithm':
if 'ROUND_ROBIN' in update_value:
update_value = 'SOURCE_IP'
else:
update_value = 'ROUND_ROBIN'
elif update_member_key == 'session_persistence':
update_value = None
update_dict = {update_member_key: update_value}
try:
updated = self.member_updater.call_method(
pool_id=active_pool['pool']['id'],
member_conf={'member': update_dict}
)
except BadRequest as exc:
exc_message_first_line = exc.message.split('\n')[0]
expected_first_line =\
'Cannot update read-only attribute %s' % update_member_key
assert exc_message_first_line == expected_first_line
return
assert updated['member'][update_member_key] == update_value
class TestHealthMonitorUpdateScenarios(UpdateScenarioBase):
def test_healthmonitor_update_configs(self,
update_healthmonitor_key,
update_value,
setup_with_healthmonitor):
ncpm, active_healthmonitor, pool, member = setup_with_healthmonitor
active_healthmonitor_id = active_healthmonitor['healthmonitor']['id']
self.healthmonitor_updater.call_factory(
ncpm, 'update_lbaas_healthmonitor', active_healthmonitor_id
)
if update_healthmonitor_key == 'expected_codes':
update_value = '300'
update_dict = {update_healthmonitor_key: update_value}
try:
updated = self.healthmonitor_updater.call_method(
lbaas_healthmonitor_conf={'healthmonitor': update_dict}
)
except BadRequest as exc:
exc_message_first_line = exc.message.split('\n')[0]
expected_first_line =\
'Cannot update read-only attribute %s' %\
update_healthmonitor_key
assert exc_message_first_line == expected_first_line
return
assert updated['healthmonitor'][update_healthmonitor_key] ==\
update_value
# -*- coding: utf-8 -*-
'''
Execute orchestration functions
'''
# Import python libs
from __future__ import absolute_import, print_function
import fnmatch
import json
import logging
import sys
# Import salt libs
import salt.minion
import salt.syspaths
import salt.utils
import salt.utils.event
from salt.exceptions import SaltInvocationError
LOGGER = logging.getLogger(__name__)
def orchestrate(mods, saltenv='base', test=None, exclude=None, pillar=None):
'''
.. versionadded:: 0.17.0
Execute a state run from the master, used as a powerful orchestration
system.
.. seealso:: More Orchestrate documentation
* :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
* :py:mod:`Docs for the master-side state module <salt.states.saltmod>`
CLI Examples:
.. code-block:: bash
salt-run state.orchestrate webserver
salt-run state.orchestrate webserver saltenv=dev test=True
.. versionchanged:: 2014.1.1
Runner renamed from ``state.sls`` to ``state.orchestrate``
.. versionchanged:: 2014.7.0
Runner uses the pillar variable
'''
if pillar is not None and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary'
)
__opts__['file_client'] = 'local'
minion = salt.minion.MasterMinion(__opts__)
running = minion.functions['state.sls'](
mods,
saltenv,
test,
exclude,
pillar=pillar)
ret = {'data': {minion.opts['id']: running}, 'outputter': 'highstate'}
return ret
# Aliases for orchestrate runner
orch = salt.utils.alias_function(orchestrate, 'orch')
sls = salt.utils.alias_function(orchestrate, 'sls')
def orchestrate_single(fun, name, test=None, queue=False, pillar=None, **kwargs):
'''
Execute a single state orchestration routine
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt-run state.orchestrate_single fun=salt.wheel name=key.list_all
'''
if pillar is not None and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary'
)
__opts__['file_client'] = 'local'
minion = salt.minion.MasterMinion(__opts__)
running = minion.functions['state.single'](
fun,
name,
        test=test,
        queue=queue,
pillar=pillar,
**kwargs)
ret = {minion.opts['id']: running}
__jid_event__.fire_event({'data': ret, 'outputter': 'highstate'}, 'progress')
return ret
def orchestrate_high(data, test=None, queue=False, pillar=None, **kwargs):
'''
Execute a single state orchestration routine
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt-run state.orchestrate_high '{
stage_one:
{salt.state: [{tgt: "db*"}, {sls: postgres_setup}]},
stage_two:
{salt.state: [{tgt: "web*"}, {sls: apache_setup}, {
require: [{salt: stage_one}],
}]},
}'
'''
if pillar is not None and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary'
)
__opts__['file_client'] = 'local'
minion = salt.minion.MasterMinion(__opts__)
running = minion.functions['state.high'](
data,
        test=test,
        queue=queue,
pillar=pillar,
**kwargs)
ret = {minion.opts['id']: running}
__jid_event__.fire_event({'data': ret, 'outputter': 'highstate'}, 'progress')
return ret
def event(tagmatch='*', count=-1, quiet=False, sock_dir=None, pretty=False):
r'''
Watch Salt's event bus and block until the given tag is matched
.. versionadded:: 2014.7.0
This is useful for utilizing Salt's event bus from shell scripts or for
taking simple actions directly from the CLI.
Enable debug logging to see ignored events.
:param tagmatch: the event is written to stdout for each tag that matches
this pattern; uses the same matching semantics as Salt's Reactor.
:param count: this number is decremented for each event that matches the
``tagmatch`` parameter; pass ``-1`` to listen forever.
:param quiet: do not print to stdout; just block
:param sock_dir: path to the Salt master's event socket file.
:param pretty: Output the JSON all on a single line if ``False`` (useful
for shell tools); pretty-print the JSON output if ``True``.
CLI Examples:
.. code-block:: bash
# Reboot a minion and run highstate when it comes back online
salt 'jerry' system.reboot && \\
salt-run state.event 'salt/minion/jerry/start' count=1 quiet=True && \\
salt 'jerry' state.highstate
# Reboot multiple minions and run highstate when all are back online
salt -L 'kevin,stewart,dave' system.reboot && \\
salt-run state.event 'salt/minion/*/start' count=3 quiet=True && \\
salt -L 'kevin,stewart,dave' state.highstate
# Watch the event bus forever in a shell while-loop.
salt-run state.event | while read -r tag data; do
echo $tag
            echo $data | jq --color-output .
done
.. seealso::
See :blob:`tests/eventlisten.sh` for an example of usage within a shell
script.
'''
sevent = salt.utils.event.get_event(
'master',
sock_dir or __opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=True)
while True:
ret = sevent.get_event(full=True)
if ret is None:
continue
if fnmatch.fnmatch(ret['tag'], tagmatch):
if not quiet:
print('{0}\t{1}'.format(
ret['tag'],
json.dumps(
ret['data'],
sort_keys=pretty,
indent=None if not pretty else 4)))
sys.stdout.flush()
count -= 1
LOGGER.debug('Remaining event matches: {0}'.format(count))
if count == 0:
break
else:
LOGGER.debug('Skipping event tag: {0}'.format(ret['tag']))
continue
"""
Useful (but not essential) functions for writing panflute filters
"""
# ---------------------------
# Imports
# ---------------------------
from .base import Element
from .elements import *
from .io import dump
import io
import os
import os.path as p
import re
import sys
import json
import yaml
import shlex
from typing import Tuple
from shutil import which
from subprocess import Popen, PIPE
from functools import partial
# yamlloader keeps dict ordering in yaml
try:
import yamlloader
except ImportError:
yamlloader = None
if yamlloader is None:
# property of pyyaml:
# C*Loader when compiled with C, else fallback to pure Python loader
try:
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader
else:
from yamlloader.ordereddict import CSafeLoader as Loader
# to be filled the first time which('pandoc') is called
PANDOC_PATH = None
# ---------------------------
# Constants
# ---------------------------
HorizontalSpaces = (Space, LineBreak, SoftBreak)
VerticalSpaces = (Para, )
# ---------------------------
# Convenience classes
# ---------------------------
class PandocVersion:
'''
    Get the runtime Pandoc version.
    Use ``PandocVersion().version`` for comparing versions.
'''
def __init__(self):
pass
def __str__(self) -> str:
return self._repr.splitlines()[0].split(' ')[1]
def __repr__(self) -> str:
return self._repr
@property
def _repr(self):
# lazily call pandoc only once
if not hasattr(self, '__repr'):
self.__repr: str = run_pandoc(args=['--version'])
return self.__repr
@property
def version(self) -> Tuple[int, ...]:
return tuple(int(i) for i in str(self).split('.'))
@property
def data_dir(self):
info = self._repr.splitlines()
prefix = "User data directory: "
info = [row for row in info if row.startswith(prefix)]
assert len(info) == 1, info
data_dir = info[0][len(prefix):]
# data_dir might contain multiple folders:
# Default user data directory: /home/runner/.local/share/pandoc or /home/runner/.pandoc/filters
data_dir = data_dir.split(' or ')
data_dir = [p.normpath(p.expanduser(p.expandvars(p.join(d, 'filters')))) for d in data_dir]
return data_dir
pandoc_version = PandocVersion()
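# Illustrative use (threshold assumed): the tuple form allows simple runtime
# gating, e.g. `if pandoc_version.version >= (2, 11): ...`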
# ---------------------------
# Convenience functions
# ---------------------------
def yaml_filter(element, doc, tag=None, function=None, tags=None,
strict_yaml=False):
'''
Convenience function for parsing code blocks with YAML options
This function is useful to create a filter that applies to
code blocks that have specific classes.
It is used as an argument of ``run_filter``, with two additional options:
``tag`` and ``function``.
Using this is equivalent to having filter functions that:
1. Check if the element is a code block
2. Check if the element belongs to a specific class
3. Split the YAML options (at the beginning of the block, by looking
       for ``...`` or ``---`` strings in a separate line)
4. Parse the YAML
5. Use the YAML options and (optionally) the data that follows the YAML
to return a new or modified element
Instead, you just need to:
1. Call ``run_filter`` with ``yaml_filter`` as the action function, and
with the additional arguments ``tag`` and ``function``
2. Construct a ``fenced_action`` function that takes four arguments:
(options, data, element, doc). Note that options is a dict and data
is a raw string. Notice that this is similar to the ``action``
functions of standard filters, but with *options* and *data* as the
new ones.
Note: if you want to apply multiple functions to separate classes,
you can use the ``tags`` argument, which receives a dict of
``tag: function`` pairs.
Note: use the ``strict_yaml=True`` option in order to allow for more verbose
    but flexible YAML metadata: more than one YAML block is allowed, but
they all must start with ``---`` (even at the beginning) and end with
``---`` or ``...``. Also, YAML is not the default content
when no delimiters are set.
Example::
"""
Replace code blocks of class 'foo' with # horizontal rules
"""
import panflute as pf
def fenced_action(options, data, element, doc):
count = options.get('count', 1)
div = pf.Div(attributes={'count': str(count)})
div.content.extend([pf.HorizontalRule] * count)
return div
if __name__ == '__main__':
pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
'''
# Allow for either tag+function or a dict {tag: function}
assert (tag is None) + (tags is None) == 1 # XOR
if tags is None:
tags = {tag: function}
if type(element) == CodeBlock:
for tag in tags:
if tag in element.classes:
function = tags[tag]
if not strict_yaml:
# Split YAML and data parts (separated by ... or ---)
raw = re.split("^([.]{3,}|[-]{3,})$",
element.text, 1, re.MULTILINE)
data = raw[2] if len(raw) > 2 else ''
data = data.lstrip('\n')
raw = raw[0]
try:
options = yaml.load(raw, Loader=Loader) # nosec # already using SafeLoader
except yaml.scanner.ScannerError:
debug("panflute: malformed YAML block")
return
if options is None:
options = {}
else:
options = {}
data = []
raw = re.split("^([.]{3,}|[-]{3,})$",
element.text, 0, re.MULTILINE)
rawmode = True
for chunk in raw:
chunk = chunk.strip('\n')
if not chunk:
continue
if rawmode:
if chunk.startswith('---'):
rawmode = False
else:
data.append(chunk)
else:
if chunk.startswith('---') or chunk.startswith('...'):
rawmode = True
else:
try:
options.update(yaml.load(chunk, Loader=Loader)) # nosec # already using SafeLoader
except yaml.scanner.ScannerError:
debug("panflute: malformed YAML block")
return
data = '\n'.join(data)
return function(options=options, data=data,
element=element, doc=doc)
# ---------------------------
# Functions that extract content
# ---------------------------
def stringify(element, newlines=True):
"""
Return the raw text version of an element (and its children elements).
Example:
>>> from panflute import *
>>> e1 = Emph(Str('Hello'), Space, Str('world!'))
>>> e2 = Strong(Str('Bye!'))
>>> para = Para(e1, Space, e2)
>>> stringify(para)
'Hello world! Bye!\n\n'
:param newlines: add a new line after a paragraph (default True)
:type newlines: :class:`bool`
:rtype: :class:`str`
"""
def attach_str(e, doc, answer):
if hasattr(e, 'text'):
ans = e.text
elif isinstance(e, HorizontalSpaces):
ans = ' '
elif isinstance(e, VerticalSpaces) and newlines:
ans = '\n\n'
elif type(e) == Citation:
ans = ''
else:
ans = ''
# Add quotes around the contents of Quoted()
if type(e.parent) == Quoted:
if e.index == 0:
ans = '"' + ans
if e.index == len(e.container) - 1:
ans += '"'
answer.append(ans)
answer = []
f = partial(attach_str, answer=answer)
element.walk(f)
return ''.join(answer)
def _get_metadata(self, key='', default=None, builtin=True):
"""
get_metadata([key, default, simple])
Retrieve metadata with nested keys separated by dots.
This is useful to avoid repeatedly checking if a dict exists, as
the frontmatter might not have the keys that we expect.
With ``builtin=True`` (the default), it will convert the results to
    built-in Python types, instead of :class:`.MetaValue` elements. E.g. instead of returning a MetaBool it will return True or False.
:param key: string with the keys separated by a dot (``key1.key2``). Default is an empty string (which returns the entire metadata dict)
:type key: ``str``
:param default: return value in case the key is not found (default is ``None``)
:param builtin: If True, return built-in Python types (default is ``True``)
:Example:
>>> doc.metadata['format']['show-frame'] = True
>>> # ...
>>> # afterwards:
>>> show_frame = doc.get_metadata('format.show-frame', False)
>>> stata_path = doc.get_metadata('media.path.figures', '.')
"""
# Retrieve metadata
assert isinstance(key, str)
meta = self.metadata
# Retrieve specific key
if key:
for k in key.split('.'):
if isinstance(meta, MetaMap) and k in meta.content:
meta = meta[k]
else:
return default
# Stringify contents
return meta2builtin(meta) if builtin else meta
def meta2builtin(meta):
if isinstance(meta, MetaBool):
return meta.boolean
elif isinstance(meta, MetaString):
return meta.text
elif isinstance(meta, MetaList):
return [meta2builtin(v) for v in meta.content.list]
elif isinstance(meta, MetaMap):
return {k: meta2builtin(v) for k, v in meta.content.dict.items()}
elif isinstance(meta, (MetaInlines, MetaBlocks)):
return stringify(meta)
else:
debug("MISSING", type(meta))
return meta
# Bind the method
Doc.get_metadata = _get_metadata
# ---------------------------
# Functions that rely on external calls
# ---------------------------
def shell(args, wait=True, msg=None):
"""
Execute the external command and get its exitcode, stdout and stderr.
"""
# Fix Windows error if passed a string
if isinstance(args, str):
args = shlex.split(args, posix=(os.name != "nt"))
if os.name == "nt":
args = [arg.replace('/', '\\') for arg in args]
if wait:
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate(input=msg)
exitcode = proc.returncode
if exitcode != 0:
debug('<<<< shell call failed; error message below >>>>')
debug(err.decode('utf-8'))
debug('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
raise IOError()
return out
else:
DETACHED_PROCESS = 0x00000008
proc = Popen(args, creationflags=DETACHED_PROCESS)
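# Illustrative use of shell() (command assumed): shell(['pandoc', '--version'])
# returns the raw stdout bytes, or raises IOError if the exit code is non-zero.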
def run_pandoc(text='', args=None, pandoc_path=None):
"""
Low level function that calls Pandoc with (optionally)
some input text and/or arguments
:param str pandoc_path: If specified, use the Pandoc at this path.
If None, default to that from PATH.
"""
if args is None:
args = []
if pandoc_path is None:
# initialize the global PANDOC_PATH
if PANDOC_PATH is None:
temp = which('pandoc')
if temp is None:
raise OSError("Path to pandoc executable does not exists")
sys.modules[__name__].PANDOC_PATH = temp
pandoc_path = PANDOC_PATH
try:
proc = Popen([pandoc_path] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except FileNotFoundError:
raise OSError(f"Given pandoc_path {pandoc_path} is invalid")
out, err = proc.communicate(input=text.encode('utf-8'))
exitcode = proc.returncode
if err:
debug(err.decode('utf-8'))
if exitcode != 0:
raise IOError('')
return out.decode('utf-8')
def convert_text(text,
input_format='markdown',
output_format='panflute',
standalone=False,
extra_args=None,
pandoc_path=None):
r"""
Convert formatted text (usually markdown) by calling Pandoc internally
The default output format ('panflute') will return a tree
of Pandoc elements. When combined with 'standalone=True', the tree root
will be a 'Doc' element.
Example:
>>> from panflute import *
>>> md = 'Some *markdown* **text** ~xyz~'
>>> tex = r'Some $x^y$ or $x_n = \sqrt{a + b}$ \textit{a}'
>>> convert_text(md)
[Para(Str(Some) Space Emph(Str(markdown)) Space Strong(Str(text)) Space Subscript(Str(xyz)))]
>>> convert_text(tex)
[Para(Str(Some) Space Math(x^y; format='InlineMath') Space Str(or) Space Math(x_n = \sqrt{a + b}; format='InlineMath') Space RawInline(\textit{a}; format='tex'))]
:param text: text that will be converted
:type text: :class:`str` | :class:`.Element` | :class:`list` of :class:`.Element`
:param input_format: format of the text (default 'markdown').
Any Pandoc input format is valid, plus 'panflute' (a tree of Pandoc
elements)
:param output_format: format of the output
(default is 'panflute' which creates the tree of Pandoc elements).
Non-binary Pandoc formats are allowed (e.g. markdown, latex is allowed,
but docx and pdf are not).
:param standalone: whether the results will be a standalone document
or not.
:type standalone: :class:`bool`
:param extra_args: extra arguments passed to Pandoc
:type extra_args: :class:`list`
:param str pandoc_path: If specified, use the Pandoc at this path.
If None, default to that from PATH.
:rtype: :class:`list` | :class:`.Doc` | :class:`str`
Note: for a more general solution,
see `pyandoc <https://github.com/kennethreitz/pyandoc/>`_
by Kenneth Reitz.
"""
if input_format == 'panflute':
# Problem:
# We need a Doc element, but received a list of elements.
# So we wrap-up the list in a Doc, but with what pandoc-api version?
# (remember that Pandoc requires a matching api-version!)
# Workaround: call Pandoc with empty text to get its api-version
if not isinstance(text, Doc):
tmp_doc = convert_text('', standalone=True)
api_version = tmp_doc.api_version
if isinstance(text, Element):
text = [text]
text = Doc(*text, api_version=api_version)
# Dump the Doc into json
with io.StringIO() as f:
dump(text, f)
text = f.getvalue()
in_fmt = 'json' if input_format == 'panflute' else input_format
out_fmt = 'json' if output_format == 'panflute' else output_format
if extra_args is None:
extra_args = []
if standalone:
extra_args.append('--standalone')
out = inner_convert_text(text, in_fmt, out_fmt, extra_args, pandoc_path=pandoc_path)
if output_format == 'panflute':
out = json.loads(out, object_hook=from_json)
if standalone:
if not isinstance(out, Doc): # Pandoc 1.7.2 and earlier
metadata, items = out
out = Doc(*items, metadata=metadata)
else:
if isinstance(out, Doc): # Pandoc 1.8 and later
out = out.content.list
else:
out = out[1] # Pandoc 1.7.2 and earlier
return out
def inner_convert_text(text, input_format, output_format, extra_args, pandoc_path=None):
# like convert_text(), but does not support 'panflute' input/output
from_arg = '--from={}'.format(input_format)
to_arg = '--to={}'.format(output_format)
args = [from_arg, to_arg] + extra_args
out = run_pandoc(text, args, pandoc_path=pandoc_path)
out = "\n".join(out.splitlines()) # Replace \r\n with \n
return out
# ---------------------------
# Functions that modify content
# ---------------------------
def _replace_keyword(self, keyword, replacement, count=0):
"""
replace_keyword(keyword, replacement[, count])
Walk through the element and its children
and look for Str() objects that contains
exactly the keyword. Then, replace it.
Usually applied to an entire document (a :class:`.Doc` element)
Note: If the replacement is a block, it cannot be put in place of
a Str element. As a solution, the closest ancestor (e.g. the parent)
will be replaced instead, but only if possible
(if the parent only has one child).
Example:
>>> from panflute import *
>>> p1 = Para(Str('Spam'), Space, Emph(Str('and'), Space, Str('eggs')))
>>> p2 = Para(Str('eggs'))
>>> p3 = Plain(Emph(Str('eggs')))
>>> doc = Doc(p1, p2, p3)
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(eggs))) Para(Str(eggs)) Plain(Emph(Str(eggs))))
>>> doc.replace_keyword('eggs', Str('ham'))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(ham)) Plain(Emph(Str(ham))))
>>> doc.replace_keyword(keyword='ham', replacement=Para(Str('spam')))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(spam)) Para(Str(spam)))
:param keyword: string that will be searched (cannot have spaces!)
:type keyword: :class:`str`
:param replacement: element that will be placed in turn of the ``Str``
element that contains the keyword.
:type replacement: :class:`.Element`
:param count: number of occurrences that will be replaced.
If count is not given or is set to zero, all occurrences
will be replaced.
:type count: :class:`int`
"""
def replace_with_inline(e, doc):
if type(e) == Str and e.text == keyword:
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
def replace_with_block(e, doc):
'''
It's difficult to replace a keyword with an entire Block element.
This is because the keyword is of type Str (an Inline) and the parent
object of a Str can only contain Inlines and not Blocks
(e.g. Para can contain Inlines, not Divs)
Implications:
1) If the Str that contains the keyword is inside another
Inline instead of a Block (e.g. Div -> Emph -> Str)
then we have to do a trick:
when .walk() touches an Emph that contains Str(keyword),
it replaces the Emph with Str(keyword).
2) If the element that contains the Str(keyword) has multiple children,
then we are in a bind as replacing it will destroy information.
        Thus, we can't do it.
3) If the element that contains the Str(keyword) does so in a DictContainer
instead of a ListContainer, then we cannot retrieve the "first and only
element" easily, so we also abort (happens with metadata elements).
'''
# Here we can check that e.content is ListContainer (i.e. not DictContainer)
# or check that e is not a Metavalue ("not isinstance(e, MetaValue)")
if hasattr(e, 'content') and isinstance(e.content, ListContainer) and len(e.content) == 1:
ee = e.content[0]
if type(ee) == Str and ee.text == keyword:
if isinstance(e, Block):
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
elif isinstance(e, Inline):
return Str(keyword)
else:
pass # not implemented
doc = self.doc
if doc is None:
raise Exception('No root document')
doc.num_matches = 0
if isinstance(replacement, Inline):
return self.walk(replace_with_inline, doc)
elif isinstance(replacement, Block):
return self.walk(replace_with_block, doc)
else:
raise NotImplementedError(type(replacement))
# Bind the method
Element.replace_keyword = _replace_keyword
def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
"""
Fetch an option variable from either a local (element) level option/attribute tag,
a document level metadata tag, or a default.
:type options: ``dict``
:type local_tag: ``str``
:type doc: :class:`Doc`
:type doc_tag: ``str``
:type default: ``any``
:type error_on_none: ``bool``
The order of preference is local > document > default,
although if a local or document tag returns None, then the next level down is used.
Also, if error_on_none=True and the final variable is None, then a ValueError will be raised
In this manner you can set global variables, which can be optionally overridden at a local level.
For example, the two files below show how to apply different styles to docx text:
**main.md:**
.. code-block:: none
:linenos:
------------------
style-div:
name: MyStyle
------------------
:::style
some text
:::
::: {.style name=MyOtherStyle}
some more text
:::
**style_filter.py:**
.. code-block:: python
:linenos:
import panflute as pf
def action(elem, doc):
if type(elem) == pf.Div:
style = pf.get_option(elem.attributes, "name", doc, "style-div.name")
elem.attributes["custom-style"] = style
def main(doc=None):
            return pf.run_filter(action, doc=doc)
if __name__ == "__main__":
main()
"""
variable = None
# element level
if options is not None and local_tag is not None:
if local_tag in options and options[local_tag] is not None:
variable = options[local_tag]
if variable is not None:
return variable
# doc level
if doc is not None and doc_tag is not None:
variable = doc.get_metadata(doc_tag, None)
if variable is not None:
return variable
# default level
variable = default
if variable is None and error_on_none:
raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
return variable
# Copyright 2014: The Rally team
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.cli.commands import show
from tests.unit import fakes
from tests.unit import test
class ShowCommandsTestCase(test.TestCase):
def setUp(self):
super(ShowCommandsTestCase, self).setUp()
self.show = show.ShowCommands()
self.admin_endpoint = {
"username": "admin",
"password": "admin",
"tenant_name": "admin",
"auth_url": "http://fake.auth.url"
}
self.user_endpoints = {
"username": "user1",
"password": "user2",
"tenant_name": "user3",
"auth_url": "http://fake.auth.url"
}
self.fake_deployment_id = "7f6e88e0-897e-45c0-947c-595ce2437bee"
self.fake_clients = fakes.FakeClients()
self.fake_glance_client = fakes.FakeGlanceClient()
self.fake_nova_client = fakes.FakeNovaClient()
@mock.patch("rally.cli.commands.show.print", create=True)
@mock.patch("rally.cli.commands.show.cliutils.print_list")
@mock.patch("rally.cli.commands.show.cliutils.pretty_float_formatter")
@mock.patch("rally.cli.commands.show.utils.Struct")
@mock.patch("rally.cli.commands.show.osclients.Clients.glance")
@mock.patch("rally.cli.commands.show.db.deployment_get")
def test_images(self, mock_deployment_get, mock_get_glance,
mock_struct, mock_formatter, mock_print_list, mock_print):
self.fake_glance_client.images.create("image", None, None, None)
fake_image = list(self.fake_glance_client.images.cache.values())[0]
fake_image.size = 1
mock_get_glance.return_value = self.fake_glance_client
mock_deployment_get.return_value = {
"admin": self.admin_endpoint,
"users": [self.user_endpoints, self.user_endpoints]
}
self.show.images(self.fake_deployment_id)
mock_deployment_get.assert_called_once_with(self.fake_deployment_id)
mock_get_glance.assert_has_calls([mock.call()] * 3)
self.assertEqual(3, mock_get_glance.call_count)
headers = ["UUID", "Name", "Size (B)"]
fake_data = dict(
zip(headers, [fake_image.id, fake_image.name, fake_image.size])
)
mock_struct.assert_has_calls([mock.call(**fake_data)] * 3)
fake_formatters = {"Size (B)": mock_formatter()}
mixed_case_fields = ["UUID", "Name"]
mock_print_list.assert_has_calls([mock.call(
[mock_struct()],
fields=headers,
formatters=fake_formatters,
mixed_case_fields=mixed_case_fields
)] * 3)
self.assertEqual(3, mock_print.call_count)
@mock.patch("rally.cli.commands.show.cliutils.print_list")
@mock.patch("rally.cli.commands.show.cliutils.pretty_float_formatter")
@mock.patch("rally.cli.commands.show.utils.Struct")
@mock.patch("rally.cli.commands.show.osclients.Clients.nova")
@mock.patch("rally.cli.commands.show.db.deployment_get")
def test_flavors(self, mock_deployment_get, mock_get_nova,
mock_struct, mock_formatter, mock_print_list):
self.fake_nova_client.flavors.create()
fake_flavor = list(self.fake_nova_client.flavors.cache.values())[0]
fake_flavor.id, fake_flavor.name, fake_flavor.vcpus = 1, "m1.fake", 1
fake_flavor.ram, fake_flavor.swap, fake_flavor.disk = 1024, 128, 10
mock_get_nova.return_value = self.fake_nova_client
mock_deployment_get.return_value = {
"admin": self.admin_endpoint,
"users": [self.user_endpoints, self.user_endpoints]
}
self.show.flavors(self.fake_deployment_id)
mock_deployment_get.assert_called_once_with(self.fake_deployment_id)
mock_get_nova.assert_has_calls([mock.call()] * 3)
self.assertEqual(3, mock_get_nova.call_count)
headers = ["ID", "Name", "vCPUs", "RAM (MB)", "Swap (MB)", "Disk (GB)"]
fake_data = dict(
zip(headers,
[fake_flavor.id, fake_flavor.name, fake_flavor.vcpus,
fake_flavor.ram, fake_flavor.swap, fake_flavor.disk])
)
mock_struct.assert_has_calls([mock.call(**fake_data)] * 3)
fake_formatters = {"RAM (MB)": mock_formatter(),
"Swap (MB)": mock_formatter(),
"Disk (GB)": mock_formatter()}
mixed_case_fields = ["ID", "Name", "vCPUs"]
mock_print_list.assert_has_calls([mock.call(
[mock_struct()],
fields=headers,
formatters=fake_formatters,
mixed_case_fields=mixed_case_fields
)] * 3)
@mock.patch("rally.cli.commands.show.cliutils.print_list")
@mock.patch("rally.cli.commands.show.utils.Struct")
@mock.patch("rally.cli.commands.show.osclients.Clients.nova")
@mock.patch("rally.cli.commands.show.db.deployment_get")
def test_networks(self, mock_deployment_get, mock_get_nova,
mock_struct, mock_print_list):
self.fake_nova_client.networks.create(1234)
fake_network = list(self.fake_nova_client.networks.cache.values())[0]
fake_network.label = "fakenet"
fake_network.cidr = "10.0.0.0/24"
mock_get_nova.return_value = self.fake_nova_client
mock_deployment_get.return_value = {
"admin": self.admin_endpoint,
"users": [self.user_endpoints, self.user_endpoints]
}
self.show.networks(self.fake_deployment_id)
mock_deployment_get.assert_called_once_with(self.fake_deployment_id)
mock_get_nova.assert_has_calls([mock.call()] * 3)
self.assertEqual(3, mock_get_nova.call_count)
headers = ["ID", "Label", "CIDR"]
fake_data = dict(
zip(headers,
[fake_network.id, fake_network.label, fake_network.cidr])
)
mock_struct.assert_has_calls([mock.call(**fake_data)] * 3)
mixed_case_fields = ["ID", "Label", "CIDR"]
mock_print_list.assert_has_calls([mock.call(
[mock_struct()],
fields=headers,
mixed_case_fields=mixed_case_fields
)] * 3)
@mock.patch("rally.cli.commands.show.cliutils.print_list")
@mock.patch("rally.cli.commands.show.utils.Struct")
@mock.patch("rally.cli.commands.show.osclients.Clients.nova")
@mock.patch("rally.cli.commands.show.db.deployment_get")
def test_secgroups(self, mock_deployment_get, mock_get_nova,
mock_struct, mock_print_list):
self.fake_nova_client.security_groups.create("othersg")
fake_secgroup = list(
self.fake_nova_client.security_groups.cache.values())[0]
fake_secgroup.id = 0
fake_secgroup2 = list(
self.fake_nova_client.security_groups.cache.values())[1]
fake_secgroup2.id = 1
mock_get_nova.return_value = self.fake_nova_client
mock_deployment_get.return_value = {
"admin": self.admin_endpoint,
"users": [self.user_endpoints]
}
self.show.secgroups(self.fake_deployment_id)
mock_deployment_get.assert_called_once_with(self.fake_deployment_id)
mock_get_nova.assert_has_calls([mock.call()] * 2)
self.assertEqual(2, mock_get_nova.call_count)
headers = ["ID", "Name", "Description"]
fake_data = [fake_secgroup.id, fake_secgroup.name, ""]
fake_data2 = [fake_secgroup2.id, fake_secgroup2.name, ""]
calls = [mock.call(**dict(zip(headers, fake_data2))),
mock.call(**dict(zip(headers, fake_data)))]
mock_struct.assert_has_calls(calls * 2, any_order=True)
mixed_case_fields = ["ID", "Name", "Description"]
mock_print_list.assert_has_calls([mock.call(
[mock_struct(), mock_struct()],
fields=headers,
mixed_case_fields=mixed_case_fields
)] * 2)
@mock.patch("rally.cli.commands.show.cliutils.print_list")
@mock.patch("rally.cli.commands.show.utils.Struct")
@mock.patch("rally.cli.commands.show.osclients.Clients.nova")
@mock.patch("rally.cli.commands.show.db.deployment_get")
def test_keypairs(self, mock_deployment_get, mock_get_nova,
mock_struct, mock_print_list):
self.fake_nova_client.keypairs.create("keypair")
fake_keypair = list(self.fake_nova_client.keypairs.cache.values())[0]
fake_keypair.fingerprint = "84:87:58"
mock_get_nova.return_value = self.fake_nova_client
mock_deployment_get.return_value = {
"admin": self.admin_endpoint,
"users": [self.user_endpoints, self.user_endpoints]
}
self.show.keypairs(self.fake_deployment_id)
mock_deployment_get.assert_called_once_with(self.fake_deployment_id)
mock_get_nova.assert_has_calls([mock.call()] * 3)
self.assertEqual(3, mock_get_nova.call_count)
headers = ["Name", "Fingerprint"]
fake_data = dict(
zip(headers,
[fake_keypair.name, fake_keypair.fingerprint])
)
mock_struct.assert_has_calls([mock.call(**fake_data)] * 3)
mixed_case_fields = ["Name", "Fingerprint"]
mock_print_list.assert_has_calls([mock.call(
[mock_struct()],
fields=headers,
mixed_case_fields=mixed_case_fields
)] * 3)
|
|
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the
Google Analytics Data Export API."""
__author__ = '[email protected] (Nick Mihailovski)'
import gdata.data
import atom.core
# XML Namespace used in Google Analytics API entities.
DXP_NS = '{http://schemas.google.com/analytics/2009}%s'
GA_NS = '{http://schemas.google.com/ga/2009}%s'
class GetProperty(object):
"""Utility class to simplify retrieving Property objects."""
def get_property(self, name):
"""Helper method to return a propery object by its name attribute.
Args:
name: string The name of the <dxp:property> element to retrieve.
Returns:
A property object corresponding to the matching <dxp:property> element.
      If no property is found, None is returned.
"""
for prop in self.property:
if prop.name == name:
return prop
return None
GetProperty = get_property
class GetMetric(object):
"""Utility class to simplify retrieving Metric objects."""
def get_metric(self, name):
"""Helper method to return a propery value by its name attribute
Args:
name: string The name of the <dxp:metric> element to retrieve.
Returns:
      A metric object corresponding to the matching <dxp:metric> element.
      If no metric is found, None is returned.
"""
for met in self.metric:
if met.name == name:
return met
return None
GetMetric = get_metric
class GetDimension(object):
"""Utility class to simplify retrieving Dimension objects."""
def get_dimension(self, name):
"""Helper method to return a dimention object by its name attribute
Args:
name: string The name of the <dxp:dimension> element to retrieve.
Returns:
A dimension object corresponding to the matching <dxp:dimension> element.
      If no dimension is found, None is returned.
"""
for dim in self.dimension:
if dim.name == name:
return dim
return None
GetDimension = get_dimension
class StartDate(atom.core.XmlElement):
"""Analytics Feed <dxp:startDate>"""
_qname = DXP_NS % 'startDate'
class EndDate(atom.core.XmlElement):
"""Analytics Feed <dxp:endDate>"""
_qname = DXP_NS % 'endDate'
class Metric(atom.core.XmlElement):
"""Analytics Feed <dxp:metric>"""
_qname = DXP_NS % 'metric'
name = 'name'
type = 'type'
value = 'value'
confidence_interval = 'confidenceInterval'
class Aggregates(atom.core.XmlElement, GetMetric):
"""Analytics Data Feed <dxp:aggregates>"""
_qname = DXP_NS % 'aggregates'
metric = [Metric]
class TableId(atom.core.XmlElement):
"""Analytics Feed <dxp:tableId>"""
_qname = DXP_NS % 'tableId'
class TableName(atom.core.XmlElement):
"""Analytics Feed <dxp:tableName>"""
_qname = DXP_NS % 'tableName'
class Property(atom.core.XmlElement):
"""Analytics Feed <dxp:property>"""
_qname = DXP_NS % 'property'
name = 'name'
value = 'value'
class Definition(atom.core.XmlElement):
"""Analytics Feed <dxp:definition>"""
_qname = DXP_NS % 'definition'
class Segment(atom.core.XmlElement):
"""Analytics Feed <dxp:segment>"""
_qname = DXP_NS % 'segment'
id = 'id'
name = 'name'
definition = Definition
class Engagement(atom.core.XmlElement):
"""Analytics Feed <dxp:engagement>"""
_qname = GA_NS % 'engagement'
type = 'type'
comparison = 'comparison'
threshold_value = 'thresholdValue'
class Step(atom.core.XmlElement):
"""Analytics Feed <dxp:step>"""
_qname = GA_NS % 'step'
number = 'number'
name = 'name'
path = 'path'
class Destination(atom.core.XmlElement):
"""Analytics Feed <dxp:destination>"""
_qname = GA_NS % 'destination'
step = [Step]
expression = 'expression'
case_sensitive = 'caseSensitive'
match_type = 'matchType'
step1_required = 'step1Required'
class Goal(atom.core.XmlElement):
"""Analytics Feed <dxp:goal>"""
_qname = GA_NS % 'goal'
destination = Destination
engagement = Engagement
number = 'number'
name = 'name'
value = 'value'
active = 'active'
class CustomVariable(atom.core.XmlElement):
"""Analytics Data Feed <dxp:customVariable>"""
_qname = GA_NS % 'customVariable'
index = 'index'
name = 'name'
scope = 'scope'
class DataSource(atom.core.XmlElement, GetProperty):
"""Analytics Data Feed <dxp:dataSource>"""
_qname = DXP_NS % 'dataSource'
table_id = TableId
table_name = TableName
property = [Property]
class Dimension(atom.core.XmlElement):
"""Analytics Feed <dxp:dimension>"""
_qname = DXP_NS % 'dimension'
name = 'name'
value = 'value'
# Account Feed.
class AccountEntry(gdata.data.GDEntry, GetProperty):
"""Analytics Account Feed <entry>"""
_qname = atom.data.ATOM_TEMPLATE % 'entry'
table_id = TableId
property = [Property]
goal = [Goal]
custom_variable = [CustomVariable]
class AccountFeed(gdata.data.GDFeed):
"""Analytics Account Feed <feed>"""
_qname = atom.data.ATOM_TEMPLATE % 'feed'
segment = [Segment]
entry = [AccountEntry]
# Data Feed.
class DataEntry(gdata.data.GDEntry, GetMetric, GetDimension):
"""Analytics Data Feed <entry>"""
_qname = atom.data.ATOM_TEMPLATE % 'entry'
dimension = [Dimension]
metric = [Metric]
def get_object(self, name):
"""Returns either a Dimension or Metric object with the same name as the
name parameter.
Args:
name: string The name of the object to retrieve.
Returns:
      Either a Dimension or Metric object that has the same name as the name parameter.
"""
output = self.GetDimension(name)
if not output:
output = self.GetMetric(name)
return output
GetObject = get_object
class DataFeed(gdata.data.GDFeed):
"""Analytics Data Feed <feed>. Althrough there is only one datasource, it is
stored in an array to replicate the design of the Java client library and
ensure backwards compatibility if new data sources are added in the future.
"""
_qname = atom.data.ATOM_TEMPLATE % 'feed'
start_date = StartDate
end_date = EndDate
aggregates = Aggregates
data_source = [DataSource]
entry = [DataEntry]
segment = Segment
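# A brief usage sketch (illustration only): once an API response body has been
# parsed into a DataFeed -- for example with atom.core.parse(xml_string,
# DataFeed), where xml_string is a hypothetical Data Export API response --
# the mixins above allow name-based lookups:
#
#   >>> feed = atom.core.parse(xml_string, DataFeed)
#   >>> feed.aggregates.GetMetric('ga:visits').value
#   >>> feed.entry[0].GetObject('ga:source').value
#   >>> feed.data_source[0].GetProperty('ga:accountName').value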
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-enumerate,invalid-name
"""
Database of MeasureInput/MeasureResult pair.
This can be used for replaying measurement.
"""
import os
from .record import encode, decode, measure_str_key
class Database(object):
"""
Base class for a record database object.
"""
def load(self, inp, get_all=False):
"""
Load a result based on an input's string key
Parameters
----------
inp: MeasureInput
to be translated into key for RedisDB
get_all: bool, optional
Whether the latest result (or all matching results) should be returned
Returns
-------
rec: MeasureResult if previously saved, otherwise None
"""
raise NotImplementedError()
def save(self, inp, res, extend=False):
"""
Save a result based on an input's string key
Parameters
----------
inp: MeasureInput
to be translated into key for RedisDB
res: MeasureResult
to associate with key
extend:
Whether to extend existing MeasureResults if they exist
"""
raise NotImplementedError()
def filter_inputs(db, measure_inputs, retry=False):
"""
Filter a measure_inputs batch based on saved db results
Parameters
----------
db: Database
database object
measure_inputs: Array of MeasureInput
measure_inputs as expected in measure_batch
retry: bool
whether to retry if the saved result is a failure
Returns
-------
partial_results: Array of MeasureResult
        a full list of results, where None denotes no corresponding saved result
unsaved: Array of MeasureInput
a list that only contains unsaved inputs
"""
partial_results = list()
unsaved = list()
for inp in measure_inputs:
res = db.load(inp)
if res is None or (retry and res.error_no != 0):
unsaved.append(inp)
partial_results.append(None)
else:
partial_results.append(res)
return partial_results, unsaved
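# A minimal sketch of the intended flow (``measure_batch`` is the tuner-side
# measurement routine referenced in the docstring above; it is not defined in
# this module):
#
#   >>> results, unsaved = filter_inputs(db, measure_inputs, retry=True)
#   >>> fresh = measure_batch(unsaved)            # re-measure only the gaps
#   >>> for inp, res in zip(unsaved, fresh):
#   ...     db.save(inp, res, extend=True)        # keep earlier records too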
class RedisDatabase(Database):
"""
Redis version of record database
"""
REDIS_PROD = 15
REDIS_LOCA = 14
REDIS_TEST = 13 # for unit test
REDIS_NIGHT_TEMP = 12 # for nightly report (will be flushed after every workload)
MAGIC_SPLIT = "$"
def __init__(self, db_index=REDIS_PROD):
# pylint: disable=import-outside-toplevel
import redis
if db_index == RedisDatabase.REDIS_TEST:
host = "localhost"
else:
host = os.environ.get("TVM_FLEET_HOST")
self.db = redis.StrictRedis(host=host, port=6379, db=db_index)
self.db_index = db_index
def set(self, key, value):
self.db.set(key, value)
def get(self, key):
current = self.db.get(key)
return current.decode() if isinstance(current, bytes) else current
def load(self, inp, get_all=False):
current = self.get(measure_str_key(inp))
if current is not None:
records = [decode(x) for x in current.split(RedisDatabase.MAGIC_SPLIT)]
results = [rec[1] for rec in records if rec is not None]
if get_all:
return results
return max(results, key=lambda result: result.timestamp)
return current
def save(self, inp, res, extend=False):
current = self.get(measure_str_key(inp))
if not extend or current is None:
self.set(measure_str_key(inp), RedisDatabase.MAGIC_SPLIT.join([encode(inp, res)]))
else:
current = current.split(RedisDatabase.MAGIC_SPLIT)
self.set(
measure_str_key(inp), RedisDatabase.MAGIC_SPLIT.join(current + [encode(inp, res)])
)
def filter(self, func):
"""
Dump all of the records that match the given rule
Parameters
----------
func: callable
The signature of the function is (MeasureInput, [MeasureResult]) -> bool
Returns
-------
list of records in tuple (MeasureInput, MeasureResult) matching the rule
Examples
--------
get records for a target
>>> db.filter(lambda inp, results: "cuda" in inp.target.keys)
get records with errors
>>> db.filter(lambda inp, results: any(r.error_no != 0 for r in results))
"""
matched_records = list()
# may consider filtering in iterator in the future
for key in self.db.keys():
current = self.get(key)
try:
records = [decode(x) for x in current.split(RedisDatabase.MAGIC_SPLIT)]
records = [rec for rec in records if rec is not None]
except TypeError: # got a badly formatted/old format record
continue
if not records:
continue
inps, results = zip(*records)
inp = inps[0]
if not func(inp, results):
continue
result = max(results, key=lambda res: res.timestamp)
matched_records.append((inp, result))
return matched_records
def flush(self):
self.db.flushdb()
class DummyDatabase(RedisDatabase):
"""
A database based on python dictionary for testing.
"""
def __init__(self):
# pylint: disable=super-init-not-called
self.db = {}
def set(self, key, value):
self.db[key] = value
def flush(self):
self.db = {}
|
|
# Copyright 2011 Johan Rydberg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
from twisted.internet import defer, task, error
from twisted.python import log
from txgossip.recipies import KeyStoreMixin, LeaderElectionMixin
from .assign import AssignmentComputer
class _LeaderElectionProtocol(LeaderElectionMixin):
"""Private version of the leader election protocol that informs
the application logic about election results.
"""
def __init__(self, clock, app):
LeaderElectionMixin.__init__(self, clock, vote_delay=2)
self._app = app
def leader_elected(self, is_leader, leader):
LeaderElectionMixin.leader_elected(self, is_leader, leader)
self._app.leader_elected(is_leader)
class FechterProtocol:
"""Implementation of our 'fechter protocol'."""
STATUS = 'private:status'
def __init__(self, clock, storage, platform, pinger):
self.election = _LeaderElectionProtocol(clock, self)
self.keystore = KeyStoreMixin(clock, storage,
[self.election.LEADER_KEY, self.election.VOTE_KEY,
self.election.PRIO_KEY, self.STATUS])
self.computer = AssignmentComputer(self.keystore)
self.platform = platform
self.clock = clock
self.pinger = pinger
self._connectivity_checker = task.LoopingCall(
self._check_connectivity)
self._status = 'down'
self._connectivity = 'down'
@defer.inlineCallbacks
def _check_connectivity(self):
"""Check connectivity with gateway."""
tries = 0
done = 0
while not done:
tries += 1
try:
yield self.pinger.check_connectivity(timeout=1)
except error.TimeoutError:
if tries == 3:
self.set_connectivity('down')
break
else:
done = 1
else:
self.set_connectivity('up')
def _update_status(self):
"""Update status that will be communicated to other peers."""
status = self._status if self._connectivity == 'up' else 'down'
        log.msg('change status in keystore to "%s"' % (status,))
self.gossiper.set(self.STATUS, status)
def connectivity(self):
"""Return current connectivity status."""
return self._connectivity
def set_connectivity(self, status):
"""Change connectivity status.
@param status: A string that is either C{up} or C{down}.
@type status: C{str}
"""
assert status in ('up', 'down')
if status != self._connectivity:
self._connectivity = status
self._update_status()
def status(self):
"""Return current administrative status."""
return self._status
def set_status(self, status):
"""Change status.
@param status: A string that is either C{up} or C{down}.
@type status: C{str}
"""
assert status in ('up', 'down')
if status != self._status:
self._status = status
self._update_status()
def add_resource(self, resource):
"""Add a resource.
@param resource: the resource that can be distributed over the
cluster
@type resource: a C{str}
@return: the unique ID of the resource
@rtype: C{str}
"""
resource_id = str(uuid.uuid4())
resource_key = 'resource:%s' % (resource_id,)
self.keystore[resource_key] = [self.clock.seconds(),
'please-assign', resource]
return resource_id
def list_resources(self):
"""Return a mapping of all existing resources."""
resources = {}
for key in self.keystore.keys('resource:*'):
if self.keystore[key] is None:
continue
resource_id = key[9:]
timestamp, state, resource = self.keystore[key]
if state != 'please-assign':
continue
resources[resource_id] = {'resource': resource}
assign_key = 'assign:%s' % (resource_id,)
if assign_key in self.keystore:
assigned_to = self.keystore[assign_key]
if assigned_to:
resources[resource_id]['assigned_to'] = assigned_to
return resources
def _check_consensus(self, key):
"""Check if all peers have the same value for C{key}.
Return the value if they all have the same value, otherwise
return C{None}.
"""
correct = self.gossiper.get(key)
for peer in self.gossiper.live_peers:
if not key in peer.keys():
return None
value = peer.get(key)
if value != correct:
return None
return correct
def value_changed(self, peer, key, value):
"""A peer changed one of its values."""
if key == '__heartbeat__':
return
if self.election.value_changed(peer, key, value):
# This value change was handled by the leader election
# protocol.
return
self.keystore.value_changed(peer, key, value)
if key == self.STATUS:
self.status_change(peer, value == 'up')
return
if peer.name != self.gossiper.name:
# We ignore anything that has not yet been replicated to
# our own peer.
return
if self.election.is_leader is None:
# Ignore because we have not seen an election yet.
return
if key.startswith('assign:'):
# First check if we want any resources at all, since this
# may be an old assignment.
status = self.gossiper.get(self.STATUS)
resource_id = key[7:]
resource_key = 'resource:%s' % (resource_id,)
self.platform.assign_resource(resource_id,
self.keystore.get(key) == self.gossiper.name,
self.keystore.get(resource_key)[2])
elif key.startswith('resource:'):
if self.election.is_leader:
self.assign_resources()
def status_change(self, peer, up):
"""A peer changed its status flag.
@param up: true if the peer changed its status to I{up}.
@type up: C{bool}
"""
log.msg('status changed for %s to %s' % (peer.name,
"up" if up else "down"))
if self.election.is_leader:
self.assign_resources()
def leader_elected(self, is_leader):
"""The result of an election is in.
        @param is_leader: C{true} if this peer was elected the
            leader of the cluster.
@type is_leader: C{bool}
"""
log.msg('leader elected and it %s us!' % (
"IS" if is_leader else "IS NOT"))
if is_leader:
self.assign_resources()
def collect_peers(self):
"""Gather up which peers that should be assigned resources.
Skip a peer if it is dead or if it has its C{status} falg set
to something else than C{'up'}.
"""
peers = [peer.name for peer in self.gossiper.live_peers
if peer.get(self.STATUS) == 'up']
if self.gossiper.get(self.STATUS) == 'up':
peers.append(self.gossiper.name)
peers.sort(key=lambda peer: hash(peer))
return peers
def assign_resources(self):
"""Process and assign resources to peers in the cluster."""
self.computer.assign_resources(self.collect_peers())
def make_connection(self, gossiper):
"""Make connection to gossip instance."""
self.gossiper = gossiper
self._update_status()
self.election.make_connection(gossiper)
self.keystore.make_connection(gossiper)
self._connectivity_checker.start(5)
def peer_alive(self, peer):
self.election.peer_alive(peer)
def peer_dead(self, peer):
        self.election.peer_dead(peer)
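# Summary of the resource lifecycle wired up above (descriptive only):
#
#   1. add_resource() stores [timestamp, 'please-assign', resource] under a
#      'resource:<uuid>' key, which the gossip keystore replicates.
#   2. On the elected leader, value_changed() reacts to 'resource:*' updates
#      and calls assign_resources(); the AssignmentComputer it delegates to is
#      expected to publish matching 'assign:<uuid>' entries for the peers
#      returned by collect_peers().
#   3. Every peer reacts to 'assign:<uuid>' updates and tells its platform
#      whether it now owns the resource via platform.assign_resource().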
|
|
# Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_hamode_db
from neutron.extensions import l3
from neutron.extensions import l3_ext_ha_mode
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.tests.unit import testlib_api
from neutron.tests.unit import testlib_plugin
_uuid = uuidutils.generate_uuid
class FakeL3PluginWithAgents(common_db_mixin.CommonDbMixin,
l3_hamode_db.L3_HA_NAT_db_mixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agents_db.AgentDbMixin):
pass
class L3HATestFramework(testlib_api.SqlTestCase,
testlib_plugin.PluginSetupHelper):
def setUp(self):
super(L3HATestFramework, self).setUp()
self.admin_ctx = context.get_admin_context()
self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
self.core_plugin = manager.NeutronManager.get_plugin()
notif_p = mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
'_notify_ha_interfaces_updated')
self.notif_m = notif_p.start()
cfg.CONF.set_override('allow_overlapping_ips', True)
self.plugin = FakeL3PluginWithAgents()
self._register_agents()
def _register_agents(self):
agent_status = {
'agent_type': constants.AGENT_TYPE_L3,
'binary': 'neutron-l3-agent',
'host': 'l3host',
'topic': 'N/A',
'configurations': {'agent_mode': 'legacy'}
}
self.plugin.create_or_update_agent(self.admin_ctx, agent_status)
agent_status['host'] = 'l3host_2'
agent_status['configurations'] = {'agent_mode': 'dvr_snat'}
self.plugin.create_or_update_agent(self.admin_ctx, agent_status)
self.agent1, self.agent2 = self.plugin.get_agents(self.admin_ctx)
def _create_router(self, ha=True, tenant_id='tenant1', distributed=None,
ctx=None):
if ctx is None:
ctx = self.admin_ctx
ctx.tenant_id = tenant_id
router = {'name': 'router1', 'admin_state_up': True}
if ha is not None:
router['ha'] = ha
if distributed is not None:
router['distributed'] = distributed
return self.plugin.create_router(ctx, {'router': router})
def _update_router(self, router_id, ha=True, distributed=None, ctx=None):
if ctx is None:
ctx = self.admin_ctx
data = {'ha': ha} if ha is not None else {}
if distributed is not None:
data['distributed'] = distributed
return self.plugin._update_router_db(ctx, router_id,
data, None)
def _bind_router(self, router_id):
with self.admin_ctx.session.begin(subtransactions=True):
bindings = self.plugin.get_ha_router_port_bindings(self.admin_ctx,
[router_id])
for agent_id, binding in zip(
[self.agent1['id'], self.agent2['id']], bindings):
binding.l3_agent_id = agent_id
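# Note: _bind_router() above stands in for what the HA scheduler would
# normally do -- it attaches the router's HA port bindings to the two agents
# registered in setUp(), so get_ha_sync_data_for_host() can return the router
# for a given host in the tests below.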
class L3HATestCase(L3HATestFramework):
def test_verify_configuration_succeed(self):
# Default configuration should pass
self.plugin._verify_configuration()
def test_verify_configuration_l3_ha_net_cidr_is_not_a_cidr(self):
cfg.CONF.set_override('l3_ha_net_cidr', 'not a cidr')
self.assertRaises(
l3_ext_ha_mode.HANetworkCIDRNotValid,
self.plugin._verify_configuration)
def test_verify_configuration_l3_ha_net_cidr_is_not_a_subnet(self):
cfg.CONF.set_override('l3_ha_net_cidr', '10.0.0.1/8')
self.assertRaises(
l3_ext_ha_mode.HANetworkCIDRNotValid,
self.plugin._verify_configuration)
def test_verify_configuration_min_l3_agents_per_router_below_minimum(self):
cfg.CONF.set_override('min_l3_agents_per_router', 0)
self.assertRaises(
l3_ext_ha_mode.HAMinimumAgentsNumberNotValid,
self.plugin._check_num_agents_per_router)
def test_verify_configuration_max_l3_agents_below_min_l3_agents(self):
cfg.CONF.set_override('max_l3_agents_per_router', 3)
cfg.CONF.set_override('min_l3_agents_per_router', 4)
self.assertRaises(
l3_ext_ha_mode.HAMaximumAgentsNumberNotValid,
self.plugin._check_num_agents_per_router)
def test_verify_configuration_max_l3_agents_unlimited(self):
cfg.CONF.set_override('max_l3_agents_per_router',
l3_hamode_db.UNLIMITED_AGENTS_PER_ROUTER)
self.plugin._check_num_agents_per_router()
def test_ha_router_create(self):
router = self._create_router()
self.assertTrue(router['ha'])
def test_ha_router_create_with_distributed(self):
self.assertRaises(l3_ext_ha_mode.DistributedHARouterNotSupported,
self._create_router,
distributed=True)
def test_no_ha_router_create(self):
router = self._create_router(ha=False)
self.assertFalse(router['ha'])
def test_router_create_with_ha_conf_enabled(self):
cfg.CONF.set_override('l3_ha', True)
router = self._create_router(ha=None)
self.assertTrue(router['ha'])
def test_migration_from_ha(self):
router = self._create_router()
self.assertTrue(router['ha'])
router = self._update_router(router['id'], ha=False)
self.assertFalse(router.extra_attributes['ha'])
self.assertIsNone(router.extra_attributes['ha_vr_id'])
def test_migration_to_ha(self):
router = self._create_router(ha=False)
self.assertFalse(router['ha'])
router = self._update_router(router['id'], ha=True)
self.assertTrue(router.extra_attributes['ha'])
self.assertIsNotNone(router.extra_attributes['ha_vr_id'])
def test_migrate_ha_router_to_distributed(self):
router = self._create_router()
self.assertTrue(router['ha'])
self.assertRaises(l3_ext_ha_mode.DistributedHARouterNotSupported,
self._update_router,
router['id'],
distributed=True)
def test_l3_agent_routers_query_interface(self):
router = self._create_router()
self._bind_router(router['id'])
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
self.agent1['host'])
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('ha'))
interface = router.get(constants.HA_INTERFACE_KEY)
self.assertIsNotNone(interface)
self.assertEqual(constants.DEVICE_OWNER_ROUTER_HA_INTF,
interface['device_owner'])
self.assertEqual(cfg.CONF.l3_ha_net_cidr, interface['subnet']['cidr'])
def test_update_state(self):
router = self._create_router()
self._bind_router(router['id'])
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
self.agent1['host'])
state = routers[0].get(constants.HA_ROUTER_STATE_KEY)
self.assertEqual('standby', state)
self.plugin.update_router_state(self.admin_ctx, router['id'], 'active',
self.agent1['host'])
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
self.agent1['host'])
state = routers[0].get(constants.HA_ROUTER_STATE_KEY)
self.assertEqual('active', state)
def test_unique_ha_network_per_tenant(self):
tenant1 = _uuid()
tenant2 = _uuid()
self._create_router(tenant_id=tenant1)
self._create_router(tenant_id=tenant2)
ha_network1 = self.plugin.get_ha_network(self.admin_ctx, tenant1)
ha_network2 = self.plugin.get_ha_network(self.admin_ctx, tenant2)
self.assertNotEqual(
ha_network1['network_id'], ha_network2['network_id'])
def _deployed_router_change_ha_flag(self, to_ha):
self._create_router(ha=not to_ha)
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
router = routers[0]
interface = router.get(constants.HA_INTERFACE_KEY)
if to_ha:
self.assertIsNone(interface)
else:
self.assertIsNotNone(interface)
self._update_router(router['id'], to_ha)
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
router = routers[0]
interface = router.get(constants.HA_INTERFACE_KEY)
if to_ha:
self.assertIsNotNone(interface)
else:
self.assertIsNone(interface)
def test_deployed_router_can_have_ha_enabled(self):
self._deployed_router_change_ha_flag(to_ha=True)
def test_deployed_router_can_have_ha_disabled(self):
self._deployed_router_change_ha_flag(to_ha=False)
def test_create_ha_router_notifies_agent(self):
self._create_router()
self.assertTrue(self.notif_m.called)
def test_update_router_to_ha_notifies_agent(self):
router = self._create_router(ha=False)
self.notif_m.reset_mock()
self._update_router(router['id'], ha=True)
self.assertTrue(self.notif_m.called)
def test_unique_vr_id_between_routers(self):
self._create_router()
self._create_router()
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
self.assertEqual(2, len(routers))
self.assertNotEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id'])
@mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 1)))
def test_vr_id_depleted(self):
self.assertRaises(l3_ext_ha_mode.NoVRIDAvailable, self._create_router)
@mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 2)))
def test_vr_id_unique_range_per_tenant(self):
self._create_router()
self._create_router(tenant_id=_uuid())
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
self.assertEqual(2, len(routers))
self.assertEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id'])
@mock.patch('neutron.db.l3_hamode_db.MAX_ALLOCATION_TRIES', new=2)
def test_vr_id_allocation_contraint_conflict(self):
router = self._create_router()
network = self.plugin.get_ha_network(self.admin_ctx,
router['tenant_id'])
with mock.patch.object(self.plugin, '_get_allocated_vr_id',
return_value=set()) as alloc:
self.assertRaises(l3_ext_ha_mode.MaxVRIDAllocationTriesReached,
self.plugin._allocate_vr_id, self.admin_ctx,
network.network_id, router['id'])
self.assertEqual(2, len(alloc.mock_calls))
def test_vr_id_allocation_delete_router(self):
router = self._create_router()
network = self.plugin.get_ha_network(self.admin_ctx,
router['tenant_id'])
allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx,
network.network_id)
router = self._create_router()
allocs_current = self.plugin._get_allocated_vr_id(self.admin_ctx,
network.network_id)
self.assertNotEqual(allocs_before, allocs_current)
self.plugin.delete_router(self.admin_ctx, router['id'])
allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx,
network.network_id)
self.assertEqual(allocs_before, allocs_after)
def test_vr_id_allocation_router_migration(self):
router = self._create_router()
network = self.plugin.get_ha_network(self.admin_ctx,
router['tenant_id'])
allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx,
network.network_id)
router = self._create_router()
self._update_router(router['id'], ha=False)
allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx,
network.network_id)
self.assertEqual(allocs_before, allocs_after)
def test_one_ha_router_one_not(self):
self._create_router(ha=False)
self._create_router()
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
ha0 = routers[0]['ha']
ha1 = routers[1]['ha']
self.assertNotEqual(ha0, ha1)
def test_add_ha_port_binding_failure_rolls_back_port(self):
router = self._create_router()
device_filter = {'device_id': [router['id']]}
ports_before = self.core_plugin.get_ports(
self.admin_ctx, filters=device_filter)
network = self.plugin.get_ha_network(self.admin_ctx,
router['tenant_id'])
with mock.patch.object(self.plugin, '_create_ha_port_binding',
side_effect=ValueError):
self.assertRaises(ValueError, self.plugin.add_ha_port,
self.admin_ctx, router['id'], network.network_id,
router['tenant_id'])
ports_after = self.core_plugin.get_ports(
self.admin_ctx, filters=device_filter)
self.assertEqual(ports_before, ports_after)
def test_create_ha_network_binding_failure_rolls_back_network(self):
networks_before = self.core_plugin.get_networks(self.admin_ctx)
with mock.patch.object(self.plugin,
'_create_ha_network_tenant_binding',
side_effect=ValueError):
self.assertRaises(ValueError, self.plugin._create_ha_network,
self.admin_ctx, _uuid())
networks_after = self.core_plugin.get_networks(self.admin_ctx)
self.assertEqual(networks_before, networks_after)
def test_create_ha_network_subnet_failure_rolls_back_network(self):
networks_before = self.core_plugin.get_networks(self.admin_ctx)
with mock.patch.object(self.plugin, '_create_ha_subnet',
side_effect=ValueError):
self.assertRaises(ValueError, self.plugin._create_ha_network,
self.admin_ctx, _uuid())
networks_after = self.core_plugin.get_networks(self.admin_ctx)
self.assertEqual(networks_before, networks_after)
def test_create_ha_interfaces_binding_failure_rolls_back_ports(self):
router = self._create_router()
network = self.plugin.get_ha_network(self.admin_ctx,
router['tenant_id'])
device_filter = {'device_id': [router['id']]}
ports_before = self.core_plugin.get_ports(
self.admin_ctx, filters=device_filter)
router_db = self.plugin._get_router(self.admin_ctx, router['id'])
with mock.patch.object(self.plugin, '_create_ha_port_binding',
side_effect=ValueError):
self.assertRaises(ValueError, self.plugin._create_ha_interfaces,
self.admin_ctx, router_db, network)
ports_after = self.core_plugin.get_ports(
self.admin_ctx, filters=device_filter)
self.assertEqual(ports_before, ports_after)
def test_create_router_db_ha_attribute_failure_rolls_back_router(self):
routers_before = self.plugin.get_routers(self.admin_ctx)
for method in ('_set_vr_id',
'_create_ha_interfaces',
'_notify_ha_interfaces_updated'):
with mock.patch.object(self.plugin, method,
side_effect=ValueError):
self.assertRaises(ValueError, self._create_router)
routers_after = self.plugin.get_routers(self.admin_ctx)
self.assertEqual(routers_before, routers_after)
def test_exclude_dvr_agents_for_ha_candidates(self):
"""Test dvr agents are not counted in the ha candidates.
This test case tests that when get_number_of_agents_for_scheduling
is called, it doesn't count dvr agents.
"""
# Test setup registers two l3 agents.
# Register another l3 agent with dvr mode and assert that
# get_number_of_ha_agent_candidates return 2.
dvr_agent_status = {
'agent_type': constants.AGENT_TYPE_L3,
'binary': 'neutron-l3-agent',
'host': 'l3host_3',
'topic': 'N/A',
'configurations': {'agent_mode': 'dvr'}
}
self.plugin.create_or_update_agent(self.admin_ctx, dvr_agent_status)
num_ha_candidates = self.plugin.get_number_of_agents_for_scheduling(
self.admin_ctx)
self.assertEqual(2, num_ha_candidates)
class L3HAModeDbTestCase(L3HATestFramework):
def _create_network(self, plugin, ctx, name='net',
tenant_id='tenant1'):
network = {'network': {'name': name,
'shared': False,
'admin_state_up': True,
'tenant_id': tenant_id}}
return plugin.create_network(ctx, network)['id']
def _create_subnet(self, plugin, ctx, network_id, cidr='10.0.0.0/8',
name='subnet', tenant_id='tenant1'):
subnet = {'subnet': {'name': name,
'ip_version': 4,
'network_id': network_id,
'cidr': cidr,
'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
'host_routes': attributes.ATTR_NOT_SPECIFIED,
'tenant_id': tenant_id,
'enable_dhcp': True,
'ipv6_ra_mode': attributes.ATTR_NOT_SPECIFIED}}
created_subnet = plugin.create_subnet(ctx, subnet)
return created_subnet
def test_remove_ha_in_use(self):
router = self._create_router(ctx=self.admin_ctx)
network_id = self._create_network(self.core_plugin, self.admin_ctx)
subnet = self._create_subnet(self.core_plugin, self.admin_ctx,
network_id)
interface_info = {'subnet_id': subnet['id']}
self.plugin.add_router_interface(self.admin_ctx,
router['id'],
interface_info)
self.assertRaises(l3.RouterInUse, self.plugin.delete_router,
self.admin_ctx, router['id'])
bindings = self.plugin.get_ha_router_port_bindings(
self.admin_ctx, [router['id']])
self.assertEqual(2, len(bindings))
class L3HAUserTestCase(L3HATestFramework):
def setUp(self):
super(L3HAUserTestCase, self).setUp()
self.user_ctx = context.Context('', _uuid())
def test_create_ha_router(self):
self._create_router(ctx=self.user_ctx)
def test_update_router(self):
router = self._create_router(ctx=self.user_ctx)
self._update_router(router['id'], ha=False, ctx=self.user_ctx)
def test_delete_router(self):
router = self._create_router(ctx=self.user_ctx)
self.plugin.delete_router(self.user_ctx, router['id'])
|
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
from tensorflow.lite.python import lite
from tensorflow.lite.python import lite_v2_test_util
from tensorflow.lite.python.convert import mlir_quantize
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
from tensorflow.python.saved_model import save_options
from tensorflow.python.saved_model import saved_model
from tensorflow.python.saved_model.loader_impl import parse_saved_model
from tensorflow.python.saved_model.save import save
from tensorflow.python.training.tracking import tracking
class FromConcreteFunctionTest(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testTypeInvalid(self):
root = self._getSimpleVariableModel()
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_concrete_functions([root.f])
self.assertIn('call get_concrete_function', str(error.exception))
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
@test_util.run_v2_only
def testFloat(self, enable_mlir_converter):
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = enable_mlir_converter
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@parameterized.named_parameters(
('_INT8InputOutput', lite.constants.INT8),
('_UINT8InputOutput', lite.constants.QUANTIZED_UINT8),
('_INT16InputOutput', lite.constants.INT16))
@test_util.run_v2_only
def testInvalidFloat(self, inference_input_output_type):
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
with self.assertRaises(ValueError) as error:
converter.inference_input_type = inference_input_output_type
converter.inference_output_type = inference_input_output_type
converter.convert()
self.assertEqual(
'The inference_input_type and inference_output_type '
'must be tf.float32.', str(error.exception))
@test_util.run_v2_only
def testScalarInput(self):
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testMultiFunctionModel(self):
"""Convert a single model in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.add.get_concrete_function(input_data)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.add(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testConvertMultipleFunctions(self):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = tf.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
# Try converting multiple functions.
converter = lite.TFLiteConverterV2.from_concrete_functions(
[add_func, sub_func])
with self.assertRaises(ValueError) as error:
_ = converter.convert()
self.assertIn('can only convert a single ConcreteFunction',
str(error.exception))
def _getIntegerQuantizeModel(self):
np.random.seed(0)
root = tracking.AutoTrackable()
@tf.function(
input_signature=[tf.TensorSpec(shape=[1, 5, 5, 3], dtype=tf.float32)])
def func(inp):
conv = tf.nn.conv2d(
inp, tf.ones([3, 3, 3, 16]), strides=[1, 1, 1, 1], padding='SAME')
output = tf.nn.relu(conv, name='output')
return output
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
root.f = func
to_save = root.f.get_concrete_function()
return (to_save, calibration_gen)
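  # The generator above supplies representative inputs; post-training
  # quantization runs them through the float model to calibrate activation
  # ranges before weights and activations are quantized.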
@parameterized.named_parameters(
('EnableMlirQuantizer', True), # enable mlir quantizer
('DisableMlirQuantizer', False)) # disable mlir quantizer
def testPostTrainingCalibrateAndQuantize(self, mlir_quantizer):
func, calibration_gen = self._getIntegerQuantizeModel()
# Convert float model.
float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
# Convert quantized model.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_converter._experimental_new_quantizer = mlir_quantizer
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
@parameterized.named_parameters(
('_INT8InputOutput', lite.constants.INT8),
('_UINT8InputOutput', lite.constants.QUANTIZED_UINT8),
('_INT16InputOutput', lite.constants.INT16))
@test_util.run_v2_only
def testInvalidPostTrainingDynamicRangeQuantization(
self, inference_input_output_type):
func, _ = self._getIntegerQuantizeModel()
# Convert float model.
converter = lite.TFLiteConverterV2.from_concrete_functions([func])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Convert quantized model.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
with self.assertRaises(ValueError) as error:
quantized_converter.inference_input_type = inference_input_output_type
quantized_converter.inference_output_type = inference_input_output_type
quantized_converter.convert()
self.assertEqual(
'The inference_input_type and inference_output_type '
'must be tf.float32.', str(error.exception))
@parameterized.named_parameters(
('_Default', False, False, lite.constants.FLOAT),
('_INT8InputOutput', False, False, lite.constants.INT8),
('_UINT8InputOutput', False, False, lite.constants.QUANTIZED_UINT8),
('_INT16Quantize', False, True, lite.constants.FLOAT),
('_INT16Quantize_INT16InputOutput', False, True, lite.constants.INT16),
('_IntOnly', True, False, lite.constants.FLOAT),
('_IntOnly_INT8InputOutput', True, False, lite.constants.INT8),
('_IntOnly_UINT8InputOutput', True, False,
lite.constants.QUANTIZED_UINT8),
('_IntOnly_INT16Quantize', True, True, lite.constants.FLOAT),
('_IntOnly_INT16Quantize_INT16InputOutput', True, True,
lite.constants.INT16))
def testIntegerQuantization(self, is_int_only, is_int16_quantize,
inference_input_output_type):
func, calibration_gen = self._getIntegerQuantizeModel()
# Convert float model.
converter = lite.TFLiteConverterV2.from_concrete_functions([func])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Convert quantized model.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
if is_int_only:
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.\
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
else:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
else:
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.\
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
lite.OpsSet.TFLITE_BUILTINS
]
quantized_converter.inference_input_type = inference_input_output_type
quantized_converter.inference_output_type = inference_input_output_type
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(inference_input_output_type.as_numpy_dtype,
input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(inference_input_output_type.as_numpy_dtype,
output_details[0]['dtype'])
# Ensure that the quantized tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(tflite_model))
@parameterized.named_parameters(
('_INT16Quantize_INT8InputOutput', True, lite.constants.INT8))
def testInvalidIntegerQuantization(self, is_int16_quantize,
inference_input_output_type):
func, calibration_gen = self._getIntegerQuantizeModel()
# Convert quantized model.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.\
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
lite.OpsSet.TFLITE_BUILTINS
]
with self.assertRaises(ValueError) as error:
quantized_converter.inference_input_type = lite.constants.INT8
quantized_converter.inference_output_type = lite.constants.INT8
quantized_converter.convert()
self.assertEqual(
"The inference_input_type and inference_output_type "
"must be in ['tf.float32', 'tf.int16'].", str(error.exception))
def testCalibrateAndQuantizeBuiltinInt16(self):
func, calibration_gen = self._getIntegerQuantizeModel()
# Convert float model.
float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
converter = lite.TFLiteConverterV2.from_concrete_functions([func])
# TODO(b/156309549): We should add INT16 to the builtin types.
converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
converter.representative_dataset = calibration_gen
converter._experimental_calibrate_only = True
calibrated_tflite = converter.convert()
quantized_tflite_model = mlir_quantize(
calibrated_tflite, inference_type=_types_pb2.QUANTIZED_INT16)
self.assertIsNotNone(quantized_tflite_model)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
def _getTrainingTimeQuantizedModel(self):
class QLinear(tf.keras.layers.Layer):
def __init__(self, units=3, **kwargs):
super(QLinear, self).__init__(**kwargs)
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
'weight',
shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True)
self.min_var = self.add_weight(
'min',
initializer=tf.keras.initializers.Constant(-6.0),
trainable=False)
self.max_var = self.add_weight(
'max',
initializer=tf.keras.initializers.Constant(6.0),
trainable=False)
def call(self, inputs):
x = tf.quantization.fake_quant_with_min_max_vars(
inputs, self.min_var, self.max_var)
w_fq = tf.quantization.fake_quant_with_min_max_vars(
self.w, self.min_var, self.max_var)
x = tf.matmul(x, w_fq)
x = tf.quantization.fake_quant_with_min_max_vars(
x, self.min_var, self.max_var)
return x
return tf.keras.Sequential(QLinear(3, input_shape=(2,)))
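  # The fake_quant_with_min_max_vars ops above record the (min, max) ranges in
  # the graph itself, which is why the quantized conversion below needs no
  # representative dataset.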
@parameterized.named_parameters(
('_DefaultFLOAT32InputOutput', lite.constants.FLOAT),
('_INT8InputOutput', lite.constants.INT8),
('_UINT8InputOutput', lite.constants.QUANTIZED_UINT8))
@test_util.run_v2_only
def testTrainingTimeQuantization(self, inference_input_output_type):
model = self._getTrainingTimeQuantizedModel()
float_converter = lite.TFLiteConverterV2.from_keras_model(model)
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
quantized_converter = lite.TFLiteConverterV2.from_keras_model(model)
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.inference_input_type = inference_input_output_type
quantized_converter.inference_output_type = inference_input_output_type
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(inference_input_output_type.as_numpy_dtype,
input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(inference_input_output_type.as_numpy_dtype,
output_details[0]['dtype'])
# Ensure that the quantized tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
@test_util.run_v2_only
def testNewQuantizer(self):
"""Test the model quantized by the new converter."""
func, calibration_gen = self._getIntegerQuantizeModel()
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
quantized_converter.representative_dataset = calibration_gen
# default quantizer
quantized_converter._experimental_new_quantizer = False
old_tflite = quantized_converter.convert()
# new quantizer
quantized_converter._experimental_new_quantizer = True
new_tflite = quantized_converter.convert()
for _ in range(5):
input_data = tf.constant(
np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32))
old_value = self._evaluateTFLiteModel(old_tflite, [input_data])
new_value = self._evaluateTFLiteModel(new_tflite, [input_data])
self.assertAllClose(old_value, new_value, atol=1e-01)
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
@test_util.run_v2_only
def testEmbeddings(self, enable_mlir_converter):
"""Test model with embeddings."""
input_data = tf.constant(
np.array(np.random.random_sample((20)), dtype=np.int32))
class EmbeddingModel(tf.keras.Model):
def __init__(self):
super(EmbeddingModel, self).__init__()
self.shared_weights = self.add_weight(
'weights',
shape=(2000, 300),
dtype=tf.float32,
initializer=tf.random_normal_initializer(
mean=0.0, stddev=300**(-0.5)))
@tf.function(input_signature=[tf.TensorSpec(shape=(20), dtype=tf.int32)])
def func(self, x):
return tf.gather(self.shared_weights, x)
# Building the model.
root = EmbeddingModel()
concrete_func = root.func.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = enable_mlir_converter
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.func(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertAllClose(expected_value.numpy(), actual_value[0], atol=1e-05)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a concrete function has debug info captured."""
root = tracking.AutoTrackable()
root.v1 = tf.Variable(3.)
root.f = tf.function(lambda x: root.v1 * x)
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
@test_util.run_v2_only
def testFlexOpWithInt8OpSet(self):
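# Conv3D is presumably not covered by the TFLITE_BUILTINS_INT8 op set used
# below, so SELECT_TF_OPS is also enabled to let unsupported ops fall back to
# the Flex (TensorFlow select) path.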
model = tf.keras.Sequential()
input_shape = (1, 4, 4, 4, 1)
model.add(
tf.keras.layers.Conv3D(
4,
kernel_size=(1, 1, 1),
activation='relu',
input_shape=input_shape[1:]))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(2, activation='relu'))
@tf.function(
input_signature=[tf.TensorSpec(shape=input_shape, dtype=tf.float32)])
def _call_fn(inputs):
return model(inputs, training=False)
concrete_func = _call_fn.get_concrete_function(
tf.TensorSpec(input_shape, dtype=tf.float32))
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8,
tf.lite.OpsSet.SELECT_TF_OPS,
]
tflite_model = converter.convert()
self.assertTrue(tflite_model)
class FromSavedModelTest(lite_v2_test_util.ModelTest):
def _createV1SavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with tf.Graph().as_default():
with tf.compat.v1.Session() as sess:
in_tensor_1 = tf.compat.v1.placeholder(
shape=shape, dtype=tf.float32, name='inputB')
in_tensor_2 = tf.compat.v1.placeholder(
shape=shape, dtype=tf.float32, name='inputA')
variable_node = tf.Variable(1.0, name='variable_node')
out_tensor = in_tensor_1 + in_tensor_2 * variable_node
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
sess.run(tf.compat.v1.variables_initializer([variable_node]))
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
@test_util.run_v2_only
def testV1SimpleModel(self):
"""Test a SavedModel."""
with tf.Graph().as_default():
saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertStartsWith(input_details[0]['name'], 'inputA')
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertStartsWith(
input_details[1]['name'],
'inputB',
)
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[1]['shape'])
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertStartsWith(output_details[0]['name'], 'add')
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization'])
@test_util.run_v2_only
def testTF1HubFormattedModel(self):
"""Test a TF1 hub formatted model."""
saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
# TF1 Hub models are based on V1 SavedModels and omit the saved model
# schema version setting.
saved_model_proto = parse_saved_model(saved_model_dir)
saved_model_proto.saved_model_schema_version = 0
saved_model_pb_file_path = os.path.join(saved_model_dir, 'saved_model.pb')
with file_io.FileIO(saved_model_pb_file_path, 'wb') as writer:
writer.write(saved_model_proto.SerializeToString())
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
@test_util.run_v2_only
def testConstModel(self):
"""Test a basic model with functions to make sure functions are inlined."""
input_data = tf.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = tf.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testVariableModel(self):
"""Test a basic model with Variables with saving/loading the SavedModel."""
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testSignatures(self):
"""Test values for `signature_keys` argument."""
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model with invalid `signature_keys`.
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=['INVALID'])
self.assertIn("Invalid signature key 'INVALID'", str(error.exception))
# Convert model with empty `signature_keys`.
converter = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=[])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testMultipleFunctionModel(self):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = tf.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, {'add': add_func, 'sub': sub_func})
# Try converting multiple functions.
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_saved_model(save_dir)
self.assertIn('Only support a single signature key.', str(error.exception))
@test_util.run_v2_only
def testNoConcreteFunctionModel(self):
root = self._getMultiFunctionModel()
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir)
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_saved_model(save_dir)
self.assertIn('Only support a single signature key.', str(error.exception))
@test_util.run_v2_only
def testKerasSequentialModel(self):
"""Test a simple sequential tf.Keras model."""
input_data = tf.constant(1., shape=[1, 1])
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = tf.keras.models.Sequential([
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1),
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(model, save_dir)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a SavedModel has debug info captured."""
input_data = tf.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = tf.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
options = save_options.SaveOptions(save_debug_info=True)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save, options)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
@test_util.run_v2_only
def testFallbackPath(self):
"""Test a SavedModel fallback path using old converter."""
saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.experimental_new_converter = False
tflite_model = converter.convert()
self.assertTrue(tflite_model)
class FromKerasModelTest(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testSequentialModel(self):
"""Test a simple sequential tf.Keras model."""
input_data = tf.constant(1., shape=[1, 1])
# Create a simple Keras model.
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = tf.keras.models.Sequential([
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(units=1, input_shape=[1])
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
@test_util.run_v2_only
def testSequentialMultiInputOutputModel(self):
"""Test a tf.Keras model with multiple inputs and outputs."""
left_input_data = tf.constant(1., shape=[1, 3])
right_input_data = tf.constant(1., shape=[1, 3])
# Create a simple Keras model.
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_c_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 2))
input_a = tf.keras.layers.Input(shape=(3,), name='input_a')
input_b = tf.keras.layers.Input(shape=(3,), name='input_b')
dense = tf.keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
interm_b = dense(input_b)
merged = tf.keras.layers.concatenate([interm_a, interm_b], name='merge')
output_c = tf.keras.layers.Dense(
3, activation='softmax', name='dense_2')(
merged)
output_d = tf.keras.layers.Dense(
2, activation='softmax', name='dense_3')(
merged)
model = tf.keras.models.Model(
inputs=[input_a, input_b], outputs=[output_c, output_d])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit([input_a_np, input_b_np], [output_c_np, output_d_np], epochs=1)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
# Check values from converted model.
input_data = [left_input_data, right_input_data]
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, input_data)
for tf_result, tflite_result in zip(expected_value, actual_value):
self.assertAllClose(tf_result, tflite_result, atol=1e-05)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a tf.Keras model has debug info captured."""
# Create a simple Keras model.
x = [-1, 0, 1, 2, 3, 4]
y = [-3, -1, 1, 3, 5, 7]
model = tf.keras.models.Sequential(
[tf.keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
converter = lite.TFLiteConverterV2.from_keras_model(model)
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
@test_util.run_v2_only
def testKerasFallbackPath(self):
"""Test keras model which failed when exporting to the saved model."""
input_data = tf.constant(
np.array(np.random.random_sample((20)), dtype=np.float32))
class Model(tf.keras.Model):
def __init__(self):
super(Model, self).__init__()
# A None name will cause a failure in exporting to a saved model.
self.shared_weights = self.add_weight(
name=None,
shape=(20, 1),
dtype=tf.float32,
initializer=tf.random_normal_initializer(
mean=0.0, stddev=300**(-0.5)))
def call(self, x):
return tf.add(self.shared_weights, x)
# Building the model.
model = Model()
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(input_data, input_data, epochs=1)
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
class ControlFlowTest(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testCond(self):
input_data = {
'x': tf.constant([1., 2.], shape=[1, 2]),
'b': tf.constant(True)
}
weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32)
def true_fn(x):
return tf.matmul(x, weights)
def false_fn(x):
return tf.add(x, weights)
@tf.function(input_signature=[
tf.TensorSpec(shape=[1, 2], dtype=tf.float32),
tf.TensorSpec(shape=(), dtype=tf.bool)
])
def model(x, b):
return tf.cond(
b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(**input_data)
actual_value = self._evaluateTFLiteModel(
tflite_model, [input_data['x'], input_data['b']])[0]
self.assertAllClose(expected_value, actual_value)
@test_util.run_v2_only
def testStaticRnn(self):
input_data = tf.constant(
np.array(np.random.random_sample((3, 10)), dtype=np.float32))
cell = tf.compat.v1.nn.rnn_cell.LSTMCell(10)
@tf.function(
input_signature=[tf.TensorSpec(shape=[3, 10], dtype=tf.float32)])
def model(x):
seq = tf.split(x, 3, 0)
return tf.compat.v1.nn.static_rnn(
cell, seq, dtype=tf.float32, sequence_length=[1])
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)[0]
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
for expected, actual in zip(expected_value, actual_value):
self.assertAllClose(expected, actual)
@test_util.run_v2_only
def testWhileLoop(self):
input_data = tf.constant([1., 2., 3., 4.], shape=[2, 2])
weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32)
def condition(x):
return tf.reduce_sum(x) < 100
def body(x):
return tf.add(x, weights)
@tf.function(
input_signature=[tf.TensorSpec(shape=[2, 2], dtype=tf.float32)])
def model(x):
return tf.while_loop(condition, body, [x])
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)[0]
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
self.assertAllClose(expected_value, actual_value)
@test_util.run_v2_only
def testDynamicRnn(self):
input_data = tf.constant(
np.array(np.random.random_sample((3, 10, 10)), dtype=np.float32))
cell = tf.compat.v1.nn.rnn_cell.LSTMCell(10)
@tf.function(
input_signature=[tf.TensorSpec(shape=[3, 10, 10], dtype=tf.float32)])
def model(x):
return tf.compat.v1.nn.dynamic_rnn(cell, x, dtype=tf.float32)
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
for expected, actual in zip(expected_value, actual_value):
if not isinstance(expected, ops.EagerTensor):
expected = expected.c
self.assertAllClose(expected, actual)
@parameterized.named_parameters(('LSTM', recurrent_v2.LSTM),
('SimpleRNN', recurrent.SimpleRNN),
('GRU', recurrent_v2.GRU))
@test_util.run_v2_only
def testKerasRNN(self, rnn_layer):
# This relies on TFLiteConverter to rewrite the unknown batch size to 1. The
# converted model will fail if the input is resized to a batch size other than 1.
input_data = tf.constant(
np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))
rnn_obj = rnn_layer(units=10, input_shape=(10, 10))
model = tf.keras.models.Sequential([
tf.keras.layers.Input(batch_size=1, shape=(10, 10), name='input'),
rnn_obj,
])
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
# Check values from converted model.
expected_value = model.predict(input_data)
self.assertAllClose(expected_value, actual_value, atol=1e-05)
@parameterized.named_parameters(('LSTM', recurrent_v2.LSTM),
('SimpleRNN', recurrent.SimpleRNN),
('GRU', recurrent_v2.GRU))
@test_util.run_v2_only
def testKerasRNNMultiBatches(self, rnn_layer):
input_data = tf.constant(
np.array(np.random.random_sample((4, 10, 10)), dtype=np.float32))
# Specify a fixed batch size(4) for the test model.
x = tf.keras.layers.Input(batch_shape=(4, 10, 10))
y = rnn_layer(units=10, input_shape=(10, 10))(x)
model = tf.keras.Model(inputs=[x], outputs=[y])
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
# Check values from converted model.
expected_value = model.predict(input_data)
self.assertAllClose(expected_value, actual_value, atol=1e-05)
@test_util.run_v2_only
def testKerasBidirectionalRNN(self):
input_data = tf.constant(
np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(batch_size=1, shape=(10, 10), name='input'))
model.add(
tf.keras.layers.Bidirectional(
recurrent_v2.LSTM(units=10, return_sequences=True),
input_shape=(10, 10)))
model.add(tf.keras.layers.Bidirectional(recurrent_v2.LSTM(units=10)))
model.add(tf.keras.layers.Dense(5))
model.add(tf.keras.layers.Activation('softmax'))
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
# Check values from converted model.
expected_value = model.predict(input_data)
self.assertAllClose(expected_value, actual_value, atol=1e-05)
class GrapplerTest(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testConstantFolding(self):
# Constant folding handles the tf.broadcast_to operation, which was not
# supported by TFLite at the time this test was added.
input_data = tf.constant([1., 2., 3., 4., 5., 6., 7., 8., 9.], shape=[3, 3])
@tf.function
def func(x):
y_const = tf.constant([1., 2., 3.])
y_broadcast = tf.broadcast_to(y_const, [3, 3])
return tf.matmul(x, y_broadcast)
root = tracking.AutoTrackable()
root.f = func
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
self.assertAllClose(expected_value, actual_value)
# Enable hybrid quantization, same result
converter.optimizations = [lite.Optimize.DEFAULT]
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
self.assertAllClose(expected_value, actual_value)
class UnknownShapes(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testMatMul(self):
input_data = tf.constant(
np.array(np.random.random_sample((10, 4)), dtype=np.float32))
@tf.function(
input_signature=[tf.TensorSpec(shape=[None, 4], dtype=tf.float32)])
def model(in_tensor):
shape = tf.shape(in_tensor)
fill = tf.transpose(tf.fill(shape, 1.))
return tf.matmul(fill, in_tensor)
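# In the model above, fill is a transposed tensor of ones with shape
# [4, batch], so its product with the [batch, 4] input always has static
# shape [4, 4] even though the batch dimension is unknown at conversion time.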
concrete_func = model.get_concrete_function()
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)
actual_value = self._evaluateTFLiteModel(
tflite_model, [input_data], input_shapes=[([-1, 4], [10, 4])])[0]
self.assertAllClose(expected_value, actual_value, atol=1e-06)
def _getIntegerQuantizeModelWithUnknownShapes(self):
np.random.seed(0)
@tf.function(
input_signature=[tf.TensorSpec(shape=[None, 33], dtype=tf.float32)])
def model(input_tensor):
"""Define a model with tf.MatMul and unknown shapes."""
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
const_tensor = tf.constant(
np.random.uniform(low=-10., high=10., size=[33, 33]),
shape=[33, 33],
dtype=tf.float32,
name='inputB')
shape = tf.shape(input_tensor)
fill = tf.transpose(tf.fill(shape, 1.))
mult = tf.matmul(fill, input_tensor)
return tf.matmul(mult, const_tensor)
root = tracking.AutoTrackable()
root.f = model
concrete_func = root.f.get_concrete_function()
def calibration_gen():
for batch in range(5, 20, 5):
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(batch, 33)).astype(np.float32)]
return concrete_func, calibration_gen
@test_util.run_v2_only
def testMatMulQuantize(self):
concrete_func, _ = self._getIntegerQuantizeModelWithUnknownShapes()
float_converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func])
float_tflite_model = float_converter.convert()
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_tflite_model = quantized_converter.convert()
# The default input and output types should be float.
quantized_interpreter = Interpreter(model_content=quantized_tflite_model)
quantized_interpreter.allocate_tensors()
input_details = quantized_interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([-1, 33], input_details[0]['shape_signature'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
@test_util.run_v2_only
def testMatMulCalibrateAndQuantize(self):
concrete_func, calibration_gen = \
self._getIntegerQuantizeModelWithUnknownShapes()
float_converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func])
float_tflite_model = float_converter.convert()
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite_model = quantized_converter.convert()
# The default input and output types should be float.
quantized_interpreter = Interpreter(model_content=quantized_tflite_model)
quantized_interpreter.allocate_tensors()
input_details = quantized_interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([-1, 33], input_details[0]['shape_signature'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
def testBatchMatMul(self):
input_data_1 = tf.constant(
np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))
input_data_2 = tf.constant(
np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32),
tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32)
])
def model(in_tensor_1, in_tensor_2):
return tf.matmul(in_tensor_1, in_tensor_2)
concrete_func = model.get_concrete_function()
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data_1, input_data_2)
actual_value = self._evaluateTFLiteModel(
tflite_model, [input_data_1, input_data_2],
input_shapes=[([-1, 256, 256], [1, 256, 256])])[0]
self.assertAllClose(expected_value, actual_value, atol=4)
def testSizeInvalid(self):
@tf.function(input_signature=[
tf.TensorSpec(shape=[1, None, 16, 3], dtype=tf.float32)
])
def model(in_tensor):
return in_tensor + in_tensor
concrete_func = model.get_concrete_function()
# Test invalid shape. None after 1st dimension. Run with TOCO in order to
# invoke shape checking code.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = False
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'in_tensor\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
if __name__ == '__main__':
test.main()
|
|
# pylint: disable=g-backslash-continuation
# Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from src.test.py.bazel import test_base
from src.test.py.bazel.bzlmod.test_utils import BazelRegistry
class BazelModuleTest(test_base.TestBase):
def setUp(self):
test_base.TestBase.setUp(self)
self.registries_work_dir = tempfile.mkdtemp(dir=self._test_cwd)
self.main_registry = BazelRegistry(
os.path.join(self.registries_work_dir, 'main'))
self.main_registry.createCcModule('A', '1.0') \
.createCcModule('A', '1.1') \
.createCcModule('B', '1.0', {'A': '1.0'}, {'A': 'com_foo_bar_a'}) \
.createCcModule('B', '1.1', {'A': '1.1'}) \
.createCcModule('C', '1.1', {'A': '1.1', 'B': '1.1'})
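# The main registry now provides: [email protected], [email protected], [email protected] (depending on
# [email protected], which B sees as repo 'com_foo_bar_a'), [email protected] (depending on [email protected])
# and [email protected] (depending on [email protected] and [email protected]).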
self.ScratchFile(
'.bazelrc',
[
# In an IPv6-only network, this has to be enabled.
# 'startup --host_jvm_args=-Djava.net.preferIPv6Addresses=true',
'build --experimental_enable_bzlmod',
'build --registry=' + self.main_registry.getURL(),
'build --verbose_failures',
])
def writeMainProjectFiles(self):
self.ScratchFile('WORKSPACE')
self.ScratchFile('A.patch', [
'--- a/a.cc',
'+++ b/a.cc',
'@@ -1,6 +1,6 @@',
' #include <stdio.h>',
' #include "a.h"',
' void hello_a(const std::string& caller) {',
'- std::string lib_name = "[email protected]";',
'+ std::string lib_name = "[email protected] (locally patched)";',
' printf("%s => %s\\n", caller.c_str(), lib_name.c_str());',
' }',
])
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = [',
' "@A//:lib_a",',
' "@B//:lib_b",',
' ],',
')',
])
self.ScratchFile('main.cc', [
'#include "a.h"',
'#include "b.h"',
'int main() {',
' hello_a("main function");',
' hello_b("main function");',
'}',
])
def testSimple(self):
self.ScratchFile('WORKSPACE')
self.ScratchFile('MODULE.bazel', [
'bazel_dep(name = "A", version = "1.0")',
])
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = ["@A//:lib_a"],',
')',
])
self.ScratchFile('main.cc', [
'#include "a.h"',
'int main() {',
' hello_a("main function");',
'}',
])
_, stdout, _ = self.RunBazel(['run', '//:main'], allow_failure=False)
self.assertIn('main function => [email protected]', stdout)
def testSimpleTransitive(self):
self.ScratchFile('WORKSPACE')
self.ScratchFile('MODULE.bazel', [
'bazel_dep(name = "B", version = "1.0")',
])
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = ["@B//:lib_b"],',
')',
])
self.ScratchFile('main.cc', [
'#include "b.h"',
'int main() {',
' hello_b("main function");',
'}',
])
_, stdout, _ = self.RunBazel(['run', '//:main'], allow_failure=False)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('[email protected] => [email protected]', stdout)
def testSimpleDiamond(self):
self.writeMainProjectFiles()
self.ScratchFile(
'MODULE.bazel',
[
'bazel_dep(name = "A", version = "1.1")',
# After MVS (Minimal Version Selection), [email protected] has to depend on [email protected].
'bazel_dep(name = "B", version = "1.0")',
])
_, stdout, _ = self.RunBazel(['run', '//:main'], allow_failure=False)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('[email protected] => [email protected]', stdout)
def testSingleVersionOverrideWithPatch(self):
self.writeMainProjectFiles()
self.ScratchFile(
'MODULE.bazel',
[
'bazel_dep(name = "A", version = "1.1")',
'bazel_dep(name = "B", version = "1.1")',
# Both the main module and [email protected] have to depend on the locally patched [email protected].
'single_version_override(',
' module_name = "A",',
' version = "1.0",',
' patches = ["//:A.patch"],',
' patch_strip = 1,',
')',
])
_, stdout, _ = self.RunBazel(['run', '//:main'], allow_failure=False)
self.assertIn('main function => [email protected] (locally patched)', stdout)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('[email protected] => [email protected] (locally patched)', stdout)
def testRegistryOverride(self):
self.writeMainProjectFiles()
another_registry = BazelRegistry(
os.path.join(self.registries_work_dir, 'another'),
' from another registry')
another_registry.createCcModule('A', '1.0')
self.ScratchFile('MODULE.bazel', [
'bazel_dep(name = "A", version = "1.0")',
'bazel_dep(name = "B", version = "1.0")',
'single_version_override(',
' module_name = "A",',
' registry = "%s",' % another_registry.getURL(),
')',
])
_, stdout, _ = self.RunBazel(['run', '//:main'], allow_failure=False)
self.assertIn('main function => [email protected] from another registry', stdout)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('[email protected] => [email protected] from another registry', stdout)
def testArchiveOverride(self):
self.writeMainProjectFiles()
archive_a_1_0 = self.main_registry.archives.joinpath('A.1.0.zip')
self.ScratchFile('MODULE.bazel', [
'bazel_dep(name = "A", version = "1.1")',
'bazel_dep(name = "B", version = "1.1")',
'archive_override(',
' module_name = "A",',
' urls = ["%s"],' % archive_a_1_0.as_uri(),
' patches = ["//:A.patch"],',
' patch_strip = 1,',
')',
])
_, stdout, _ = self.RunBazel(['run', '//:main'], allow_failure=False)
self.assertIn('main function => [email protected] (locally patched)', stdout)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('[email protected] => [email protected] (locally patched)', stdout)
def testGitOverride(self):
self.writeMainProjectFiles()
src_a_1_0 = self.main_registry.projects.joinpath('A', '1.0')
self.RunProgram(['git', 'init'], cwd=src_a_1_0, allow_failure=False)
self.RunProgram(['git', 'config', 'user.name', 'tester'],
cwd=src_a_1_0,
allow_failure=False)
self.RunProgram(['git', 'config', 'user.email', '[email protected]'],
cwd=src_a_1_0,
allow_failure=False)
self.RunProgram(['git', 'add', './'], cwd=src_a_1_0, allow_failure=False)
self.RunProgram(['git', 'commit', '-m', 'Initial commit.'],
cwd=src_a_1_0,
allow_failure=False)
_, stdout, _ = self.RunProgram(['git', 'rev-parse', 'HEAD'],
cwd=src_a_1_0,
allow_failure=False)
commit = stdout[0].strip()
self.ScratchFile('MODULE.bazel', [
'bazel_dep(name = "A", version = "1.1")',
'bazel_dep(name = "B", version = "1.1")',
'git_override(',
' module_name = "A",',
' remote = "%s",' % src_a_1_0.as_uri(),
' commit = "%s",' % commit,
' patches = ["//:A.patch"],',
' patch_strip = 1,',
')',
])
_, stdout, _ = self.RunBazel(['run', '//:main'], allow_failure=False)
self.assertIn('main function => [email protected] (locally patched)', stdout)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('[email protected] => [email protected] (locally patched)', stdout)
def testLocalPathOverride(self):
src_a_1_0 = self.main_registry.projects.joinpath('A', '1.0')
self.writeMainProjectFiles()
self.ScratchFile('MODULE.bazel', [
'bazel_dep(name = "A", version = "1.1")',
'bazel_dep(name = "B", version = "1.1")',
'local_path_override(',
' module_name = "A",',
' path = "%s",' % str(src_a_1_0.resolve()).replace('\\', '/'),
')',
])
_, stdout, _ = self.RunBazel(['run', '//:main'], allow_failure=False)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('[email protected] => [email protected]', stdout)
def testRemotePatchForBazelDep(self):
patch_file = self.ScratchFile('A.patch', [
'--- a/a.cc',
'+++ b/a.cc',
'@@ -1,6 +1,6 @@',
' #include <stdio.h>',
' #include "a.h"',
' void hello_a(const std::string& caller) {',
'- std::string lib_name = "A@1.1-1";',
'+ std::string lib_name = "A@1.1-1 (remotely patched)";',
' printf("%s => %s\\n", caller.c_str(), lib_name.c_str());',
' }',
])
self.main_registry.createCcModule(
'A', '1.1-1', patches=[patch_file], patch_strip=1)
self.ScratchFile('WORKSPACE')
self.ScratchFile('MODULE.bazel', [
'bazel_dep(name = "A", version = "1.1-1")',
])
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = ["@A//:lib_a"],',
')',
])
self.ScratchFile('main.cc', [
'#include "a.h"',
'int main() {',
' hello_a("main function");',
'}',
])
_, stdout, _ = self.RunBazel(['run', '//:main'], allow_failure=False)
self.assertIn('main function => A@1.1-1 (remotely patched)', stdout)
def testRepoNameForBazelDep(self):
self.writeMainProjectFiles()
self.ScratchFile(
'MODULE.bazel',
[
'bazel_dep(name = "A", version = "1.0", repo_name = "my_repo_a_name")',
# B should still be able to access A as com_foo_bar_a
'bazel_dep(name = "B", version = "1.0")',
])
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = [',
' "@my_repo_a_name//:lib_a",',
' "@B//:lib_b",',
' ],',
')',
])
_, stdout, _ = self.RunBazel(['run', '//:main'], allow_failure=False)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('[email protected] => [email protected]', stdout)
def testCheckDirectDependencies(self):
self.writeMainProjectFiles()
self.ScratchFile('MODULE.bazel', [
'bazel_dep(name = "A", version = "1.0")',
'bazel_dep(name = "B", version = "1.0")',
'bazel_dep(name = "C", version = "1.1")',
])
_, stdout, stderr = self.RunBazel(
['run', '//:main', '--check_direct_dependencies=warning'],
allow_failure=False)
self.assertIn(
'WARNING: For repository \'A\', the root module requires module version [email protected], but got [email protected] in the resolved dependency graph.',
stderr)
self.assertIn(
'WARNING: For repository \'B\', the root module requires module version [email protected], but got [email protected] in the resolved dependency graph.',
stderr)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('main function => [email protected]', stdout)
self.assertIn('[email protected] => [email protected]', stdout)
exit_code, _, stderr = self.RunBazel(
['run', '//:main', '--check_direct_dependencies=error'],
allow_failure=True)
self.AssertExitCode(exit_code, 48, stderr)
self.assertIn(
'ERROR: For repository \'A\', the root module requires module version [email protected], but got [email protected] in the resolved dependency graph.',
stderr)
self.assertIn(
'ERROR: For repository \'B\', the root module requires module version [email protected], but got [email protected] in the resolved dependency graph.',
stderr)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# Every python controller needs these lines
import roslib; roslib.load_manifest('practica_turtlebot')
import rospy
import time
import math
import numpy as np
# The velocity command message
from geometry_msgs.msg import Vector3, Twist, Quaternion, Pose, Point
# The laser scan message
from sensor_msgs.msg import LaserScan
# The odometry message
from nav_msgs.msg import Odometry
# We use a hyperbolic tangent as a transfer function
from math import tanh, radians, degrees
import tf
def angle_wrap(a):
'''
Returns the angle a normalized between -pi and pi.
Works with numbers and numpy arrays.
'''
a = a % (2 * np.pi)
if (isinstance(a, int) or isinstance(a, float)) and (a > np.pi):
a -= 2 * np.pi
elif isinstance(a, np.ndarray): # arrays
a[a > np.pi] -= 2 * np.pi
return a
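# For example, angle_wrap(3 * np.pi / 2) returns -np.pi / 2, and
# angle_wrap(np.array([0.0, 3 * np.pi / 2])) returns array([0.0, -np.pi / 2]).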
class Driver(object):
def __init__(self, rate=5):
# Create end_pose
self.end_pose = Pose()
self.end_pose.position.x = rospy.get_param('x',0)
rospy.loginfo('X: {0}'.format(self.end_pose.position.x ))
self.end_pose.position.y = rospy.get_param('y',0)
rospy.loginfo('Y: {0}'.format(self.end_pose.position.y))
self.current_pose = Pose()
self.rate = rate
self.obstacle = False
self.final_point = None
#VARIABLES
# This node uses a small state machine so the behaviour can be updated in real time.
# Status values: 0 = moving towards the goal, 1 = obstacle in front (the robot turns),
# 2 = moving forward to get around the obstacle, 5 = finished.
self.status=0
# Maximum accepted position error (m) when deciding whether the goal has been reached.
self.accepted_error = 0.05
# Distance (m) at which an obstacle is detected; the robot stops and begins to turn.
self.obstacle_threshold = 1
# Once the closest range exceeds obstacle_threshold + obstacle_threshold_add (m),
# the robot starts moving forward again to get around the obstacle.
self.obstacle_threshold_add = 0.35
# Distance (m) the robot moves forward in status 2 to try to clear the obstacle
# before heading towards the goal again.
self.distance_try_avoid_obstacle = 0.6
#######GO FORWARD#########
# Multiplying this constant by the distance to the goal gives a variable linear velocity.
self.linear_constant=0.5
# Maximum speed of the robot; if it is too high, the robot may not have enough time to stop in front of an obstacle.
self.max_speed=0.25
#######GO FORWARD DISTANCE#########
# Accepted error (m) when moving towards the intermediate waypoint.
self.accepted_error_try_avoid_obstable = 0.1
#######TURN#########
# Turn rate (degrees/sec) used when an obstacle is detected.
self.turn_speed = 25  # Ideal rate; if the robot turns faster, the laser scan does not have time to react.
####### head_toward_goal #########
# Multiplying this constant by the distance to the goal gives a variable linear velocity.
self.head_toward_linear_constant=1
# Multiplying this constant by the angle to the goal gives a variable angular velocity.
self.head_toward_angular_constant=2
# Subscribers for the laser and odometry data
# When data of type LaserScan arrives on topic 'scan', laser_callback is called immediately
self.sub_scan = rospy.Subscriber('scan', LaserScan, self.laser_callback) # self.sub_scan.unregister()
self.sub_odom = rospy.Subscriber('odom', Odometry, self.odometry_callback) # self.sub_odom.unregister()
# Publisher for movement commands
# We publish Twist messages on the velocity topic
self.cmd_vel = rospy.Publisher("/mobile_base/commands/velocity", Twist)
# Let the world know we're ready
rospy.loginfo('Driver initialized')
rospy.loginfo('Starting bug_0 algorithm')
##################### NAVIGATION #####################
def bug0(self):
rospy.loginfo('status: {0}'.format(self.status))
if self.status==0: # Move towards the goal
self.move_toward_goal()
elif self.status==1: # Obstacle in front: turn
print("obstacle in front, turning")
self.turn()
elif self.status==2: # Move forward and check whether the obstacle has been cleared
print("moving forward to check whether the obstacle is still there")
self.go_forward_X_distance()
##################### ORIENTATION #####################
def turn(self):
# rospy.loginfo('Turning robot, Speed: {0} degrees/sec'.format(turn_speed))
twist_turn = Twist()
# let's turn at turn_speed degrees/sec
twist_turn.angular.z = radians(self.turn_speed)
# publish the command
self.cmd_vel.publish(twist_turn)
##################### MOVEMENT #####################
# Move the robot in the forward direction
#def go_forward(self):
# twist_forward = Twist()
# let's go forward at speed m/s
# twist_forward.linear.x = self.distance_to_goal()* self.linear_constant
# rospy.loginfo('Moving forward, Speed: {0}'.format(twist_forward.linear.x))
# publish the command
# self.cmd_vel.publish(twist_forward)
# Move the robot forward towards final_point, the waypoint computed from the current position to get around the obstacle
def go_forward_X_distance(self):
distance = math.hypot(self.final_point.x - self.current_pose.position.x, self.final_point.y - self.current_pose.position.y)
# Since the robot is not perfectly aligned, a fairly large threshold is needed for it to reach the waypoint
if (distance> self.accepted_error_try_avoid_obstable):
twist_forward = Twist()
# let's go forward at speed m/s
twist_forward.linear.x = distance* self.linear_constant
rospy.loginfo('Moving forward X distance, Speed: {0}'.format(twist_forward.linear.x))
# publish the command
self.cmd_vel.publish(twist_forward)
else:
self.status=0
##################### OBJECTIVE #####################
def move_toward_goal(self):
if not self.is_goal():
self.head_toward_goal()
else:
print "Finished"
self.status=5 #Finished
self.shutdown()
def distance_to_goal(self):
distance = math.hypot(self.end_pose.position.x - self.current_pose.position.x, self.end_pose.position.y - self.current_pose.position.y)
rospy.loginfo('Distance to goal: {0}'.format(distance))
return distance
# Return true if the robot has reached the goal with the given accepted error. False otherwise.
def is_goal(self):
return self.distance_to_goal() < self.accepted_error
def degrees_to_goal(self):
# Desired angle
deltaX = self.end_pose.position.x - self.current_pose.position.x
deltaY = self.end_pose.position.y - self.current_pose.position.y
desired_angle_radians = math.atan2(deltaY, deltaX)
# Current angle
current_quat = self.current_pose.orientation
current_euler = tf.transformations.euler_from_quaternion([current_quat.x,current_quat.y,current_quat.z,current_quat.w])
current_position_theta = current_euler[2]
distance_radians = angle_wrap((desired_angle_radians) - (current_position_theta))
distance_degrees = degrees(distance_radians)
rospy.loginfo('Degrees to face goal theta = {0}'.format(distance_degrees))
# rospy.loginfo('Degrees to face goal = {0}'.format(self.degrees_to_goal_odom()))
return distance_degrees
# Turn the robot facing the goal
def head_toward_goal(self):
twist_turn = Twist()
# let's go forward at speed m/s
# If the heading error is large, the division below is large, so the robot moves with a slow linear velocity while it corrects its angle; otherwise it would start going in circles
degrees_to_goal = self.degrees_to_goal()
if degrees_to_goal:
twist_turn.linear.x = min(self.distance_to_goal()*self.head_toward_linear_constant/abs(self.degrees_to_goal()), self.max_speed)
else:
twist_turn.linear.x = self.max_speed
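# For example, with distance_to_goal() = 2.0 m and degrees_to_goal() = 90 degrees,
# the linear speed is min(2.0 * 1 / 90, 0.25), about 0.022 m/s, so the robot creeps
# forward while the angular command below corrects the heading.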
rospy.loginfo('Moving forward, Speed: {0}'.format(twist_turn.linear.x))
# turn towards the goal at a rate proportional to the remaining angle (degrees/sec)
twist_turn.angular.z = radians(self.degrees_to_goal()*self.head_toward_angular_constant)
rospy.loginfo('Turning to goal, Speed: {0}'.format(twist_turn.angular.z))
# publish the command
self.cmd_vel.publish(twist_turn)
##################### ROS CALLBACKS #####################
# The laser returns NaN if an object is too far or too near. We must take care!
def laser_callback(self, scan):
closest = np.nanmin(scan.ranges)
rospy.loginfo('Closest range is: {0}'.format(closest))
if np.isnan(closest): # All values returned by scan.ranges are NaN
closest=999 # Treat NaN as "very far"; "too near" is not possible here because the robot turns before getting that close
rospy.loginfo('REAL Closest range is: {0}'.format(closest))
self.obstacle = self.obstacle_threshold >= closest
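# Note the hysteresis: turning starts once the closest range drops to
# obstacle_threshold (1 m), but forward motion only resumes after the range
# exceeds obstacle_threshold + obstacle_threshold_add (1.35 m), which helps
# avoid rapid switching between the two states.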
if (self.status==0 or self.status==2) and self.obstacle:
self.status=1 # Obstacle detected: turn
if self.status==1 and (self.obstacle_threshold + self.obstacle_threshold_add) < closest:
self.status=2 # Obstacle far enough away: move forward to get around it
# Compute the next waypoint: a point distance_try_avoid_obstacle metres ahead along the current heading
current_quat=self.current_pose.orientation
current_euler = tf.transformations.euler_from_quaternion([current_quat.x,current_quat.y,current_quat.z,current_quat.w])
current_position_theta = current_euler[2]
self.final_point = Point()
self.final_point.x=self.current_pose.position.x+ self.distance_try_avoid_obstacle * (np.cos(current_position_theta))
self.final_point.y=self.current_pose.position.y+ self.distance_try_avoid_obstacle * (np.sin(current_position_theta))
def odometry_callback(self, odom):
self.current_pose = odom.pose.pose
##################### ROS SYSTEM #####################
def stop(self):
rospy.loginfo('Stopping TurtleBot')
twist_stop = Twist()
self.cmd_vel.publish(twist_stop)
def shutdown(self):
rospy.loginfo("Shutting down TurtleBot")
self.stop()
rospy.sleep(1)
rospy.signal_shutdown("Shutdown function has been called")
|
|
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import servicetype_db as st_db
import sqlalchemy as sa
from sqlalchemy.ext import orderinglist
from sqlalchemy import orm
from neutron_lbaas.services.loadbalancer import constants as lb_const
class SessionPersistenceV2(model_base.BASEV2):
__tablename__ = "lbaas_sessionpersistences"
pool_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_pools.id"),
primary_key=True,
nullable=False)
type = sa.Column(sa.Enum(*lb_const.SUPPORTED_SP_TYPES,
name="lbaas_sesssionpersistences_typev2"),
nullable=False)
cookie_name = sa.Column(sa.String(1024), nullable=True)
class LoadBalancerStatistics(model_base.BASEV2):
"""Represents load balancer statistics."""
NAME = 'loadbalancer_stats'
__tablename__ = "lbaas_loadbalancer_statistics"
loadbalancer_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_loadbalancers.id"),
primary_key=True,
nullable=False)
bytes_in = sa.Column(sa.BigInteger, nullable=False)
bytes_out = sa.Column(sa.BigInteger, nullable=False)
active_connections = sa.Column(sa.BigInteger, nullable=False)
total_connections = sa.Column(sa.BigInteger, nullable=False)
@orm.validates('bytes_in', 'bytes_out',
'active_connections', 'total_connections')
def validate_non_negative_int(self, key, value):
if value < 0:
data = {'key': key, 'value': value}
raise ValueError(_('The %(key)s field can not have '
'negative value. '
'Current value is %(value)d.') % data)
return value
class MemberV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron load balancer member."""
NAME = 'member'
__tablename__ = "lbaas_members"
__table_args__ = (
sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port',
name='uniq_pool_address_port_v2'),
)
pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"),
nullable=False)
address = sa.Column(sa.String(64), nullable=False)
protocol_port = sa.Column(sa.Integer, nullable=False)
weight = sa.Column(sa.Integer, nullable=True)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
subnet_id = sa.Column(sa.String(36), nullable=True)
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
@property
def root_loadbalancer(self):
return self.pool.listener.loadbalancer
class HealthMonitorV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron load balancer healthmonitor."""
NAME = 'healthmonitor'
__tablename__ = "lbaas_healthmonitors"
type = sa.Column(sa.Enum(*lb_const.SUPPORTED_HEALTH_MONITOR_TYPES,
name="healthmonitors_typev2"),
nullable=False)
delay = sa.Column(sa.Integer, nullable=False)
timeout = sa.Column(sa.Integer, nullable=False)
max_retries = sa.Column(sa.Integer, nullable=False)
http_method = sa.Column(sa.String(16), nullable=True)
url_path = sa.Column(sa.String(255), nullable=True)
expected_codes = sa.Column(sa.String(64), nullable=True)
provisioning_status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
@property
def root_loadbalancer(self):
return self.pool.listener.loadbalancer
class PoolV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron load balancer pool."""
NAME = 'pool'
__tablename__ = "lbaas_pools"
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
healthmonitor_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_healthmonitors.id"),
unique=True,
nullable=True)
protocol = sa.Column(sa.Enum(*lb_const.POOL_SUPPORTED_PROTOCOLS,
name="pool_protocolsv2"),
nullable=False)
lb_algorithm = sa.Column(sa.Enum(*lb_const.SUPPORTED_LB_ALGORITHMS,
name="lb_algorithmsv2"),
nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
members = orm.relationship(MemberV2,
backref=orm.backref("pool", uselist=False),
cascade="all, delete-orphan",
lazy='joined')
healthmonitor = orm.relationship(
HealthMonitorV2,
backref=orm.backref("pool", uselist=False),
lazy='joined')
sessionpersistence = orm.relationship(
SessionPersistenceV2,
uselist=False,
backref=orm.backref("pool", uselist=False),
cascade="all, delete-orphan",
lazy='joined')
@property
def root_loadbalancer(self):
return self.listener.loadbalancer
class LoadBalancer(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron load balancer."""
NAME = 'loadbalancer'
__tablename__ = "lbaas_loadbalancers"
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
vip_subnet_id = sa.Column(sa.String(36), nullable=False)
vip_port_id = sa.Column(sa.String(36), sa.ForeignKey(
'ports.id', name='fk_lbaas_loadbalancers_ports_id'))
vip_address = sa.Column(sa.String(36))
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
vip_port = orm.relationship(models_v2.Port)
stats = orm.relationship(
LoadBalancerStatistics,
uselist=False,
backref=orm.backref("loadbalancer", uselist=False),
cascade="all, delete-orphan",
lazy='joined')
provider = orm.relationship(
st_db.ProviderResourceAssociation,
uselist=False,
lazy="joined",
primaryjoin="LoadBalancer.id==ProviderResourceAssociation.resource_id",
foreign_keys=[st_db.ProviderResourceAssociation.resource_id],
# this is only for old API backwards compatibility because when a load
# balancer is deleted the pool ID should be the same as the load
# balancer ID and should not be cleared out in this table
viewonly=True
)
@property
def root_loadbalancer(self):
return self
class SNI(model_base.BASEV2):
"""Many-to-many association between Listener and TLS container ids
Making the SNI certificates list, ordered using the position
"""
NAME = 'sni'
__tablename__ = "lbaas_sni"
listener_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_listeners.id"),
primary_key=True,
nullable=False)
tls_container_id = sa.Column(sa.String(36),
primary_key=True,
nullable=False)
position = sa.Column(sa.Integer)
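# 'position' keeps the certificates ordered; it is maintained by the
# ordering_list collection on Listener.sni_containers below.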
@property
def root_loadbalancer(self):
return self.listener.loadbalancer
class Listener(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron listener."""
NAME = 'listener'
__tablename__ = "lbaas_listeners"
__table_args__ = (
sa.schema.UniqueConstraint('loadbalancer_id', 'protocol_port',
name='uniq_loadbalancer_listener_port'),
)
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
default_pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"),
unique=True)
loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey(
"lbaas_loadbalancers.id"))
protocol = sa.Column(sa.Enum(*lb_const.LISTENER_SUPPORTED_PROTOCOLS,
name="listener_protocolsv2"),
nullable=False)
default_tls_container_id = sa.Column(sa.String(36),
default=None, nullable=True)
sni_containers = orm.relationship(
SNI,
backref=orm.backref("listener", uselist=False),
uselist=True,
lazy="joined",
primaryjoin="Listener.id==SNI.listener_id",
order_by='SNI.position',
collection_class=orderinglist.ordering_list(
'position'),
foreign_keys=[SNI.listener_id],
cascade="all, delete-orphan"
)
protocol_port = sa.Column(sa.Integer, nullable=False)
connection_limit = sa.Column(sa.Integer)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
default_pool = orm.relationship(
PoolV2, backref=orm.backref("listener", uselist=False), lazy='joined')
loadbalancer = orm.relationship(
LoadBalancer, backref=orm.backref("listeners"), lazy='joined')
@property
def root_loadbalancer(self):
return self.loadbalancer
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import email.utils
import re
import time
import jsonschema
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
import six
from six.moves import urllib
from tempest.lib.common import http
from tempest.lib.common import jsonschema_validator
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions
# redrive rate limited calls at most twice
MAX_RECURSION_DEPTH = 2
# All the successful HTTP status codes from RFC 7231 & 4918
HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206, 207)
# All the redirection HTTP status codes from RFC 7231 & 4918
HTTP_REDIRECTION = (300, 301, 302, 303, 304, 305, 306, 307)
# JSON Schema validator and format checker used for JSON Schema validation
JSONSCHEMA_VALIDATOR = jsonschema_validator.JSONSCHEMA_VALIDATOR
FORMAT_CHECKER = jsonschema_validator.FORMAT_CHECKER
class RestClient(object):
"""Unified OpenStack RestClient class
This class is used as a base for building OpenStack API clients. It is
intended to provide a base layer for wrapping outgoing http requests in
keystone auth as well as providing response code checking and error
handling.
:param auth_provider: an auth provider object used to wrap requests in auth
:param str service: The service name to use for the catalog lookup
:param str region: The region to use for the catalog lookup
:param str name: The endpoint name to use for the catalog lookup; this
returns only if the service exists
:param str endpoint_type: The endpoint type to use for the catalog lookup
:param int build_interval: Time in seconds between two status checks in
wait loops
:param int build_timeout: Timeout in seconds to wait for a wait operation.
:param bool disable_ssl_certificate_validation: Set to true to disable ssl
certificate validation
:param str ca_certs: File containing the CA Bundle to use in verifying a
TLS server cert
:param str trace_requests: Regex to use for specifying logging the entirety
of the request and response payload
:param str http_timeout: Timeout in seconds to wait for the http request to
return
:param str proxy_url: http proxy url to use.
"""
# The version of the API this client implements
api_version = None
LOG = logging.getLogger(__name__)
def __init__(self, auth_provider, service, region,
endpoint_type='publicURL',
build_interval=1, build_timeout=60,
disable_ssl_certificate_validation=False, ca_certs=None,
trace_requests='', name=None, http_timeout=None,
proxy_url=None):
self.auth_provider = auth_provider
self.service = service
self.region = region
self.name = name
self.endpoint_type = endpoint_type
self.build_interval = build_interval
self.build_timeout = build_timeout
self.trace_requests = trace_requests
self._skip_path = False
self.general_header_lc = set(('cache-control', 'connection',
'date', 'pragma', 'trailer',
'transfer-encoding', 'via',
'warning'))
self.response_header_lc = set(('accept-ranges', 'age', 'etag',
'location', 'proxy-authenticate',
'retry-after', 'server',
'vary', 'www-authenticate'))
dscv = disable_ssl_certificate_validation
if proxy_url:
self.http_obj = http.ClosingProxyHttp(
proxy_url,
disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
timeout=http_timeout)
else:
self.http_obj = http.ClosingHttp(
disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
timeout=http_timeout)
def get_headers(self, accept_type=None, send_type=None):
"""Return the default headers which will be used with outgoing requests
        :param str accept_type: The media type to use for the Accept header;
                                if one isn't provided, 'json' is used
        :param str send_type: The media type to use for the Content-Type
                              header; if one isn't provided, 'json' is used
:rtype: dict
:return: The dictionary of headers which can be used in the headers
dict for outgoing request
"""
if accept_type is None:
accept_type = 'json'
if send_type is None:
send_type = 'json'
return {'Content-Type': 'application/%s' % send_type,
'Accept': 'application/%s' % accept_type}
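    # Hedged illustration, not part of the original class: with the defaults
    # above, get_headers() returns
    #   {'Content-Type': 'application/json', 'Accept': 'application/json'}
    # and get_headers(send_type='xml') is assumed to swap only the
    # Content-Type to 'application/xml'.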
def __str__(self):
STRING_LIMIT = 80
str_format = ("service:%s, base_url:%s, "
"filters: %s, build_interval:%s, build_timeout:%s"
"\ntoken:%s..., \nheaders:%s...")
return str_format % (self.service, self.base_url,
self.filters, self.build_interval,
self.build_timeout,
str(self.token)[0:STRING_LIMIT],
str(self.get_headers())[0:STRING_LIMIT])
@property
def user(self):
"""The username used for requests
:rtype: string
:return: The username being used for requests
"""
return self.auth_provider.credentials.username
@property
def user_id(self):
"""The user_id used for requests
:rtype: string
:return: The user id being used for requests
"""
return self.auth_provider.credentials.user_id
@property
def tenant_name(self):
"""The tenant/project being used for requests
:rtype: string
:return: The tenant/project name being used for requests
"""
return self.auth_provider.credentials.tenant_name
@property
def tenant_id(self):
"""The tenant/project id being used for requests
:rtype: string
:return: The tenant/project id being used for requests
"""
return self.auth_provider.credentials.tenant_id
@property
def password(self):
"""The password being used for requests
:rtype: string
:return: The password being used for requests
"""
return self.auth_provider.credentials.password
@property
def base_url(self):
return self.auth_provider.base_url(filters=self.filters)
@property
def token(self):
return self.auth_provider.get_token()
@property
def filters(self):
_filters = dict(
service=self.service,
endpoint_type=self.endpoint_type,
region=self.region,
name=self.name
)
if self.api_version is not None:
_filters['api_version'] = self.api_version
if self._skip_path:
_filters['skip_path'] = self._skip_path
return _filters
def skip_path(self):
"""When set, ignore the path part of the base URL from the catalog"""
self._skip_path = True
def reset_path(self):
"""When reset, use the base URL from the catalog as-is"""
self._skip_path = False
@classmethod
def expected_success(cls, expected_code, read_code):
"""Check expected success response code against the http response
:param int expected_code: The response code that is expected.
Optionally a list of integers can be used
to specify multiple valid success codes
:param int read_code: The response code which was returned in the
response
:raises AssertionError: if the expected_code isn't a valid http success
response code
:raises exceptions.InvalidHttpSuccessCode: if the read code isn't an
expected http success code
"""
if not isinstance(read_code, int):
raise TypeError("'read_code' must be an int instead of (%s)"
% type(read_code))
        assert_msg = ("This function is only allowed to be used for HTTP "
                      "status codes which are explicitly defined in "
                      "RFC 7231 & 4918. "
                      "{0} is not a defined Success Code!"
                      ).format(expected_code)
if isinstance(expected_code, list):
for code in expected_code:
assert code in HTTP_SUCCESS + HTTP_REDIRECTION, assert_msg
else:
assert expected_code in HTTP_SUCCESS + HTTP_REDIRECTION, assert_msg
# NOTE(afazekas): the http status code above 400 is processed by
# the _error_checker method
if read_code < 400:
            pattern = ("Unexpected http success status code {0}, "
                       "the expected status code is {1}")
if ((not isinstance(expected_code, list) and
(read_code != expected_code)) or
(isinstance(expected_code, list) and
(read_code not in expected_code))):
details = pattern.format(read_code, expected_code)
raise exceptions.InvalidHttpSuccessCode(details)
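    # Hedged usage sketch, not part of the original class; the URL below is
    # hypothetical. A service client built on RestClient would typically
    # validate status codes like this:
    #
    #   resp, body = self.get('servers')
    #   self.expected_success(200, resp.status)         # exact match
    #   self.expected_success([200, 203], resp.status)  # any of a list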
def post(self, url, body, headers=None, extra_headers=False,
chunked=False):
"""Send a HTTP POST request using keystone auth
:param str url: the relative url to send the post request to
:param dict body: the request body
:param dict headers: The headers to use for the request
        :param bool extra_headers: Boolean value that indicates whether the
                                   headers returned by the get_headers()
                                   method should be merged into the provided
                                   headers; pass any additional headers the
                                   request needs in the headers dict.
:param bool chunked: sends the body with chunked encoding
:return: a tuple with the first entry containing the response headers
and the second the response body
:rtype: tuple
"""
return self.request('POST', url, extra_headers, headers, body, chunked)
def get(self, url, headers=None, extra_headers=False):
"""Send a HTTP GET request using keystone service catalog and auth
:param str url: the relative url to send the post request to
:param dict headers: The headers to use for the request
        :param bool extra_headers: Boolean value that indicates whether the
                                   headers returned by the get_headers()
                                   method should be merged into the provided
                                   headers; pass any additional headers the
                                   request needs in the headers dict.
:return: a tuple with the first entry containing the response headers
and the second the response body
:rtype: tuple
"""
return self.request('GET', url, extra_headers, headers)
def delete(self, url, headers=None, body=None, extra_headers=False):
"""Send a HTTP DELETE request using keystone service catalog and auth
:param str url: the relative url to send the post request to
:param dict headers: The headers to use for the request
:param dict body: the request body
        :param bool extra_headers: Boolean value that indicates whether the
                                   headers returned by the get_headers()
                                   method should be merged into the provided
                                   headers; pass any additional headers the
                                   request needs in the headers dict.
:return: a tuple with the first entry containing the response headers
and the second the response body
:rtype: tuple
"""
return self.request('DELETE', url, extra_headers, headers, body)
def patch(self, url, body, headers=None, extra_headers=False):
"""Send a HTTP PATCH request using keystone service catalog and auth
:param str url: the relative url to send the post request to
:param dict body: the request body
:param dict headers: The headers to use for the request
        :param bool extra_headers: Boolean value that indicates whether the
                                   headers returned by the get_headers()
                                   method should be merged into the provided
                                   headers; pass any additional headers the
                                   request needs in the headers dict.
:return: a tuple with the first entry containing the response headers
and the second the response body
:rtype: tuple
"""
return self.request('PATCH', url, extra_headers, headers, body)
def put(self, url, body, headers=None, extra_headers=False, chunked=False):
"""Send a HTTP PUT request using keystone service catalog and auth
:param str url: the relative url to send the post request to
:param dict body: the request body
:param dict headers: The headers to use for the request
        :param bool extra_headers: Boolean value that indicates whether the
                                   headers returned by the get_headers()
                                   method should be merged into the provided
                                   headers; pass any additional headers the
                                   request needs in the headers dict.
:param bool chunked: sends the body with chunked encoding
:return: a tuple with the first entry containing the response headers
and the second the response body
:rtype: tuple
"""
return self.request('PUT', url, extra_headers, headers, body, chunked)
def head(self, url, headers=None, extra_headers=False):
"""Send a HTTP HEAD request using keystone service catalog and auth
:param str url: the relative url to send the post request to
:param dict headers: The headers to use for the request
        :param bool extra_headers: Boolean value that indicates whether the
                                   headers returned by the get_headers()
                                   method should be merged into the provided
                                   headers; pass any additional headers the
                                   request needs in the headers dict.
:return: a tuple with the first entry containing the response headers
and the second the response body
:rtype: tuple
"""
return self.request('HEAD', url, extra_headers, headers)
def copy(self, url, headers=None, extra_headers=False):
"""Send a HTTP COPY request using keystone service catalog and auth
:param str url: the relative url to send the post request to
:param dict headers: The headers to use for the request
        :param bool extra_headers: Boolean value that indicates whether the
                                   headers returned by the get_headers()
                                   method should be merged into the provided
                                   headers; pass any additional headers the
                                   request needs in the headers dict.
:return: a tuple with the first entry containing the response headers
and the second the response body
:rtype: tuple
"""
return self.request('COPY', url, extra_headers, headers)
def get_versions(self):
"""Get the versions on a endpoint from the keystone catalog
This method will make a GET request on the baseurl from the keystone
catalog to return a list of API versions. It is expected that a GET
on the endpoint in the catalog will return a list of supported API
versions.
:return: tuple with response headers and list of version numbers
:rtype: tuple
"""
resp, body = self.get('')
body = self._parse_resp(body)
versions = map(lambda x: x['id'], body)
return resp, versions
def _get_request_id(self, resp):
for i in ('x-openstack-request-id', 'x-compute-request-id'):
if i in resp:
return resp[i]
return ""
def _safe_body(self, body, maxlen=4096):
# convert a structure into a string safely
try:
text = six.text_type(body)
except UnicodeDecodeError:
            # if this isn't actually text, return a marker noting it was removed
return "<BinaryData: removed>"
if len(text) > maxlen:
return text[:maxlen]
else:
return text
def _log_request_start(self, method, req_url):
caller_name = test_utils.find_test_caller()
if self.trace_requests and re.search(self.trace_requests, caller_name):
self.LOG.debug('Starting Request (%s): %s %s', caller_name,
method, req_url)
def _log_request_full(self, resp, req_headers=None, req_body=None,
resp_body=None, extra=None):
if 'X-Auth-Token' in req_headers:
req_headers['X-Auth-Token'] = '<omitted>'
# A shallow copy is sufficient
resp_log = resp.copy()
if 'x-subject-token' in resp_log:
resp_log['x-subject-token'] = '<omitted>'
log_fmt = """Request - Headers: %s
Body: %s
Response - Headers: %s
Body: %s"""
self.LOG.debug(
log_fmt,
str(req_headers),
self._safe_body(req_body),
str(resp_log),
self._safe_body(resp_body),
extra=extra)
def _log_request(self, method, req_url, resp,
secs="", req_headers=None,
req_body=None, resp_body=None):
if req_headers is None:
req_headers = {}
# if we have the request id, put it in the right part of the log
extra = dict(request_id=self._get_request_id(resp))
# NOTE(sdague): while we still have 6 callers to this function
# we're going to just provide work around on who is actually
# providing timings by gracefully adding no content if they don't.
# Once we're down to 1 caller, clean this up.
caller_name = test_utils.find_test_caller()
if secs:
secs = " %.3fs" % secs
self.LOG.info(
'Request (%s): %s %s %s%s',
caller_name,
resp['status'],
method,
req_url,
secs,
extra=extra)
        # Also log everything at DEBUG; if you want to filter this
        # out, don't run at debug.
if self.LOG.isEnabledFor(logging.DEBUG):
self._log_request_full(resp, req_headers, req_body,
resp_body, extra)
def _parse_resp(self, body):
try:
body = json.loads(body)
except ValueError:
return body
        # We assume that if the first value of the deserialized body's
        # item set is a dict or a list, we just return that first value
        # of the deserialized body.
# Essentially "cutting out" the first placeholder element in a body
# that looks like this:
#
# {
# "users": [
# ...
# ]
# }
try:
            # Ensure there is no more than one top-level key
            # NOTE(freerunner): Ensure that the JSON is not null, to
            # prevent a StopIteration exception
if not hasattr(body, "keys") or len(body.keys()) != 1:
return body
# Just return the "wrapped" element
first_key, first_item = six.next(six.iteritems(body))
if isinstance(first_item, (dict, list)):
return first_item
except (ValueError, IndexError):
pass
return body
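    # Hedged illustration, not part of the original class: for a body of
    # '{"server": {"id": "1"}}' _parse_resp is assumed to return the inner
    # dict {'id': '1'}, while a body with several top-level keys (or whose
    # single value is not a dict/list) comes back as the full deserialized
    # object, and non-JSON input is returned unchanged.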
def response_checker(self, method, resp, resp_body):
"""A sanity check on the response from a HTTP request
This method does a sanity check on whether the response from an HTTP
        request conforms to the HTTP RFC.
:param str method: The HTTP verb of the request associated with the
response being passed in.
:param resp: The response headers
:param resp_body: The body of the response
:raises ResponseWithNonEmptyBody: If the response with the status code
is not supposed to have a body
:raises ResponseWithEntity: If the response code is 205 but has an
entity
"""
if (resp.status in set((204, 205, 304)) or resp.status < 200 or
method.upper() == 'HEAD') and resp_body:
raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
# NOTE(afazekas):
# If the HTTP Status Code is 205
# 'The response MUST NOT include an entity.'
# A HTTP entity has an entity-body and an 'entity-header'.
# In the HTTP response specification (Section 6) the 'entity-header'
# 'generic-header' and 'response-header' are in OR relation.
# All headers not in the above two group are considered as entity
# header in every interpretation.
if (resp.status == 205 and
0 != len(set(resp.keys()) - set(('status',)) -
self.response_header_lc - self.general_header_lc)):
raise exceptions.ResponseWithEntity()
        # NOTE(afazekas):
        # Swift sometimes (e.g. on deleting a non-empty container) returns
        # a non-JSON error response; we could create a new rest class for
        # swift.
        # RFC 2616 says error responses SHOULD contain an explanation.
        # The warning is normal for the SHOULD/SHOULD NOT case,
        # but it will likely cause an error.
if method != 'HEAD' and not resp_body and resp.status >= 400:
self.LOG.warning("status >= 400 response with empty body")
def _request(self, method, url, headers=None, body=None, chunked=False):
"""A simple HTTP request interface."""
# Authenticate the request with the auth provider
req_url, req_headers, req_body = self.auth_provider.auth_request(
method, url, headers, body, self.filters)
# Do the actual request, and time it
start = time.time()
self._log_request_start(method, req_url)
resp, resp_body = self.raw_request(
req_url, method, headers=req_headers, body=req_body,
chunked=chunked
)
end = time.time()
self._log_request(method, req_url, resp, secs=(end - start),
req_headers=req_headers, req_body=req_body,
resp_body=resp_body)
# Verify HTTP response codes
self.response_checker(method, resp, resp_body)
return resp, resp_body
def raw_request(self, url, method, headers=None, body=None, chunked=False):
"""Send a raw HTTP request without the keystone catalog or auth
        This method sends an HTTP request in the same manner as the request()
        method; however, it does so without using keystone auth or the catalog
        to determine the base url. Additionally, no response handling is done;
        the results from the request are just returned.
:param str url: Full url to send the request
:param str method: The HTTP verb to use for the request
        :param str headers: Headers to use for the request; if none are
                            specified, the default headers from get_headers()
                            are used
:param str body: Body to send with the request
:param bool chunked: sends the body with chunked encoding
:rtype: tuple
:return: a tuple with the first entry containing the response headers
and the second the response body
"""
if headers is None:
headers = self.get_headers()
return self.http_obj.request(url, method, headers=headers,
body=body, chunked=chunked)
def request(self, method, url, extra_headers=False, headers=None,
body=None, chunked=False):
"""Send a HTTP request with keystone auth and using the catalog
This method will send an HTTP request using keystone auth in the
headers and the catalog to determine the endpoint to use for the
        baseurl to send the request to.
        When a response is received it will check it to see if an error
        response was received. If it was, an exception will be raised to
        enable it to be handled quickly.
This method will also handle rate-limiting, if a 413 response code is
received it will retry the request after waiting the 'retry-after'
duration from the header.
:param str method: The HTTP verb to use for the request
:param str url: Relative url to send the request to
        :param bool extra_headers: Boolean value that indicates whether the
                                   headers returned by the get_headers()
                                   method should be merged into the provided
                                   headers; pass any additional headers the
                                   request needs in the headers dict.
        :param dict headers: Headers to use for the request; if none are
                             specified, the headers returned from the
                             get_headers() method are used. If the request
                             explicitly requires no headers, use an empty dict.
:param str body: Body to send with the request
:param bool chunked: sends the body with chunked encoding
:rtype: tuple
:return: a tuple with the first entry containing the response headers
and the second the response body
:raises UnexpectedContentType: If the content-type of the response
                                       isn't an expected type
:raises Unauthorized: If a 401 response code is received
:raises Forbidden: If a 403 response code is received
:raises NotFound: If a 404 response code is received
:raises BadRequest: If a 400 response code is received
:raises Gone: If a 410 response code is received
:raises Conflict: If a 409 response code is received
:raises PreconditionFailed: If a 412 response code is received
:raises OverLimit: If a 413 response code is received and over_limit is
not in the response body
:raises RateLimitExceeded: If a 413 response code is received and
over_limit is in the response body
:raises InvalidContentType: If a 415 response code is received
:raises UnprocessableEntity: If a 422 response code is received
:raises InvalidHTTPResponseBody: The response body wasn't valid JSON
and couldn't be parsed
:raises NotImplemented: If a 501 response code is received
:raises ServerFault: If a 500 response code is received
:raises UnexpectedResponseCode: If a response code above 400 is
received and it doesn't fall into any
of the handled checks
"""
# if extra_headers is True
# default headers would be added to headers
retry = 0
if headers is None:
            # NOTE(vponomaryov): if a client does not need headers,
            # it should explicitly pass an empty dict
headers = self.get_headers()
elif extra_headers:
try:
headers.update(self.get_headers())
except (ValueError, TypeError):
headers = self.get_headers()
resp, resp_body = self._request(method, url, headers=headers,
body=body, chunked=chunked)
while (resp.status == 413 and
'retry-after' in resp and
not self.is_absolute_limit(
resp, self._parse_resp(resp_body)) and
retry < MAX_RECURSION_DEPTH):
retry += 1
delay = self._get_retry_after_delay(resp)
self.LOG.debug(
"Sleeping %s seconds based on retry-after header", delay
)
time.sleep(delay)
resp, resp_body = self._request(method, url,
headers=headers, body=body)
self._error_checker(resp, resp_body)
return resp, resp_body
def _get_retry_after_delay(self, resp):
"""Extract the delay from the retry-after header.
This supports both integer and HTTP date formatted retry-after headers
per RFC 2616.
:param resp: The response containing the retry-after headers
:rtype: int
:return: The delay in seconds, clamped to be at least 1 second
:raises ValueError: On failing to parse the delay
"""
delay = None
try:
delay = int(resp['retry-after'])
except (ValueError, KeyError):
pass
try:
retry_timestamp = self._parse_http_date(resp['retry-after'])
date_timestamp = self._parse_http_date(resp['date'])
delay = int(retry_timestamp - date_timestamp)
except (ValueError, OverflowError, KeyError):
pass
if delay is None:
raise ValueError(
"Failed to parse retry-after header %r as either int or "
"HTTP-date." % resp.get('retry-after')
)
# Retry-after headers do not have sub-second precision. Clients may
# receive a delay of 0. After sleeping 0 seconds, we would (likely) hit
# another 413. To avoid this, always sleep at least 1 second.
return max(1, delay)
def _parse_http_date(self, val):
"""Parse an HTTP date, like 'Fri, 31 Dec 1999 23:59:59 GMT'.
Return an epoch timestamp (float), as returned by time.mktime().
"""
parts = email.utils.parsedate(val)
if not parts:
raise ValueError("Failed to parse date %s" % val)
return time.mktime(parts)
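    # Hedged illustration, not part of the original class: both retry-after
    # forms handled by _get_retry_after_delay above, assuming headers like
    #   {'retry-after': '5'}                                      -> 5
    #   {'retry-after': 'Fri, 31 Dec 1999 00:00:05 GMT',
    #    'date': 'Fri, 31 Dec 1999 00:00:00 GMT'}                 -> 5
    # and a parsed delay of 0 is clamped to the 1 second minimum.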
def _error_checker(self, resp, resp_body):
# NOTE(mtreinish): Check for httplib response from glance_http. The
# object can't be used here because importing httplib breaks httplib2.
# If another object from a class not imported were passed here as
# resp this could possibly fail
if str(type(resp)) == "<type 'instance'>":
ctype = resp.getheader('content-type')
else:
try:
ctype = resp['content-type']
            # NOTE(mtreinish): Keystone delete user responses don't have a
            # content-type header (they don't have a body), so just pretend
            # it is set.
except KeyError:
ctype = 'application/json'
# It is not an error response
if resp.status < 400:
return
        # NOTE(zhipengh): There is a purposeful duplication of content-type
        # entries whose only difference is the presence of spaces, as
        # specified in RFC 7231.
JSON_ENC = ['application/json', 'application/json; charset=utf-8',
'application/json;charset=utf-8']
# NOTE(mtreinish): This is for compatibility with Glance and swift
# APIs. These are the return content types that Glance api v1
# (and occasionally swift) are using.
        # NOTE(zhipengh): There is a purposeful duplication of content-type
        # entries whose only difference is the presence of spaces, as
        # specified in RFC 7231.
TXT_ENC = ['text/plain', 'text/html', 'text/html; charset=utf-8',
'text/plain; charset=utf-8', 'text/html;charset=utf-8',
'text/plain;charset=utf-8']
if ctype.lower() in JSON_ENC:
parse_resp = True
elif ctype.lower() in TXT_ENC:
parse_resp = False
else:
raise exceptions.UnexpectedContentType(str(resp.status),
resp=resp)
if resp.status == 401:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.Unauthorized(resp_body, resp=resp)
if resp.status == 403:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.Forbidden(resp_body, resp=resp)
if resp.status == 404:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.NotFound(resp_body, resp=resp)
if resp.status == 400:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.BadRequest(resp_body, resp=resp)
if resp.status == 410:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.Gone(resp_body, resp=resp)
if resp.status == 409:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.Conflict(resp_body, resp=resp)
if resp.status == 412:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.PreconditionFailed(resp_body, resp=resp)
if resp.status == 413:
if parse_resp:
resp_body = self._parse_resp(resp_body)
if self.is_absolute_limit(resp, resp_body):
raise exceptions.OverLimit(resp_body, resp=resp)
else:
raise exceptions.RateLimitExceeded(resp_body, resp=resp)
if resp.status == 415:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.InvalidContentType(resp_body, resp=resp)
if resp.status == 422:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.UnprocessableEntity(resp_body, resp=resp)
if resp.status in (500, 501):
message = resp_body
if parse_resp:
try:
resp_body = self._parse_resp(resp_body)
except ValueError:
                    # If the response body is a non-JSON string message,
                    # use resp_body as is and raise an
                    # InvalidHTTPResponseBody exception.
raise exceptions.InvalidHTTPResponseBody(message)
else:
if isinstance(resp_body, dict):
# I'm seeing both computeFault
# and cloudServersFault come back.
# Will file a bug to fix, but leave as is for now.
if 'cloudServersFault' in resp_body:
message = resp_body['cloudServersFault']['message']
elif 'computeFault' in resp_body:
message = resp_body['computeFault']['message']
elif 'error' in resp_body:
message = resp_body['error']['message']
elif 'message' in resp_body:
message = resp_body['message']
else:
message = resp_body
if resp.status == 501:
raise exceptions.NotImplemented(resp_body, resp=resp,
message=message)
else:
raise exceptions.ServerFault(resp_body, resp=resp,
message=message)
if resp.status >= 400:
raise exceptions.UnexpectedResponseCode(str(resp.status),
resp=resp)
def is_absolute_limit(self, resp, resp_body):
if (not isinstance(resp_body, collections.Mapping) or
'retry-after' not in resp):
return True
return 'exceed' in resp_body.get('message', 'blabla')
def wait_for_resource_deletion(self, id):
"""Waits for a resource to be deleted
This method will loop over is_resource_deleted until either
is_resource_deleted returns True or the build timeout is reached. This
depends on is_resource_deleted being implemented
:param str id: The id of the resource to check
:raises TimeoutException: If the build_timeout has elapsed and the
resource still hasn't been deleted
"""
start_time = int(time.time())
while True:
if self.is_resource_deleted(id):
return
if int(time.time()) - start_time >= self.build_timeout:
message = ('Failed to delete %(resource_type)s %(id)s within '
'the required time (%(timeout)s s).' %
{'resource_type': self.resource_type, 'id': id,
'timeout': self.build_timeout})
caller = test_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
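    # Hedged usage sketch, not part of the original class; show_server() is a
    # hypothetical client call. Subclasses pair wait_for_resource_deletion
    # with an is_resource_deleted override along these lines:
    #
    #   def is_resource_deleted(self, id):
    #       try:
    #           self.show_server(id)
    #       except exceptions.NotFound:
    #           return True
    #       return False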
def is_resource_deleted(self, id):
"""Subclasses override with specific deletion detection."""
message = ('"%s" does not implement is_resource_deleted'
% self.__class__.__name__)
raise NotImplementedError(message)
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'resource'
@classmethod
def validate_response(cls, schema, resp, body):
# Only check the response if the status code is a success code
# TODO(cyeoh): Eventually we should be able to verify that a failure
# code if it exists is something that we expect. This is explicitly
# declared in the V3 API and so we should be able to export this in
# the response schema. For now we'll ignore it.
if resp.status in HTTP_SUCCESS + HTTP_REDIRECTION:
cls.expected_success(schema['status_code'], resp.status)
# Check the body of a response
body_schema = schema.get('response_body')
if body_schema:
try:
jsonschema.validate(body, body_schema,
cls=JSONSCHEMA_VALIDATOR,
format_checker=FORMAT_CHECKER)
except jsonschema.ValidationError as ex:
msg = ("HTTP response body is invalid (%s)" % ex)
raise exceptions.InvalidHTTPResponseBody(msg)
else:
if body:
msg = ("HTTP response body should not exist (%s)" % body)
raise exceptions.InvalidHTTPResponseBody(msg)
# Check the header of a response
header_schema = schema.get('response_header')
if header_schema:
try:
jsonschema.validate(resp, header_schema,
cls=JSONSCHEMA_VALIDATOR,
format_checker=FORMAT_CHECKER)
except jsonschema.ValidationError as ex:
msg = ("HTTP response header is invalid (%s)" % ex)
raise exceptions.InvalidHTTPResponseHeader(msg)
def _get_base_version_url(self):
# TODO(oomichi): This method can be used for auth's replace_version().
# So it is nice to have common logic for the maintenance.
endpoint = self.base_url
url = urllib.parse.urlsplit(endpoint)
new_path = re.split(r'(^|/)+v\d+(\.\d+)?', url.path)[0]
url = list(url)
url[2] = new_path + '/'
return urllib.parse.urlunsplit(url)
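    # Hedged illustration, not part of the original class: for a base_url of
    # 'https://example.com/compute/v2.1/<tenant-id>' the version segment is
    # assumed to be stripped, yielding 'https://example.com/compute/'.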
class ResponseBody(dict):
"""Class that wraps an http response and dict body into a single value.
Callers that receive this object will normally use it as a dict but
can extract the response if needed.
"""
def __init__(self, response, body=None):
body_data = body or {}
self.update(body_data)
self.response = response
def __str__(self):
body = super(ResponseBody, self).__str__()
return "response: %s\nBody: %s" % (self.response, body)
class ResponseBodyData(object):
"""Class that wraps an http response and string data into a single value.
"""
def __init__(self, response, data):
self.response = response
self.data = data
def __str__(self):
return "response: %s\nBody: %s" % (self.response, self.data)
class ResponseBodyList(list):
"""Class that wraps an http response and list body into a single value.
Callers that receive this object will normally use it as a list but
can extract the response if needed.
"""
def __init__(self, response, body=None):
body_data = body or []
self.extend(body_data)
self.response = response
def __str__(self):
body = super(ResponseBodyList, self).__str__()
return "response: %s\nBody: %s" % (self.response, body)
|
|
"""Download handlers for http and https schemes"""
import re
import logging
from io import BytesIO
from time import time
import warnings
from six.moves.urllib.parse import urldefrag
from zope.interface import implementer
from twisted.internet import defer, reactor, protocol
from twisted.web.http_headers import Headers as TxHeaders
from twisted.web.iweb import IBodyProducer, UNKNOWN_LENGTH
from twisted.internet.error import TimeoutError
from twisted.web.http import _DataLoss, PotentialDataLoss
from twisted.web.client import Agent, ProxyAgent, ResponseDone, \
HTTPConnectionPool, ResponseFailed
try:
from twisted.web.client import URI
except ImportError:
from twisted.web.client import _URI as URI
from twisted.internet.endpoints import TCP4ClientEndpoint
from scrapy.http import Headers
from scrapy.responsetypes import responsetypes
from scrapy.core.downloader.webclient import _parse
from scrapy.core.downloader.tls import openssl_methods
from scrapy.utils.misc import load_object
from scrapy.utils.python import to_bytes, to_unicode
from scrapy import twisted_version
logger = logging.getLogger(__name__)
class HTTP11DownloadHandler(object):
lazy = False
def __init__(self, settings):
self._pool = HTTPConnectionPool(reactor, persistent=True)
self._pool.maxPersistentPerHost = settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
self._pool._factory.noisy = False
self._sslMethod = openssl_methods[settings.get('DOWNLOADER_CLIENT_TLS_METHOD')]
self._contextFactoryClass = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
# try method-aware context factory
try:
self._contextFactory = self._contextFactoryClass(method=self._sslMethod)
except TypeError:
# use context factory defaults
self._contextFactory = self._contextFactoryClass()
msg = """
'%s' does not accept `method` argument (type OpenSSL.SSL method,\
e.g. OpenSSL.SSL.SSLv23_METHOD).\
Please upgrade your context factory class to handle it or ignore it.""" % (
settings['DOWNLOADER_CLIENTCONTEXTFACTORY'],)
warnings.warn(msg)
self._default_maxsize = settings.getint('DOWNLOAD_MAXSIZE')
self._default_warnsize = settings.getint('DOWNLOAD_WARNSIZE')
self._fail_on_dataloss = settings.getbool('DOWNLOAD_FAIL_ON_DATALOSS')
self._disconnect_timeout = 1
def download_request(self, request, spider):
"""Return a deferred for the HTTP download"""
agent = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool,
maxsize=getattr(spider, 'download_maxsize', self._default_maxsize),
warnsize=getattr(spider, 'download_warnsize', self._default_warnsize),
fail_on_dataloss=self._fail_on_dataloss)
return agent.download_request(request)
def close(self):
d = self._pool.closeCachedConnections()
# closeCachedConnections will hang on network or server issues, so
# we'll manually timeout the deferred.
#
# Twisted issue addressing this problem can be found here:
# https://twistedmatrix.com/trac/ticket/7738.
#
# closeCachedConnections doesn't handle external errbacks, so we'll
# issue a callback after `_disconnect_timeout` seconds.
delayed_call = reactor.callLater(self._disconnect_timeout, d.callback, [])
def cancel_delayed_call(result):
if delayed_call.active():
delayed_call.cancel()
return result
d.addBoth(cancel_delayed_call)
return d
class TunnelError(Exception):
"""An HTTP CONNECT tunnel could not be established by the proxy."""
class TunnelingTCP4ClientEndpoint(TCP4ClientEndpoint):
"""An endpoint that tunnels through proxies to allow HTTPS downloads. To
accomplish that, this endpoint sends an HTTP CONNECT to the proxy.
    The HTTP CONNECT is always sent when using this endpoint. I think this
    could be improved, as the CONNECT will be redundant if the connection
    associated with this endpoint comes from the pool and a CONNECT has
    already been issued for it.
"""
    _responseMatcher = re.compile(br'HTTP/1\.. (?P<status>\d{3})(?P<reason>.{,32})')
def __init__(self, reactor, host, port, proxyConf, contextFactory,
timeout=30, bindAddress=None):
proxyHost, proxyPort, self._proxyAuthHeader = proxyConf
super(TunnelingTCP4ClientEndpoint, self).__init__(reactor, proxyHost,
proxyPort, timeout, bindAddress)
self._tunnelReadyDeferred = defer.Deferred()
self._tunneledHost = host
self._tunneledPort = port
self._contextFactory = contextFactory
self._connectBuffer = bytearray()
def requestTunnel(self, protocol):
"""Asks the proxy to open a tunnel."""
tunnelReq = tunnel_request_data(self._tunneledHost, self._tunneledPort,
self._proxyAuthHeader)
protocol.transport.write(tunnelReq)
self._protocolDataReceived = protocol.dataReceived
protocol.dataReceived = self.processProxyResponse
self._protocol = protocol
return protocol
def processProxyResponse(self, rcvd_bytes):
"""Processes the response from the proxy. If the tunnel is successfully
created, notifies the client that we are ready to send requests. If not
raises a TunnelError.
"""
self._connectBuffer += rcvd_bytes
# make sure that enough (all) bytes are consumed
# and that we've got all HTTP headers (ending with a blank line)
# from the proxy so that we don't send those bytes to the TLS layer
#
# see https://github.com/scrapy/scrapy/issues/2491
if b'\r\n\r\n' not in self._connectBuffer:
return
self._protocol.dataReceived = self._protocolDataReceived
respm = TunnelingTCP4ClientEndpoint._responseMatcher.match(self._connectBuffer)
if respm and int(respm.group('status')) == 200:
try:
# this sets proper Server Name Indication extension
# but is only available for Twisted>=14.0
sslOptions = self._contextFactory.creatorForNetloc(
self._tunneledHost, self._tunneledPort)
except AttributeError:
# fall back to non-SNI SSL context factory
sslOptions = self._contextFactory
self._protocol.transport.startTLS(sslOptions,
self._protocolFactory)
self._tunnelReadyDeferred.callback(self._protocol)
else:
if respm:
extra = {'status': int(respm.group('status')),
'reason': respm.group('reason').strip()}
else:
extra = rcvd_bytes[:32]
self._tunnelReadyDeferred.errback(
TunnelError('Could not open CONNECT tunnel with proxy %s:%s [%r]' % (
self._host, self._port, extra)))
def connectFailed(self, reason):
"""Propagates the errback to the appropriate deferred."""
self._tunnelReadyDeferred.errback(reason)
def connect(self, protocolFactory):
self._protocolFactory = protocolFactory
connectDeferred = super(TunnelingTCP4ClientEndpoint,
self).connect(protocolFactory)
connectDeferred.addCallback(self.requestTunnel)
connectDeferred.addErrback(self.connectFailed)
return self._tunnelReadyDeferred
def tunnel_request_data(host, port, proxy_auth_header=None):
r"""
Return binary content of a CONNECT request.
>>> from scrapy.utils.python import to_native_str as s
>>> s(tunnel_request_data("example.com", 8080))
'CONNECT example.com:8080 HTTP/1.1\r\nHost: example.com:8080\r\n\r\n'
>>> s(tunnel_request_data("example.com", 8080, b"123"))
'CONNECT example.com:8080 HTTP/1.1\r\nHost: example.com:8080\r\nProxy-Authorization: 123\r\n\r\n'
>>> s(tunnel_request_data(b"example.com", "8090"))
'CONNECT example.com:8090 HTTP/1.1\r\nHost: example.com:8090\r\n\r\n'
"""
host_value = to_bytes(host, encoding='ascii') + b':' + to_bytes(str(port))
tunnel_req = b'CONNECT ' + host_value + b' HTTP/1.1\r\n'
tunnel_req += b'Host: ' + host_value + b'\r\n'
if proxy_auth_header:
tunnel_req += b'Proxy-Authorization: ' + proxy_auth_header + b'\r\n'
tunnel_req += b'\r\n'
return tunnel_req
class TunnelingAgent(Agent):
"""An agent that uses a L{TunnelingTCP4ClientEndpoint} to make HTTPS
downloads. It may look strange that we have chosen to subclass Agent and not
ProxyAgent but consider that after the tunnel is opened the proxy is
transparent to the client; thus the agent should behave like there is no
proxy involved.
"""
def __init__(self, reactor, proxyConf, contextFactory=None,
connectTimeout=None, bindAddress=None, pool=None):
super(TunnelingAgent, self).__init__(reactor, contextFactory,
connectTimeout, bindAddress, pool)
self._proxyConf = proxyConf
self._contextFactory = contextFactory
if twisted_version >= (15, 0, 0):
def _getEndpoint(self, uri):
return TunnelingTCP4ClientEndpoint(
self._reactor, uri.host, uri.port, self._proxyConf,
self._contextFactory, self._endpointFactory._connectTimeout,
self._endpointFactory._bindAddress)
else:
def _getEndpoint(self, scheme, host, port):
return TunnelingTCP4ClientEndpoint(
self._reactor, host, port, self._proxyConf,
self._contextFactory, self._connectTimeout,
self._bindAddress)
def _requestWithEndpoint(self, key, endpoint, method, parsedURI,
headers, bodyProducer, requestPath):
# proxy host and port are required for HTTP pool `key`
# otherwise, same remote host connection request could reuse
# a cached tunneled connection to a different proxy
key = key + self._proxyConf
return super(TunnelingAgent, self)._requestWithEndpoint(key, endpoint, method, parsedURI,
headers, bodyProducer, requestPath)
class ScrapyProxyAgent(Agent):
def __init__(self, reactor, proxyURI,
connectTimeout=None, bindAddress=None, pool=None):
super(ScrapyProxyAgent, self).__init__(reactor,
connectTimeout=connectTimeout,
bindAddress=bindAddress,
pool=pool)
self._proxyURI = URI.fromBytes(proxyURI)
def request(self, method, uri, headers=None, bodyProducer=None):
"""
Issue a new request via the configured proxy.
"""
# Cache *all* connections under the same key, since we are only
# connecting to a single destination, the proxy:
if twisted_version >= (15, 0, 0):
proxyEndpoint = self._getEndpoint(self._proxyURI)
else:
proxyEndpoint = self._getEndpoint(self._proxyURI.scheme,
self._proxyURI.host,
self._proxyURI.port)
key = ("http-proxy", self._proxyURI.host, self._proxyURI.port)
return self._requestWithEndpoint(key, proxyEndpoint, method,
URI.fromBytes(uri), headers,
bodyProducer, uri)
class ScrapyAgent(object):
_Agent = Agent
_ProxyAgent = ScrapyProxyAgent
_TunnelingAgent = TunnelingAgent
def __init__(self, contextFactory=None, connectTimeout=10, bindAddress=None, pool=None,
maxsize=0, warnsize=0, fail_on_dataloss=True):
self._contextFactory = contextFactory
self._connectTimeout = connectTimeout
self._bindAddress = bindAddress
self._pool = pool
self._maxsize = maxsize
self._warnsize = warnsize
self._fail_on_dataloss = fail_on_dataloss
self._txresponse = None
def _get_agent(self, request, timeout):
bindaddress = request.meta.get('bindaddress') or self._bindAddress
proxy = request.meta.get('proxy')
if proxy:
_, _, proxyHost, proxyPort, proxyParams = _parse(proxy)
scheme = _parse(request.url)[0]
proxyHost = to_unicode(proxyHost)
omitConnectTunnel = b'noconnect' in proxyParams
if scheme == b'https' and not omitConnectTunnel:
proxyConf = (proxyHost, proxyPort,
request.headers.get(b'Proxy-Authorization', None))
return self._TunnelingAgent(reactor, proxyConf,
contextFactory=self._contextFactory, connectTimeout=timeout,
bindAddress=bindaddress, pool=self._pool)
else:
return self._ProxyAgent(reactor, proxyURI=to_bytes(proxy, encoding='ascii'),
connectTimeout=timeout, bindAddress=bindaddress, pool=self._pool)
return self._Agent(reactor, contextFactory=self._contextFactory,
connectTimeout=timeout, bindAddress=bindaddress, pool=self._pool)
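    # Hedged illustration, not part of the original class: agent selection
    # above is assumed to follow the request meta, roughly
    #   https URL + meta['proxy']                     -> TunnelingAgent (CONNECT)
    #   https URL + proxy URL with 'noconnect' param  -> ScrapyProxyAgent
    #   http URL  + meta['proxy']                     -> ScrapyProxyAgent
    #   no proxy                                      -> plain twisted Agent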
def download_request(self, request):
timeout = request.meta.get('download_timeout') or self._connectTimeout
agent = self._get_agent(request, timeout)
# request details
url = urldefrag(request.url)[0]
method = to_bytes(request.method)
headers = TxHeaders(request.headers)
if isinstance(agent, self._TunnelingAgent):
headers.removeHeader(b'Proxy-Authorization')
if request.body:
bodyproducer = _RequestBodyProducer(request.body)
elif method == b'POST':
# Setting Content-Length: 0 even for POST requests is not a
# MUST per HTTP RFCs, but it's common behavior, and some
            # servers require it and return HTTP 411 Length Required otherwise
#
# RFC 7230#section-3.3.2:
# "a Content-Length header field is normally sent in a POST
# request even when the value is 0 (indicating an empty payload body)."
#
# Twisted < 17 will not add "Content-Length: 0" by itself;
# Twisted >= 17 fixes this;
            # Using a producer with an empty string body sends `0` as Content-Length
# for all versions of Twisted.
bodyproducer = _RequestBodyProducer(b'')
else:
bodyproducer = None
start_time = time()
d = agent.request(
method, to_bytes(url, encoding='ascii'), headers, bodyproducer)
# set download latency
d.addCallback(self._cb_latency, request, start_time)
# response body is ready to be consumed
d.addCallback(self._cb_bodyready, request)
d.addCallback(self._cb_bodydone, request, url)
# check download timeout
self._timeout_cl = reactor.callLater(timeout, d.cancel)
d.addBoth(self._cb_timeout, request, url, timeout)
return d
def _cb_timeout(self, result, request, url, timeout):
if self._timeout_cl.active():
self._timeout_cl.cancel()
return result
# needed for HTTPS requests, otherwise _ResponseReader doesn't
# receive connectionLost()
if self._txresponse:
self._txresponse._transport.stopProducing()
raise TimeoutError("Getting %s took longer than %s seconds." % (url, timeout))
def _cb_latency(self, result, request, start_time):
request.meta['download_latency'] = time() - start_time
return result
def _cb_bodyready(self, txresponse, request):
# deliverBody hangs for responses without body
if txresponse.length == 0:
return txresponse, b'', None
maxsize = request.meta.get('download_maxsize', self._maxsize)
warnsize = request.meta.get('download_warnsize', self._warnsize)
expected_size = txresponse.length if txresponse.length != UNKNOWN_LENGTH else -1
fail_on_dataloss = request.meta.get('download_fail_on_dataloss', self._fail_on_dataloss)
if maxsize and expected_size > maxsize:
error_msg = ("Cancelling download of %(url)s: expected response "
"size (%(size)s) larger than download max size (%(maxsize)s).")
error_args = {'url': request.url, 'size': expected_size, 'maxsize': maxsize}
logger.error(error_msg, error_args)
txresponse._transport._producer.loseConnection()
raise defer.CancelledError(error_msg % error_args)
if warnsize and expected_size > warnsize:
logger.warning("Expected response size (%(size)s) larger than "
"download warn size (%(warnsize)s) in request %(request)s.",
{'size': expected_size, 'warnsize': warnsize, 'request': request})
def _cancel(_):
# Abort connection immediately.
txresponse._transport._producer.abortConnection()
d = defer.Deferred(_cancel)
txresponse.deliverBody(_ResponseReader(
d, txresponse, request, maxsize, warnsize, fail_on_dataloss))
# save response for timeouts
self._txresponse = txresponse
return d
def _cb_bodydone(self, result, request, url):
txresponse, body, flags = result
status = int(txresponse.code)
headers = Headers(txresponse.headers.getAllRawHeaders())
respcls = responsetypes.from_args(headers=headers, url=url, body=body)
return respcls(url=url, status=status, headers=headers, body=body, flags=flags)
@implementer(IBodyProducer)
class _RequestBodyProducer(object):
def __init__(self, body):
self.body = body
self.length = len(body)
def startProducing(self, consumer):
consumer.write(self.body)
return defer.succeed(None)
def pauseProducing(self):
pass
def stopProducing(self):
pass
class _ResponseReader(protocol.Protocol):
def __init__(self, finished, txresponse, request, maxsize, warnsize,
fail_on_dataloss):
self._finished = finished
self._txresponse = txresponse
self._request = request
self._bodybuf = BytesIO()
self._maxsize = maxsize
self._warnsize = warnsize
self._fail_on_dataloss = fail_on_dataloss
self._fail_on_dataloss_warned = False
self._reached_warnsize = False
self._bytes_received = 0
def dataReceived(self, bodyBytes):
        # This may be called several times after cancel() was called, with
        # buffered data.
if self._finished.called:
return
self._bodybuf.write(bodyBytes)
self._bytes_received += len(bodyBytes)
if self._maxsize and self._bytes_received > self._maxsize:
logger.error("Received (%(bytes)s) bytes larger than download "
"max size (%(maxsize)s) in request %(request)s.",
{'bytes': self._bytes_received,
'maxsize': self._maxsize,
'request': self._request})
# Clear buffer earlier to avoid keeping data in memory for a long
# time.
self._bodybuf.truncate(0)
self._finished.cancel()
if self._warnsize and self._bytes_received > self._warnsize and not self._reached_warnsize:
self._reached_warnsize = True
logger.warning("Received more bytes than download "
"warn size (%(warnsize)s) in request %(request)s.",
{'warnsize': self._warnsize,
'request': self._request})
def connectionLost(self, reason):
if self._finished.called:
return
body = self._bodybuf.getvalue()
if reason.check(ResponseDone):
self._finished.callback((self._txresponse, body, None))
return
if reason.check(PotentialDataLoss):
self._finished.callback((self._txresponse, body, ['partial']))
return
if reason.check(ResponseFailed) and any(r.check(_DataLoss) for r in reason.value.reasons):
if not self._fail_on_dataloss:
self._finished.callback((self._txresponse, body, ['dataloss']))
return
elif not self._fail_on_dataloss_warned:
logger.warn("Got data loss in %s. If you want to process broken "
"responses set the setting DOWNLOAD_FAIL_ON_DATALOSS = False"
" -- This message won't be shown in further requests",
self._txresponse.request.absoluteURI.decode())
self._fail_on_dataloss_warned = True
self._finished.errback(reason)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" example train fit utility """
import logging
import os
import time
import re
import math
import mxnet as mx
import horovod.mxnet as hvd
import numpy as np
#### imports needed for fit monkeypatch
from mxnet.initializer import Uniform
from mxnet.context import cpu
from mxnet.monitor import Monitor
from mxnet.model import BatchEndParam
from mxnet.io import DataDesc, DataIter, DataBatch
from mxnet.base import _as_list
import copy
import warnings  # needed for the warnings.warn calls in the optimizers below
##### imports needed for custom optimizer
from mxnet.optimizer import Optimizer, register
from mxnet.ndarray import (NDArray, zeros, clip, sqrt, cast, maximum, abs as NDabs, array, multiply,
multi_sum_sq, multi_lars)
from mxnet.ndarray import (sgd_update, sgd_mom_update, adam_update, rmsprop_update, rmspropalex_update,
mp_sgd_update, mp_sgd_mom_update, square, ftrl_update, ftml_update,
signsgd_update, signum_update,
multi_sgd_update, multi_sgd_mom_update, multi_mp_sgd_update,
multi_mp_sgd_mom_update,
lars_multi_sgd_update, lars_multi_sgd_mom_update,
lars_multi_mp_sgd_update, lars_multi_mp_sgd_mom_update)
from mxnet.ndarray import sparse
#####
from mlperf_logging.mllog import constants as mlperf_constants
from mlperf_log_utils import mx_resnet_print, all_reduce, mpiwrapper
def _flatten_list(nested_list):
return [item for sublist in nested_list for item in sublist]
@register
class SGDwLARS(Optimizer):
"""The SGD optimizer with momentum and weight decay.
    If the storage type of grad is ``row_sparse`` and ``lazy_update`` is True, \
**lazy updates** are applied by::
for row in grad.indices:
rescaled_grad[row] = lr * (rescale_grad * clip(grad[row], clip_gradient) + wd * weight[row])
state[row] = momentum[row] * state[row] + rescaled_grad[row]
weight[row] = weight[row] - state[row]
The sparse update only updates the momentum for the weights whose row_sparse
gradient indices appear in the current batch, rather than updating it for all
indices. Compared with the original update, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original update, and
may lead to different empirical results.
Otherwise, **standard updates** are applied by::
rescaled_grad = lr * (rescale_grad * clip(grad, clip_gradient) + wd * weight)
state = momentum * state + rescaled_grad
weight = weight - state
For details of the update algorithm see
:class:`~mxnet.ndarray.sgd_update` and :class:`~mxnet.ndarray.sgd_mom_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
lazy_update : bool, optional
Default is True. If True, lazy updates are applied \
if the storage types of weight and grad are both ``row_sparse``.
multi_precision: bool, optional
Flag to control the internal precision of the optimizer.::
False: results in using the same precision as the weights (default),
True: makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
"""
def __init__(self, momentum=0.0, lazy_update=True, lars=True, lars_eta=0.001, lars_eps=0, **kwargs):
super(SGDwLARS, self).__init__(**kwargs)
self.momentum = momentum
self.lazy_update = lazy_update
self.aggregate_num = int(os.getenv('MXNET_OPTIMIZER_AGGREGATION_SIZE', "4"))
self.lars = lars
self.lars_eta = lars_eta
self.lars_eps = lars_eps
self.skip = 0
self.last_lr = None
self.cur_lr = None
def _get_lrs(self, indices):
"""Gets the learning rates given the indices of the weights.
Parameters
----------
indices : list of int
Indices corresponding to weights.
Returns
-------
lrs : list of float
Learning rates for those indices.
"""
if self.cur_lr is not None:
self.last_lr = self.cur_lr
if self.lr_scheduler is not None:
lr = self.lr_scheduler(self.num_update)
else:
lr = self.lr
if self.cur_lr is None:
self.last_lr = lr
self.cur_lr = lr
lrs = [lr for _ in indices]
for i, index in enumerate(indices):
if index in self.param_dict:
lrs[i] *= self.param_dict[index].lr_mult
elif index in self.lr_mult:
lrs[i] *= self.lr_mult[index]
elif index in self.idx2name:
lrs[i] *= self.lr_mult.get(self.idx2name[index], 1.0)
return lrs
def set_wd_mult(self, args_wd_mult):
self.wd_mult = {}
for n in self.idx2name.values():
is_weight = n.endswith('_weight')
is_fc_bias = 'fc' in n and 'bias' in n
if not (is_weight or is_fc_bias):
if hvd.rank() == 0:
print("skipping wd on {}".format(n))
self.wd_mult[n] = 0.0
else:
if hvd.rank() == 0:
print("using wd on {}".format(n))
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__wd_mult__' in attr[name]:
self.wd_mult[name] = float(attr[name]['__wd_mult__'])
self.wd_mult.update(args_wd_mult)
def create_state_multi_precision(self, index, weight):
weight_master_copy = None
if self.multi_precision and weight.dtype == np.float16:
weight_master_copy = weight.astype(np.float32)
return (self.create_state(index, weight_master_copy), weight_master_copy)
if weight.dtype == np.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"SGD optimizer")
return self.create_state(index, weight)
def create_state(self, index, weight):
momentum = None
if self.momentum != 0.0:
stype = weight.stype if self.lazy_update else 'default'
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=stype)
return momentum
def _l2norm(self, v, rescale=False):
"L2 Norm implementation"
v = v.astype('float32')
if rescale:
v *= self.rescale_grad
norm = mx.nd.norm(v).asnumpy()[0]
return norm
def _get_lars(self, i, weight, g, lr, wd):
"Returns a scaling factor for the learning rate for this layer"
name = self.idx2name[i] if i in self.idx2name else str(i)
if name.endswith('gamma') or name.endswith('beta') or name.endswith('bias'):
return lr
w_norm = self._l2norm(weight)
g_norm = self._l2norm(g, rescale=True)
if w_norm > 0.0 and g_norm > 0.0:
lars = self.lars_eta * w_norm/(g_norm + wd * w_norm + self.lars_eps)
else:
lars = 1.0
return lars * lr
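    # Hedged worked example, not part of the original class: with
    # lars_eta=0.001, wd=1e-4, ||w|| = 10.0 and ||g|| = 1.0 the layer-wise
    # trust ratio is
    #   lars = 0.001 * 10.0 / (1.0 + 1e-4 * 10.0 + 0) ~= 0.00999
    # so the effective learning rate is lars * lr, while gamma, beta and
    # bias parameters keep the unscaled lr per the early return above.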
def _update_impl(self, indices, weights, grads, states, multi_precision=False):
aggregate = True
if not isinstance(indices, (tuple, list)):
indices = [indices]
weights = [weights]
grads = [grads]
states = [states]
for weight, grad in zip(weights, grads):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
aggregate = (aggregate and
weight.stype == 'default' and
grad.stype == 'default')
self._update_count(indices)
lrs = self._get_lrs(indices)
wds = self._get_wds(indices)
if self.lars:
lrs = [self._get_lars(i, w, g, lr, wd) for (i, w, g, lr, wd) in zip(indices, weights, grads, lrs, wds)]
kwargs = {'rescale_grad': self.rescale_grad}
if self.momentum > 0:
kwargs['momentum'] = self.momentum * (self.cur_lr / self.last_lr)
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if aggregate:
current_index = 0
while current_index < len(indices):
sidx = current_index
eidx = current_index + self.aggregate_num
if not multi_precision:
if self.momentum > 0:
multi_sgd_mom_update(*_flatten_list(zip(weights[sidx:eidx],
grads[sidx:eidx],
states[sidx:eidx])),
out=weights[sidx:eidx],
num_weights=len(weights[sidx:eidx]),
lrs=lrs[sidx:eidx],
wds=wds[sidx:eidx],
**kwargs)
else:
multi_sgd_update(*_flatten_list(zip(weights[sidx:eidx],
grads[sidx:eidx])),
out=weights[sidx:eidx],
num_weights=len(weights[sidx:eidx]),
lrs=lrs[sidx:eidx],
wds=wds[sidx:eidx],
**kwargs)
else:
if self.momentum > 0:
multi_mp_sgd_mom_update(*_flatten_list(zip(weights[sidx:eidx],
grads[sidx:eidx],
*zip(*states[sidx:eidx]))),
out=weights[sidx:eidx],
num_weights=len(weights[sidx:eidx]),
lrs=lrs[sidx:eidx],
wds=wds[sidx:eidx],
**kwargs)
else:
multi_mp_sgd_update(*_flatten_list(zip(weights[sidx:eidx],
grads[sidx:eidx],
list(zip(*states[sidx:eidx]))[1])),
out=weights[sidx:eidx],
num_weights=len(weights[sidx:eidx]),
lrs=lrs[sidx:eidx],
wds=wds[sidx:eidx],
**kwargs)
current_index += self.aggregate_num
else:
for weight, grad, state, lr, wd in zip(weights, grads, states, lrs, wds):
if not multi_precision:
if state is not None:
sgd_mom_update(weight, grad, state, out=weight,
lazy_update=self.lazy_update, lr=lr, wd=wd, **kwargs)
else:
sgd_update(weight, grad, out=weight, lazy_update=self.lazy_update,
lr=lr, wd=wd, **kwargs)
else:
if state[0] is not None:
mp_sgd_mom_update(weight, grad, state[0], state[1], out=weight,
lr=lr, wd=wd, **kwargs)
else:
mp_sgd_update(weight, grad, state[1], out=weight,
lr=lr, wd=wd, **kwargs)
def update(self, index, weight, grad, state):
self._update_impl(index, weight, grad, state, multi_precision=False)
def update_multi_precision(self, index, weight, grad, state):
if not isinstance(index, (tuple, list)):
use_multi_precision = self.multi_precision and weight.dtype == np.float16
else:
use_multi_precision = self.multi_precision and weight[0].dtype == np.float16
self._update_impl(index, weight, grad, state,
multi_precision=use_multi_precision)
@register
class SGDwFASTLARS(Optimizer):
"""The SGD optimizer with momentum and weight decay.
    If the storage type of grad is ``row_sparse`` and ``lazy_update`` is True, \
**lazy updates** are applied by::
for row in grad.indices:
rescaled_grad[row] = lr * (rescale_grad * clip(grad[row], clip_gradient) + wd * weight[row])
state[row] = momentum[row] * state[row] + rescaled_grad[row]
weight[row] = weight[row] - state[row]
The sparse update only updates the momentum for the weights whose row_sparse
gradient indices appear in the current batch, rather than updating it for all
indices. Compared with the original update, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original update, and
may lead to different empirical results.
Otherwise, **standard updates** are applied by::
rescaled_grad = lr * (rescale_grad * clip(grad, clip_gradient) + wd * weight)
state = momentum * state + rescaled_grad
weight = weight - state
For details of the update algorithm see
:class:`~mxnet.ndarray.sgd_update` and :class:`~mxnet.ndarray.sgd_mom_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
lazy_update : bool, optional
Default is True. If True, lazy updates are applied \
if the storage types of weight and grad are both ``row_sparse``.
multi_precision: bool, optional
Flag to control the internal precision of the optimizer.::
False: results in using the same precision as the weights (default),
True: makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
"""
def __init__(self, momentum=0.0, lazy_update=True, lars=True, lars_eta=0.001, lars_eps=0, **kwargs):
super(SGDwFASTLARS, self).__init__(**kwargs)
self.momentum = momentum
self.lazy_update = lazy_update
self.aggregate_num = int(os.getenv('MXNET_OPTIMIZER_AGGREGATION_SIZE', "4"))
self.lars = lars
self.lars_eta = lars_eta
self.lars_eps = lars_eps
self.skip = 0
self.last_lr = None
self.cur_lr = None
def _get_lrs(self, indices):
"""Gets the learning rates given the indices of the weights.
Parameters
----------
indices : list of int
Indices corresponding to weights.
Returns
-------
lrs : list of float
Learning rates for those indices.
"""
if self.cur_lr is not None:
self.last_lr = self.cur_lr
if self.lr_scheduler is not None:
lr = self.lr_scheduler(self.num_update)
else:
lr = self.lr
if self.cur_lr is None:
self.last_lr = lr
self.cur_lr = lr
lrs = [lr for _ in indices]
for i, index in enumerate(indices):
if index in self.param_dict:
lrs[i] *= self.param_dict[index].lr_mult
elif index in self.lr_mult:
lrs[i] *= self.lr_mult[index]
elif index in self.idx2name:
lrs[i] *= self.lr_mult.get(self.idx2name[index], 1.0)
return lrs
def set_wd_mult(self, args_wd_mult):
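        # Apply weight decay only to parameters named *_weight and to FC biases;
        # everything else (e.g. batch-norm gamma/beta, other biases) gets
        # wd_mult = 0 -- a common exclusion when training with LARS.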
self.wd_mult = {}
for n in self.idx2name.values():
is_weight = n.endswith('_weight')
is_fc_bias = 'fc' in n and 'bias' in n
if not (is_weight or is_fc_bias):
if hvd.rank() == 0:
print("skipping wd on {}".format(n))
self.wd_mult[n] = 0.0
else:
if hvd.rank() == 0:
print("using wd on {}".format(n))
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__wd_mult__' in attr[name]:
self.wd_mult[name] = float(attr[name]['__wd_mult__'])
self.wd_mult.update(args_wd_mult)
def create_state_multi_precision(self, index, weight):
weight_master_copy = None
if self.multi_precision and weight.dtype == np.float16:
weight_master_copy = weight.astype(np.float32)
return (self.create_state(index, weight_master_copy), weight_master_copy)
if weight.dtype == np.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"SGD optimizer")
return self.create_state(index, weight)
def create_state(self, index, weight):
momentum = None
if self.momentum != 0.0:
stype = weight.stype if self.lazy_update else 'default'
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=stype)
return momentum
def _l2norm(self, v, rescale=False):
"L2 Norm implementation"
v = v.astype('float32')
if rescale:
v *= self.rescale_grad
norm = mx.nd.norm(v).asnumpy()[0]
return norm
def _get_lars(self, i, weight, g, lr, wd):
"Returns a scaling factor for the learning rate for this layer"
name = self.idx2name[i] if i in self.idx2name else str(i)
if name.endswith('gamma') or name.endswith('beta') or name.endswith('bias'):
return lr
w_norm = self._l2norm(weight)
g_norm = self._l2norm(g, rescale=True)
if w_norm > 0.0 and g_norm > 0.0:
lars = self.lars_eta * w_norm/(g_norm + wd * w_norm + self.lars_eps)
else:
lars = 1.0
return lars * lr
def _update_impl(self, indices, weights, grads, states, multi_precision=False):
aggregate = True
if not isinstance(indices, (tuple, list)):
indices = [indices]
weights = [weights]
grads = [grads]
states = [states]
for weight, grad in zip(weights, grads):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
aggregate = (aggregate and
weight.stype == 'default' and
grad.stype == 'default')
self._update_count(indices)
lrs = self._get_lrs(indices)
wds = self._get_wds(indices)
kwargs = {'rescale_grad': self.rescale_grad}
if self.momentum > 0:
kwargs['momentum'] = self.momentum * (self.cur_lr / self.last_lr)
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if aggregate:
nb_params = len(indices)
names = [self.idx2name[i] if i in self.idx2name else str(i) for i in indices]
lars_idx = [i for i in range(nb_params) if not(names[i].endswith('gamma')
or names[i].endswith('beta') or names[i].endswith('bias'))]
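            # Partition parameters into LARS (weights) and non-LARS (gamma/beta/bias)
            # groups and reorder them so the first nb_lars entries can have their
            # learning rates rescaled in one fused multi_sum_sq / multi_lars call.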
if self.lars and len(lars_idx) > 0:
nb_lars = len(lars_idx)
no_lars_idx = [i for i in range(nb_params) if (names[i].endswith('gamma') or
names[i].endswith('beta') or names[i].endswith('bias'))]
cur_ctx = weights[0].context
full_idx = lars_idx + no_lars_idx
new_lrs = array([lrs[i] for i in full_idx], ctx=cur_ctx, dtype='float32')
new_wds = array([wds[i] for i in full_idx], ctx=cur_ctx, dtype='float32')
new_weights = [weights[i] for i in full_idx]
new_grads = [grads[i] for i in full_idx]
w_sum_sq = multi_sum_sq(*new_weights[:nb_lars], num_arrays=nb_lars)
g_sum_sq = multi_sum_sq(*new_grads[:nb_lars], num_arrays=nb_lars)
multi_lars(new_lrs[:nb_lars], w_sum_sq, g_sum_sq, new_wds[:nb_lars],
eta=self.lars_eta, eps=self.lars_eps, rescale_grad=self.rescale_grad,
out=new_lrs[:nb_lars])
new_states = [states[i] for i in full_idx]
                # Same flow as the usual aggregated update, but using the preloaded LARS-fused sgd functions
sidx = 0
while sidx < len(indices):
eidx = sidx + len(new_weights[sidx:sidx+self.aggregate_num])
if not multi_precision:
if self.momentum > 0:
lars_multi_sgd_mom_update(
*_flatten_list(zip(new_weights[sidx:eidx],
new_grads[sidx:eidx],
new_states[sidx:eidx])),
new_lrs[sidx:eidx],
new_wds[sidx:eidx],
out=new_weights[sidx:eidx],
num_weights=len(new_weights[sidx:eidx]),
**kwargs)
else:
lars_multi_sgd_update(
*_flatten_list(zip(new_weights[sidx:eidx],
new_grads[sidx:eidx])),
new_lrs[sidx:eidx],
new_wds[sidx:eidx],
out=new_weights[sidx:eidx],
num_weights=len(new_weights[sidx:eidx]),
**kwargs)
else:
if self.momentum > 0:
lars_multi_mp_sgd_mom_update(
*_flatten_list(zip(new_weights[sidx:eidx],
new_grads[sidx:eidx],
*zip(*new_states[sidx:eidx]))),
new_lrs[sidx:eidx],
new_wds[sidx:eidx],
out=new_weights[sidx:eidx],
num_weights=len(new_weights[sidx:eidx]),
**kwargs)
else:
lars_multi_mp_sgd_update(
*_flatten_list(zip(new_weights[sidx:eidx],
new_grads[sidx:eidx],
list(zip(*new_states[sidx:eidx]))[1])),
new_lrs[sidx:eidx],
new_wds[sidx:eidx],
out=new_weights[sidx:eidx],
num_weights=len(new_weights[sidx:eidx]),
**kwargs)
sidx += self.aggregate_num
else:
current_index = 0
while current_index < len(indices):
sidx = current_index
eidx = current_index + self.aggregate_num
if not multi_precision:
if self.momentum > 0:
multi_sgd_mom_update(*_flatten_list(zip(weights[sidx:eidx],
grads[sidx:eidx],
states[sidx:eidx])),
out=weights[sidx:eidx],
num_weights=len(weights[sidx:eidx]),
lrs=lrs[sidx:eidx],
wds=wds[sidx:eidx],
**kwargs)
else:
multi_sgd_update(*_flatten_list(zip(weights[sidx:eidx],
grads[sidx:eidx])),
out=weights[sidx:eidx],
num_weights=len(weights[sidx:eidx]),
lrs=lrs[sidx:eidx],
wds=wds[sidx:eidx],
**kwargs)
else:
if self.momentum > 0:
multi_mp_sgd_mom_update(*_flatten_list(zip(weights[sidx:eidx],
grads[sidx:eidx],
*zip(*states[sidx:eidx]))),
out=weights[sidx:eidx],
num_weights=len(weights[sidx:eidx]),
lrs=lrs[sidx:eidx],
wds=wds[sidx:eidx],
**kwargs)
else:
multi_mp_sgd_update(*_flatten_list(zip(weights[sidx:eidx],
grads[sidx:eidx],
list(zip(*states[sidx:eidx]))[1])),
out=weights[sidx:eidx],
num_weights=len(weights[sidx:eidx]),
lrs=lrs[sidx:eidx],
wds=wds[sidx:eidx],
**kwargs)
current_index += self.aggregate_num
else:
if self.lars:
lrs = [self._get_lars(i, w, g, lr, wd) for (i, w, g, lr, wd) in
zip(indices, weights, grads, lrs, wds)]
for weight, grad, state, lr, wd in zip(weights, grads, states, lrs, wds):
if not multi_precision:
if state is not None:
sgd_mom_update(weight, grad, state, out=weight,
lazy_update=self.lazy_update, lr=lr, wd=wd, **kwargs)
else:
sgd_update(weight, grad, out=weight, lazy_update=self.lazy_update,
lr=lr, wd=wd, **kwargs)
else:
if state[0] is not None:
mp_sgd_mom_update(weight, grad, state[0], state[1], out=weight,
lr=lr, wd=wd, **kwargs)
else:
mp_sgd_update(weight, grad, state[1], out=weight,
lr=lr, wd=wd, **kwargs)
def update(self, index, weight, grad, state):
self._update_impl(index, weight, grad, state, multi_precision=False)
def update_multi_precision(self, index, weight, grad, state):
if not isinstance(index, (tuple, list)):
use_multi_precision = self.multi_precision and weight.dtype == np.float16
else:
use_multi_precision = self.multi_precision and weight[0].dtype == np.float16
self._update_impl(index, weight, grad, state,
multi_precision=use_multi_precision)
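# Illustrative sketch (not part of the original script): one way this optimizer can be
# instantiated by its registered name once this module has been imported (so that
# @register has run). The hyper-parameter values below are hypothetical placeholders.
def _example_create_sgdwfastlars():
    # mx.optimizer.create looks the optimizer up in MXNet's registry by lowercase class name.
    return mx.optimizer.create('sgdwfastlars',
                               learning_rate=0.1,
                               momentum=0.9,
                               wd=1e-4,
                               lars=True,
                               lars_eta=0.001,
                               lars_eps=1e-9,
                               multi_precision=True)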
def get_epoch_size(args, kv):
num_workers = hvd.size() if 'horovod' in args.kv_store else kv.num_workers
return math.ceil(int(args.num_examples / num_workers) / args.batch_size)
def _get_gpu(gpus):
idx = hvd.local_rank()
gpu = gpus.split(",")[idx]
return gpu
def _get_lr_scheduler(args, kv):
if 'lr_factor' not in args or args.lr_factor >= 1:
return (args.lr, None)
epoch_size = get_epoch_size(args, kv)
begin_epoch = 0
mx_resnet_print(key='lars_opt_base_learning_rate', val=args.lr)
mx_resnet_print(key='lars_opt_learning_rate_warmup_epochs', val=args.warmup_epochs)
if 'pow' in args.lr_step_epochs:
if 'horovod' in args.kv_store:
num_workers = hvd.size()
else:
num_workers = kv.num_workers if kv else 1
epoch_size = math.ceil(int(args.num_examples/num_workers)/args.batch_size)
warmup_steps = epoch_size * args.warmup_epochs
total_steps = epoch_size * args.num_epochs
mx_resnet_print(key=mlperf_constants.LARS_OPT_LR_DECAY_STEPS,
val=args.num_epochs)
return (args.lr, PolySchedule(args.lr, total_steps, warmup_steps))
step_epochs = [int(l) for l in args.lr_step_epochs.split(',')]
lr = args.lr
for s in step_epochs:
if begin_epoch >= s:
lr *= args.lr_factor
if lr != args.lr:
logging.info('Adjust learning rate to %e for epoch %d',
lr, begin_epoch)
steps = [epoch_size * (x - begin_epoch)
for x in step_epochs if x - begin_epoch > 0]
if steps:
if 'horovod' in args.kv_store:
num_workers = hvd.size()
else:
num_workers = kv.num_workers if kv else 1
epoch_size = math.ceil(int(args.num_examples/num_workers)/args.batch_size)
mx_resnet_print(key=mlperf_constants.OPT_LR_DECAY_BOUNDARY_EPOCHS,
val=step_epochs)
mx_resnet_print(key=mlperf_constants.OPT_LR_DECAY_BOUNDARY_STEPS,
val=[lr * (args.lr_factor ** i) for i in range(len(step_epochs))])
return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor,
base_lr=args.lr, warmup_steps=epoch_size * args.warmup_epochs,
warmup_mode=args.warmup_strategy))
else:
return (lr, None)
class PolySchedule():
def __init__(self, base_lr, iterations, warmup_iterations):
self.base_lr = base_lr
self.iterations = iterations
self.warmup_iterations = warmup_iterations
self.end_lr = 0.0001
mx_resnet_print(key=mlperf_constants.LARS_OPT_LR_DECAY_POLY_POWER, val=2)
mx_resnet_print(key=mlperf_constants.LARS_OPT_END_LR, val=self.end_lr)
def __call__(self, iteration):
if iteration <= self.warmup_iterations:
return self.base_lr * (iteration / self.warmup_iterations)
else:
polyit = iteration - self.warmup_iterations
polytotal = self.iterations - self.warmup_iterations
return self.end_lr + ((self.base_lr - self.end_lr) * (1 - (polyit / polytotal))**2)
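# Rough sanity check of PolySchedule (hypothetical numbers): with base_lr=10,
# warmup_iterations=5 and iterations=100, iteration 2 gives 10 * 2/5 = 4.0
# (linear warmup), while iteration 100 gives end_lr + (10 - end_lr) * (1 - 1)**2
# = end_lr, i.e. the schedule decays quadratically down to end_lr at the last step.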
def add_fit_args(parser):
"""
parser : argparse.ArgumentParser
    returns the parser extended with the arguments required by fit
"""
train = parser.add_argument_group('Training', 'model training')
train.add_argument('--network', type=str,
help='the neural network to use')
train.add_argument('--num-layers', type=int,
help='number of layers in the neural network, \
required by some networks such as resnet')
train.add_argument('--gpus', type=str,
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
train.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-factor', type=float, default=0.1,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-step-epochs', type=str,
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--initializer', type=str, default='default',
help='the initializer type')
train.add_argument('--label-smoothing', type=float, default=0.0)
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--lars-eps', type=float, default=0,
help='lars epsilon param')
train.add_argument('--lars-eta', type=float, default=0.001,
help='lars trust_factor param')
train.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
train.add_argument('--wd', type=float, default=0.0001,
help='weight decay for sgd')
train.add_argument('--batch-size', type=int, default=128,
help='the batch size')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str,
help='model prefix')
train.add_argument('--save-period', type=int, default=1, help='params saving period')
train.add_argument('--eval-period', type=int, default=1, help='evaluation every N epochs')
train.add_argument('--eval-offset', type=int, default=0, help='first evaluation on epoch N')
train.add_argument('--top-k', type=int, default=0,
help='report the top-k accuracy. 0 means no report.')
train.add_argument('--dtype', type=str, default='float32',
help='precision: float32 or float16')
# additional parameters for large batch sgd
train.add_argument('--warmup-epochs', type=int, default=5,
help='the epochs to ramp-up lr to scaled large-batch value')
train.add_argument('--warmup-strategy', type=str, default='linear',
help='the ramping-up strategy for large batch sgd')
train.add_argument('--logging-dir', type=str, default='logs')
train.add_argument('--log', type=str, default='')
train.add_argument('--bn-gamma-init0', action='store_true')
    train.add_argument('--epoch-size', type=int, default=0,
help='set number of batches in an epoch. useful for debugging')
train.add_argument('--profile-worker-suffix', type=str, default='',
help='profile workers actions into this file. During distributed training\
filename saved will be rank1_ followed by this suffix')
train.add_argument('--profile-server-suffix', type=str, default='',
help='profile server actions into a file with name like rank1_ followed by this suffix \
during distributed training')
train.add_argument('--accuracy-threshold', default=1.0, type=float,
help='stop training after top1 reaches this value')
return train
class CorrectCount(mx.metric.Accuracy):
def __init__(self, axis=1, name='correct-count',
output_names=None, label_names=None):
super(CorrectCount, self).__init__(
name=name, axis=axis,
output_names=output_names, label_names=label_names)
self.axis = axis
def get(self):
return (self.name, self.sum_metric)
def get_global(self):
return (self.name, self.global_sum_metric)
class TotalCount(mx.metric.Accuracy):
def __init__(self, axis=1, name='total-count',
output_names=None, label_names=None):
super(TotalCount, self).__init__(
name=name, axis=axis,
output_names=output_names, label_names=label_names)
self.axis = axis
def get(self):
return (self.name, self.num_inst)
def get_global(self):
return (self.name, self.global_num_inst)
class TopKCorrectCount(mx.metric.TopKAccuracy):
def __init__(self, name='top-k-correct-count',
output_names=None, label_names=None):
super(TopKCorrectCount, self).__init__(
name=name, top_k=5,
output_names=output_names, label_names=label_names)
def get(self):
return (self.name, self.sum_metric)
def get_global(self):
return (self.name, self.global_sum_metric)
class CrossEntropyCount(mx.metric.CrossEntropy):
def __init__(self, name='cross-entropy',
output_names=None, label_names=None):
super(CrossEntropyCount, self).__init__(
name=name, output_names=output_names, label_names=label_names)
def get(self):
return (self.name, self.sum_metric)
def get_global(self):
return (self.name, self.global_sum_metric)
def mlperf_fit(self, args, train_data, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local',
optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),
eval_end_callback=None,
eval_batch_end_callback=None, initializer=Uniform(0.01),
arg_params=None, aux_params=None, allow_missing=False,
force_rebind=False, force_init=False, begin_epoch=0, num_epoch=None,
validation_metric=None, monitor=None, sparse_row_id_fn=None,
eval_offset=0, eval_period=1,
accuracy_threshold=1.0,
multi_gpu_per_process=False):
assert num_epoch is not None, 'please specify number of epochs'
self.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label,
for_training=True, force_rebind=force_rebind)
if monitor is not None:
self.install_monitor(monitor)
self.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init)
self.init_optimizer(kvstore=kvstore, optimizer=optimizer,
optimizer_params=optimizer_params)
if validation_metric is None:
validation_metric = eval_metric
###########################################################################
# Adding Correct and Total Count metrics
###########################################################################
if not isinstance(validation_metric, list):
validation_metric = [validation_metric]
validation_metric = mx.metric.create(validation_metric)
if not isinstance(validation_metric, mx.metric.CompositeEvalMetric):
vm = mx.metric.CompositeEvalMetric()
vm.append(validation_metric)
validation_metric = vm
for m in [CorrectCount(), TotalCount()]:
validation_metric.metrics.append(m)
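    # CorrectCount/TotalCount expose raw correct and total sample counts so that,
    # after scoring, the counts can be all_reduce'd across workers and a single
    # global top-1 accuracy computed (see the EVAL_ACCURACY logging below).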
###########################################################################
if not isinstance(eval_metric, mx.metric.EvalMetric):
eval_metric = mx.metric.create(eval_metric)
block_epoch_start = begin_epoch
block_epoch_count = eval_offset + 1 - (begin_epoch % eval_period)
if block_epoch_count < 0:
block_epoch_count += eval_period
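    # An MLPerf "block" is the span of epochs between two evaluations; the logging
    # brackets each block with BLOCK_START / BLOCK_STOP and reports its epoch count.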
mx_resnet_print(key=mlperf_constants.BLOCK_START,
metadata={'first_epoch_num': block_epoch_start + 1, 'epoch_count': block_epoch_count})
################################################################################
# training loop
################################################################################
for epoch in range(begin_epoch, num_epoch):
mx_resnet_print(key=mlperf_constants.EPOCH_START, metadata={'epoch_num': epoch + 1})
tic = time.time()
eval_metric.reset()
nbatch = 0
data_iter = iter(train_data)
end_of_batch = False
next_data_batch = next(data_iter)
while not end_of_batch:
data_batch = next_data_batch
if monitor is not None:
monitor.tic()
self.forward_backward(data_batch)
self.update()
if isinstance(data_batch, list):
self.update_metric(eval_metric,
[db.label for db in data_batch],
pre_sliced=True)
else:
self.update_metric(eval_metric, data_batch.label)
try:
# pre fetch next batch
next_data_batch = next(data_iter)
self.prepare(next_data_batch, sparse_row_id_fn=sparse_row_id_fn)
except StopIteration:
end_of_batch = True
if monitor is not None:
monitor.toc_print()
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
nbatch += 1
mx_resnet_print(key=mlperf_constants.EPOCH_STOP, metadata={"epoch_num": epoch + 1})
# one epoch of training is finished
toc = time.time()
if kvstore:
if kvstore.rank == 0:
self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc-tic))
elif 'horovod' in args.kv_store:
if hvd.rank() == 0:
self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc-tic))
else:
self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc-tic))
# sync aux params across devices if there is more than one GPU per process
if multi_gpu_per_process:
arg_params, aux_params = self.get_params()
self.set_params(arg_params, aux_params)
if epoch_end_callback is not None:
for callback in _as_list(epoch_end_callback):
callback(epoch, self.symbol, arg_params, aux_params)
#----------------------------------------
# evaluation on validation set
if eval_data and epoch % eval_period == eval_offset:
mx_resnet_print(key=mlperf_constants.EVAL_START, metadata={'epoch_num': epoch + 1})
res = self.score(eval_data, validation_metric,
score_end_callback=eval_end_callback,
batch_end_callback=eval_batch_end_callback, epoch=epoch)
if kvstore:
if kvstore.rank == 0:
for name, val in res:
self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name, val)
elif 'horovod' in args.kv_store:
if hvd.rank() == 0:
for name, val in res:
self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name, val)
else:
for name, val in res:
self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name, val)
res = dict(res)
acc = [res['correct-count'], res['total-count']]
acc = all_reduce(acc)
acc = acc[0]/acc[1]
mx_resnet_print(key=mlperf_constants.EVAL_STOP, metadata={'epoch_num': epoch + 1})
mx_resnet_print(key=mlperf_constants.EVAL_ACCURACY, val=acc,
metadata={'epoch_num': epoch + 1})
mx_resnet_print(key=mlperf_constants.BLOCK_STOP,
metadata={'first_epoch_num': block_epoch_start + 1})
if acc > accuracy_threshold:
mx_resnet_print(key=mlperf_constants.RUN_STOP,
metadata={'status': 'success'})
return epoch
if epoch < (num_epoch - 1):
block_epoch_start = epoch + 1
block_epoch_count = num_epoch - epoch - 1
if block_epoch_count > eval_period:
block_epoch_count = eval_period
mx_resnet_print(key=mlperf_constants.BLOCK_START,
metadata={'first_epoch_num': block_epoch_start + 1,
'epoch_count': block_epoch_count})
# end of 1 epoch, reset the data-iter for another epoch
train_data.reset()
mx_resnet_print(key=mlperf_constants.RUN_STOP,
metadata={'status': 'aborted'})
return num_epoch
def fit(args, kv, model, initializer, data_loader, devs, arg_params, aux_params, **kwargs):
"""
train a model
args : argparse returns
model : loaded model of the neural network
initializer : weight initializer
data_loader : function that returns the train and val data iterators
devs : devices for training
arg_params : model parameters
    aux_params : auxiliary model parameters (e.g. batch-norm running statistics)
"""
if 'horovod' in args.kv_store:
kv = None
rank = hvd.rank()
num_workers = hvd.size()
else:
rank = kv.rank
num_workers = kv.num_workers
if args.profile_server_suffix:
mx.profiler.set_config(filename=args.profile_server_suffix, profile_all=True, profile_process='server')
mx.profiler.set_state(state='run', profile_process='server')
if args.profile_worker_suffix:
if num_workers > 1:
filename = 'rank' + str(rank) + '_' + args.profile_worker_suffix
else:
filename = args.profile_worker_suffix
mx.profiler.set_config(filename=filename, profile_all=True, profile_process='worker')
mx.profiler.set_state(state='run', profile_process='worker')
# logging
head = '%(asctime)-15s Node[' + str(rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
epoch_size = get_epoch_size(args, kv)
# data iterators
(train, val) = data_loader(args, kv)
if 'dist' in args.kv_store and not 'async' in args.kv_store:
logging.info('Resizing training data to %d batches per machine', epoch_size)
# resize train iter to ensure each machine has same number of batches per epoch
# if not, dist_sync can hang at the end with one machine waiting for other machines
if not args.use_dali:
train = mx.io.ResizeIter(train, epoch_size)
# save model
epoch_end_callbacks = []
# learning rate
lr, lr_scheduler = _get_lr_scheduler(args, kv)
optimizer_params = {
'learning_rate': lr,
'wd': args.wd,
'lr_scheduler': lr_scheduler,
'multi_precision': True}
if 'horovod' in args.kv_store:
optimizer_params['rescale_grad'] = 1. / args.batch_size
# Only a limited number of optimizers have 'momentum' property
has_momentum = {'sgd', 'dcasgd', 'nag', 'signum', 'lbsgd', 'sgdwlars', 'sgdwfastlars'}
mx_resnet_print(key='lars_opt_weight_decay', val=args.wd)
if args.optimizer in has_momentum:
optimizer_params['momentum'] = args.mom
mx_resnet_print(key='lars_opt_momentum', val=args.mom)
if args.optimizer in {'sgdwlars', 'sgdwfastlars'}:
optimizer_params['lars'] = True
optimizer_params['lars_eta'] = args.lars_eta
optimizer_params['lars_eps'] = args.lars_eps
mx_resnet_print(key=mlperf_constants.OPT_NAME,
val='lars')
mx_resnet_print(key=mlperf_constants.LARS_EPSILON,
val=args.lars_eps)
else:
mx_resnet_print(
key=mlperf_constants.OPT_NAME,
val='sgd')
if 'horovod' in args.kv_store:
# Setting idx2name dictionary, required to mask out entries for weight decay.
idx2name = {}
for i,n in enumerate(model._exec_group.param_names):
idx2name[i] = n
opt = mx.optimizer.create(args.optimizer, sym=None, param_idx2name=idx2name, **optimizer_params)
# Horovod: wrap optimizer with DistributedOptimizer
# Note: enabling skip_average in DistributedOptimizer. Normalization is baked into rescale_grad.
opt = hvd.DistributedOptimizer(opt)
else:
opt = args.optimizer
    # evaluation metrics
eval_metrics = ['accuracy']
if args.top_k > 0:
eval_metrics.append(mx.metric.create(
'top_k_accuracy', top_k=args.top_k))
# callbacks that run after each batch
batch_end_callbacks = []
if 'horovod' in args.kv_store:
# if using horovod, only report on rank 0 with global batch size
if rank == 0:
batch_end_callbacks.append(mx.callback.Speedometer(
num_workers*args.batch_size, args.disp_batches))
mx_resnet_print(key=mlperf_constants.GLOBAL_BATCH_SIZE,
val=num_workers * args.batch_size)
else:
batch_end_callbacks.append(mx.callback.Speedometer(
args.batch_size, args.disp_batches))
mx_resnet_print(key=mlperf_constants.GLOBAL_BATCH_SIZE,
val=args.batch_size)
# run
last_epoch = mlperf_fit(model,
args,
train,
begin_epoch=0,
num_epoch=args.num_epochs,
eval_data=val,
eval_metric=eval_metrics,
kvstore=kv,
optimizer=opt,
optimizer_params=optimizer_params,
initializer=None if 'horovod' in args.kv_store else initializer,
arg_params=arg_params,
aux_params=aux_params,
batch_end_callback=batch_end_callbacks,
                            epoch_end_callback=epoch_end_callbacks,  # checkpoint callbacks (left empty in this script)
allow_missing=True,
eval_offset=args.eval_offset,
eval_period=args.eval_period,
accuracy_threshold=args.accuracy_threshold,
multi_gpu_per_process=(len(devs) > 1),
monitor=None)
    if args.profile_server_suffix:
        mx.profiler.set_state(state='stop', profile_process='server')
    if args.profile_worker_suffix:
        mx.profiler.set_state(state='stop', profile_process='worker')
"""
@package mi.instrument.mclane.pps.ooicore.test.test_driver
@file marine-integrations/mi/instrument/mclane/pps/ooicore/test/test_driver.py
@author Dan Mergens
@brief Test cases for ppsdn driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Dan Mergens'
__license__ = 'Apache 2.0'
import unittest
import time
import gevent
from mock import Mock
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.core.instrument.instrument_driver import DriverConfigKey, DriverProtocolState
from mi.core.time_tools import timegm_to_float
log = get_logger()
# MI imports.
from mi.idk.unit_test import \
InstrumentDriverTestCase, \
InstrumentDriverUnitTestCase, \
InstrumentDriverIntegrationTestCase, \
InstrumentDriverQualificationTestCase, \
DriverTestMixin, \
ParameterTestConfigKey, \
AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from mi.instrument.mclane.driver import \
ProtocolState, \
ProtocolEvent, \
Capability, \
Prompt, \
NEWLINE, \
McLaneSampleDataParticleKey
from mi.instrument.mclane.ras.ppsdn.driver import \
InstrumentDriver, \
DataParticleType, \
Command, \
Parameter, \
Protocol, \
PPSDNSampleDataParticle
from mi.core.exceptions import SampleException
# from interface.objects import AgentCommand
from mi.core.direct_access_server import DirectAccessTypes
from mi.core.instrument.instrument_driver import ResourceAgentState, ResourceAgentEvent
# Globals
raw_stream_received = False
parsed_stream_received = False
ACQUIRE_TIMEOUT = 45 * 60 + 50
CLEAR_TIMEOUT = 110
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.mclane.ras.ppsdn.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id='DQPJJX',
instrument_agent_name='mclane_ras_ppsdn',
instrument_agent_packet_config=DataParticleType(),
driver_startup_config={DriverConfigKey.PARAMETERS: {
Parameter.CLEAR_VOLUME: 10,
Parameter.FILL_VOLUME: 10,
Parameter.FLUSH_VOLUME: 10,
}},
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DATA PARTICLE TEST MIXIN #
# Defines a set of assert methods used for data particle verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles.
###############################################################################
class UtilMixin(DriverTestMixin):
"""
Mixin class used for storing data particle constants and common data assertion methods.
"""
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
# battery voltage request response - TODO not implemented
PPSDN_BATTERY_DATA = "Battery: 30.1V [Alkaline, 18V minimum]" + NEWLINE
# bag capacity response - TODO not implemented
PPSDN_CAPACITY_DATA = "Capacity: Maxon 250mL" + NEWLINE
PPSDN_VERSION_DATA = \
"Version:" + NEWLINE + \
NEWLINE + \
"McLane Research Laboratories, Inc." + NEWLINE + \
"CF2 Adaptive Water Transfer System" + NEWLINE + \
"Version 2.02 of Jun 7 2013 18:17" + NEWLINE + \
" Configured for: Maxon 250ml pump" + NEWLINE
# response from collect sample meta command (from FORWARD or REVERSE command)
PPSDN_SAMPLE_DATA1 = "Status 00 | 75 100 25 4 | 1.5 90.7 .907* 1 031514 001727 | 29.9 0" + NEWLINE
PPSDN_SAMPLE_DATA2 = "Status 00 | 75 100 25 4 | 3.2 101.2 101.2* 2 031514 001728 | 29.9 0" + NEWLINE
PPSDN_SAMPLE_DATA3 = "Result 00 | 75 100 25 4 | 77.2 98.5 99.1 47 031514 001813 | 29.8 1" + NEWLINE
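    # Expected field layout of the pump lines above (matching _sample_parameters below):
    # "<Status|Result> <port> | <vol_cmd> <flow_cmd> <min_flow_cmd> <time_limit> |
    #  <vol_actual> <flow_actual> <min_flow_actual> <timer> <MMDDYY HHMMSS> | <battery> <code>"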
_driver_capabilities = {
# capabilities defined in the IOS
Capability.DISCOVER: {STATES: [ProtocolState.UNKNOWN]},
Capability.CLOCK_SYNC: {STATES: [ProtocolState.COMMAND]},
}
###
# Parameter and Type Definitions
###
_driver_parameters = {
Parameter.FLUSH_VOLUME: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 150, REQUIRED: True},
Parameter.FLUSH_FLOWRATE: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 100, REQUIRED: True},
Parameter.FLUSH_MINFLOW: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 75, REQUIRED: True},
Parameter.FILL_VOLUME: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 4000, REQUIRED: True},
Parameter.FILL_FLOWRATE: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 100, REQUIRED: True},
Parameter.FILL_MINFLOW: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 75, REQUIRED: True},
Parameter.CLEAR_VOLUME: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 100, REQUIRED: True},
Parameter.CLEAR_FLOWRATE: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 100, REQUIRED: True},
Parameter.CLEAR_MINFLOW: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 75, REQUIRED: True}}
###
# Data Particle Parameters
###
_sample_parameters = {
McLaneSampleDataParticleKey.PORT: {'type': int, 'value': 0},
McLaneSampleDataParticleKey.VOLUME_COMMANDED: {'type': int, 'value': 75},
McLaneSampleDataParticleKey.FLOW_RATE_COMMANDED: {'type': int, 'value': 100},
McLaneSampleDataParticleKey.MIN_FLOW_COMMANDED: {'type': int, 'value': 25},
McLaneSampleDataParticleKey.TIME_LIMIT: {'type': int, 'value': 4},
McLaneSampleDataParticleKey.VOLUME_ACTUAL: {'type': float, 'value': 1.5},
McLaneSampleDataParticleKey.FLOW_RATE_ACTUAL: {'type': float, 'value': 90.7},
McLaneSampleDataParticleKey.MIN_FLOW_ACTUAL: {'type': float, 'value': 0.907},
McLaneSampleDataParticleKey.TIMER: {'type': int, 'value': 1},
McLaneSampleDataParticleKey.TIME: {'type': unicode, 'value': '031514 001727'},
McLaneSampleDataParticleKey.BATTERY: {'type': float, 'value': 29.9},
McLaneSampleDataParticleKey.CODE: {'type': int, 'value': 0},
}
###
# Driver Parameter Methods
###
def assert_driver_parameters(self, current_parameters, verify_values=False):
"""
Verify that all driver parameters are correct and potentially verify values.
@param current_parameters: driver parameters read from the driver instance
@param verify_values: should we verify values against definition?
"""
self.assert_parameters(current_parameters, self._driver_parameters, verify_values)
###
# Data Particle Parameters Methods
###
def assert_data_particle_sample(self, data_particle, verify_values=False):
"""
Verify an PPSDN sample data particle
@param data_particle: OPTAAA_SampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_header(data_particle, DataParticleType.PPSDN_PARSED)
self.assert_data_particle_parameters(data_particle, self._sample_parameters, verify_values)
# TODO - need to define status particle values
def assert_data_particle_status(self, data_particle, verify_values=False):
"""
Verify a PPSDN pump status data particle
@param data_particle: PPSDN_StatusDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
# self.assert_data_particle_header(data_particle, DataParticleType.PPSDN_STATUS)
# self.assert_data_particle_parameters(data_particle, self._status_parameters, verify_values)
def assert_time_synched(self, pps_time, tolerance=5):
"""
Verify the retrieved time is within acceptable tolerance
"""
pps_time = time.strptime(pps_time + 'UTC', '%m/%d/%y %H:%M:%S %Z')
current_time = time.gmtime()
diff = timegm_to_float(current_time) - timegm_to_float(pps_time)
log.info('clock synched within %d seconds', diff)
# verify that the time matches to within tolerance seconds
self.assertLessEqual(diff, tolerance)
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class TestUNIT(InstrumentDriverUnitTestCase, UtilMixin):
def setUp(self):
InstrumentDriverUnitTestCase.setUp(self)
print '----- unit test -----'
#@unittest.skip('not completed yet')
def test_driver_enums(self):
"""
        Verify that all driver enumerations have no duplicate values that might cause confusion. Also
        do a little extra validation for the Capabilities
"""
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_has_no_duplicates(Command())
# Test capabilities for duplicates, then verify that capabilities is a subset of protocol events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_chunker(self):
"""
Test the chunker and verify the particles created.
"""
chunker = StringChunker(Protocol.sieve_function)
self.assert_chunker_sample(chunker, self.PPSDN_SAMPLE_DATA1)
self.assert_chunker_sample_with_noise(chunker, self.PPSDN_SAMPLE_DATA1)
self.assert_chunker_fragmented_sample(chunker, self.PPSDN_SAMPLE_DATA1)
self.assert_chunker_combined_sample(chunker, self.PPSDN_SAMPLE_DATA1)
self.assert_chunker_sample(chunker, self.PPSDN_SAMPLE_DATA2)
self.assert_chunker_sample_with_noise(chunker, self.PPSDN_SAMPLE_DATA2)
self.assert_chunker_fragmented_sample(chunker, self.PPSDN_SAMPLE_DATA2)
self.assert_chunker_combined_sample(chunker, self.PPSDN_SAMPLE_DATA2)
self.assert_chunker_sample(chunker, self.PPSDN_SAMPLE_DATA3)
self.assert_chunker_sample_with_noise(chunker, self.PPSDN_SAMPLE_DATA3)
self.assert_chunker_fragmented_sample(chunker, self.PPSDN_SAMPLE_DATA3)
self.assert_chunker_combined_sample(chunker, self.PPSDN_SAMPLE_DATA3)
def test_corrupt_data_sample(self):
# garbage is not okay
particle = PPSDNSampleDataParticle(self.PPSDN_SAMPLE_DATA1.replace('00', 'foo'),
port_timestamp=3558720820.531179)
with self.assertRaises(SampleException):
particle.generate()
def test_got_data(self):
"""
Verify sample data passed through the got data method produces the correct data particles
"""
# Create and initialize the instrument driver with a mock port agent
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver, initial_protocol_state=ProtocolState.FILL)
self.assert_raw_particle_published(driver, True)
# validating data particles are published
self.assert_particle_published(driver, self.PPSDN_SAMPLE_DATA1, self.assert_data_particle_sample, True)
# validate that a duplicate sample is not published - TODO
#self.assert_particle_not_published(driver, self.RASFL_SAMPLE_DATA1, self.assert_data_particle_sample, True)
# validate that a new sample is published
self.assert_particle_published(driver, self.PPSDN_SAMPLE_DATA2, self.assert_data_particle_sample, False)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities.
Iterate through available capabilities, and verify that they can pass successfully through the filter.
Test silly made up capabilities to verify they are blocked by filter.
"""
mock_callback = Mock(spec="UNKNOWN WHAT SHOULD GO HERE FOR evt_callback")
protocol = Protocol(Prompt, NEWLINE, mock_callback)
driver_capabilities = Capability().list()
test_capabilities = Capability().list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in this dict must
also be defined in the protocol FSM.
"""
capabilities = {
ProtocolState.UNKNOWN: [
ProtocolEvent.DISCOVER,
],
ProtocolState.COMMAND: [
ProtocolEvent.GET,
ProtocolEvent.SET,
ProtocolEvent.INIT_PARAMS,
ProtocolEvent.START_DIRECT,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.CLEAR,
ProtocolEvent.CLOCK_SYNC,
],
ProtocolState.FLUSH: [
ProtocolEvent.FLUSH,
ProtocolEvent.PUMP_STATUS,
ProtocolEvent.INSTRUMENT_FAILURE,
],
ProtocolState.FILL: [
ProtocolEvent.FILL,
ProtocolEvent.PUMP_STATUS,
ProtocolEvent.INSTRUMENT_FAILURE,
],
ProtocolState.CLEAR: [
ProtocolEvent.CLEAR,
ProtocolEvent.PUMP_STATUS,
ProtocolEvent.INSTRUMENT_FAILURE,
],
ProtocolState.RECOVERY: [
],
ProtocolState.DIRECT_ACCESS: [
ProtocolEvent.STOP_DIRECT,
ProtocolEvent.EXECUTE_DIRECT,
],
}
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, capabilities)
#@unittest.skip('not completed yet')
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class TestINT(InstrumentDriverIntegrationTestCase, UtilMixin):
def setUp(self):
InstrumentDriverIntegrationTestCase.setUp(self)
def assert_async_particle_not_generated(self, particle_type, timeout=10):
end_time = time.time() + timeout
while end_time > time.time():
if len(self.get_sample_events(particle_type)) > 0:
self.fail("assert_async_particle_not_generated: a particle of type %s was published" % particle_type)
time.sleep(.3)
def test_parameters(self):
"""
Test driver parameters and verify their type. Startup parameters also verify the parameter
value. This test confirms that parameters are being read/converted properly and that
the startup has been applied.
"""
self.assert_initialize_driver()
reply = self.driver_client.cmd_dvr('get_resource', Parameter.ALL)
log.debug('Startup parameters: %s', reply)
self.assert_driver_parameters(reply)
# self.assert_get(Parameter.FLUSH_VOLUME, value=100)
self.assert_get(Parameter.FLUSH_VOLUME, value=10)
self.assert_get(Parameter.FLUSH_FLOWRATE, value=100)
self.assert_get(Parameter.FLUSH_MINFLOW, value=75)
# self.assert_get(Parameter.FILL_VOLUME, value=4000)
self.assert_get(Parameter.FILL_VOLUME, value=10)
self.assert_get(Parameter.FILL_FLOWRATE, value=100)
self.assert_get(Parameter.FILL_MINFLOW, value=75)
# self.assert_get(Parameter.CLEAR_VOLUME, value=100)
self.assert_get(Parameter.CLEAR_VOLUME, value=10)
self.assert_get(Parameter.CLEAR_FLOWRATE, value=100)
self.assert_get(Parameter.CLEAR_MINFLOW, value=75)
# Verify that readonly/immutable parameters cannot be set (throw exception)
self.assert_set_exception(Parameter.FLUSH_VOLUME)
self.assert_set_exception(Parameter.FLUSH_FLOWRATE)
self.assert_set_exception(Parameter.FLUSH_MINFLOW)
self.assert_set_exception(Parameter.FILL_VOLUME)
self.assert_set_exception(Parameter.FILL_FLOWRATE)
self.assert_set_exception(Parameter.FILL_MINFLOW)
self.assert_set_exception(Parameter.CLEAR_VOLUME)
self.assert_set_exception(Parameter.CLEAR_FLOWRATE)
self.assert_set_exception(Parameter.CLEAR_MINFLOW)
def test_execute_clock_sync_command_mode(self):
"""
Verify we can synchronize the instrument internal clock in command mode
"""
self.assert_initialize_driver(ProtocolState.COMMAND)
reply = self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.CLOCK_SYNC)
pps_time = reply[1]['time']
self.assert_time_synched(pps_time)
def test_acquire_sample(self):
"""
Test that we can generate sample particle with command
"""
self.assert_initialize_driver()
self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.ACQUIRE_SAMPLE, driver_timeout=ACQUIRE_TIMEOUT)
self.assert_state_change(ProtocolState.FLUSH, ACQUIRE_TIMEOUT)
self.assert_state_change(ProtocolState.FILL, ACQUIRE_TIMEOUT)
self.assert_state_change(ProtocolState.CLEAR, ACQUIRE_TIMEOUT)
self.assert_state_change(ProtocolState.COMMAND, ACQUIRE_TIMEOUT)
self.assert_async_particle_generation(DataParticleType.PPSDN_PARSED, Mock(), 7)
def test_clear(self):
"""
Test user clear command
"""
self.assert_initialize_driver()
self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.CLEAR)
self.assert_state_change(ProtocolState.CLEAR, CLEAR_TIMEOUT)
self.assert_state_change(ProtocolState.COMMAND, CLEAR_TIMEOUT)
@unittest.skip('not completed yet')
def test_obstructed_flush(self):
"""
Test condition when obstruction limits flow rate during initial flush
"""
# TODO
@unittest.skip('not completed yet')
def test_obstructed_fill(self):
"""
Test condition when obstruction occurs during collection of sample
"""
# TODO
################################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. They generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
################################################################################
@attr('QUAL', group='mi')
class TestQUAL(InstrumentDriverQualificationTestCase, UtilMixin):
def setUp(self):
InstrumentDriverQualificationTestCase.setUp(self)
def test_discover(self):
"""
Overridden because instrument does not have autosample mode and driver will always go into command mode
during the discover process after a restart.
"""
self.assert_enter_command_mode()
# Now reset and try to discover. This will stop the driver and cause it to re-discover which
# will always go back to command for this instrument
self.assert_reset()
self.assert_discover(ResourceAgentState.COMMAND)
def test_reset(self):
"""
Verify the agent can be reset
"""
self.assert_enter_command_mode()
self.assert_reset()
self.assert_enter_command_mode()
self.assert_direct_access_start_telnet(inactivity_timeout=60, session_timeout=60)
self.assert_state_change(ResourceAgentState.DIRECT_ACCESS, DriverProtocolState.DIRECT_ACCESS, 30)
self.assert_reset()
def test_direct_access_telnet_mode(self):
"""
@brief This test automatically tests that the Instrument Driver properly supports direct access to the physical
instrument. (telnet mode)
"""
self.assert_enter_command_mode()
# go into direct access
self.assert_direct_access_start_telnet(timeout=600)
self.tcp_client.send_data("port\r\n")
if not self.tcp_client.expect("Port: 00\r\n"):
self.fail("test_direct_access_telnet_mode: did not get expected response")
self.assert_direct_access_stop_telnet()
@unittest.skip('Only enabled and used for manual testing of vendor SW')
def test_direct_access_telnet_mode_manual(self):
"""
@brief This test manually tests that the Instrument Driver properly supports direct access to the physical
instrument. (virtual serial port mode)
"""
self.assert_enter_command_mode()
# go direct access
cmd = AgentCommand(command=ResourceAgentEvent.GO_DIRECT_ACCESS,
kwargs={'session_type': DirectAccessTypes.vsp,
'session_timeout': 600,
'inactivity_timeout': 600})
retval = self.instrument_agent_client.execute_agent(cmd, timeout=600)
log.warn("go_direct_access retval=" + str(retval.result))
state = self.instrument_agent_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.DIRECT_ACCESS)
print("test_direct_access_telnet_mode: waiting 120 seconds for manual testing")
gevent.sleep(120)
cmd = AgentCommand(command=ResourceAgentEvent.GO_COMMAND)
self.instrument_agent_client.execute_agent(cmd)
state = self.instrument_agent_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.COMMAND)
def test_get_capabilities(self):
"""
@brief Walk through all driver protocol states and verify capabilities
returned by get_current_capabilities
"""
self.assert_enter_command_mode()
##################
# Command Mode
##################
capabilities = {
AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
AgentCapabilityType.RESOURCE_COMMAND: [
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.CLEAR,
ProtocolEvent.CLOCK_SYNC,
ProtocolEvent.GET,
ProtocolEvent.SET,
],
AgentCapabilityType.RESOURCE_INTERFACE: None,
AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
}
self.assert_capabilities(capabilities)
##################
# Streaming Mode - no autosample for RAS
##################
##################
# DA Mode
##################
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.DIRECT_ACCESS)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = self._common_da_resource_commands()
self.assert_direct_access_start_telnet()
self.assert_capabilities(capabilities)
self.assert_direct_access_stop_telnet()
#######################
# Uninitialized Mode
#######################
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
self.assert_reset()
self.assert_capabilities(capabilities)
def test_execute_clock_sync(self):
"""
Verify we can synchronize the instrument internal clock
"""
self.assert_enter_command_mode()
reply = self.assert_execute_resource(ProtocolEvent.CLOCK_SYNC)
pps_time = reply.result['time']
self.assert_time_synched(pps_time)
"""Various utility functions.
.. include:: math-definitions.rst
"""
import collections
import numpy as np
from numpy.core.umath_tests import inner1d
from scipy.special import spherical_jn, spherical_yn
from . import default
def rotation_matrix(n1, n2):
"""Compute rotation matrix for rotation from *n1* to *n2*.
Parameters
----------
n1, n2 : (3,) array_like
Two vectors. They don't have to be normalized.
Returns
-------
(3, 3) `numpy.ndarray`
Rotation matrix.
"""
n1 = normalize_vector(n1)
n2 = normalize_vector(n2)
I = np.identity(3)
if np.all(n1 == n2):
return I # no rotation
elif np.all(n1 == -n2):
return -I # flip
# TODO: check for *very close to* parallel vectors
# Algorithm from http://math.stackexchange.com/a/476311
v = v0, v1, v2 = np.cross(n1, n2)
s = np.linalg.norm(v) # sine
c = np.inner(n1, n2) # cosine
vx = [[0, -v2, v1],
[v2, 0, -v0],
[-v1, v0, 0]] # skew-symmetric cross-product matrix
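    # Rodrigues-style construction: R = I + [v]_x + [v]_x^2 * (1 - c) / s^2,
    # where [v]_x is the skew-symmetric cross-product matrix of v = n1 x n2.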
return I + vx + np.dot(vx, vx) * (1 - c) / s**2
def wavenumber(omega, c=None):
"""Compute the wavenumber for a given radial frequency."""
if c is None:
c = default.c
return omega / c
def direction_vector(alpha, beta=np.pi/2):
"""Compute normal vector from azimuth, colatitude."""
return sph2cart(alpha, beta, 1)
def sph2cart(alpha, beta, r):
r"""Spherical to cartesian coordinate transform.
.. math::
x = r \cos \alpha \sin \beta \\
y = r \sin \alpha \sin \beta \\
z = r \cos \beta
with :math:`\alpha \in [0, 2\pi), \beta \in [0, \pi], r \geq 0`
Parameters
----------
alpha : float or array_like
        Azimuth angle in radians
    beta : float or array_like
        Colatitude angle in radians (with 0 denoting North pole)
r : float or array_like
Radius
Returns
-------
x : float or `numpy.ndarray`
x-component of Cartesian coordinates
y : float or `numpy.ndarray`
y-component of Cartesian coordinates
z : float or `numpy.ndarray`
z-component of Cartesian coordinates
"""
x = r * np.cos(alpha) * np.sin(beta)
y = r * np.sin(alpha) * np.sin(beta)
z = r * np.cos(beta)
return x, y, z
def cart2sph(x, y, z):
r"""Cartesian to spherical coordinate transform.
.. math::
\alpha = \arctan \left( \frac{y}{x} \right) \\
\beta = \arccos \left( \frac{z}{r} \right) \\
r = \sqrt{x^2 + y^2 + z^2}
    with :math:`\alpha \in [-\pi, \pi], \beta \in [0, \pi], r \geq 0`
Parameters
----------
x : float or array_like
x-component of Cartesian coordinates
y : float or array_like
y-component of Cartesian coordinates
z : float or array_like
z-component of Cartesian coordinates
Returns
-------
alpha : float or `numpy.ndarray`
        Azimuth angle in radians
    beta : float or `numpy.ndarray`
        Colatitude angle in radians (with 0 denoting North pole)
r : float or `numpy.ndarray`
Radius
"""
r = np.sqrt(x**2 + y**2 + z**2)
alpha = np.arctan2(y, x)
beta = np.arccos(z / r)
return alpha, beta, r
def asarray_1d(a, **kwargs):
"""Squeeze the input and check if the result is one-dimensional.
Returns *a* converted to a `numpy.ndarray` and stripped of
all singleton dimensions. Scalars are "upgraded" to 1D arrays.
The result must have exactly one dimension.
If not, an error is raised.
"""
result = np.squeeze(np.asarray(a, **kwargs))
if result.ndim == 0:
result = result.reshape((1,))
elif result.ndim > 1:
raise ValueError("array must be one-dimensional")
return result
def asarray_of_rows(a, **kwargs):
"""Convert to 2D array, turn column vector into row vector.
Returns *a* converted to a `numpy.ndarray` and stripped of
all singleton dimensions. If the result has exactly one dimension,
it is re-shaped into a 2D row vector.
"""
result = np.squeeze(np.asarray(a, **kwargs))
if result.ndim == 1:
result = result.reshape(1, -1)
return result
def as_xyz_components(components, **kwargs):
r"""Convert *components* to `XyzComponents` of `numpy.ndarray`\s.
The *components* are first converted to NumPy arrays (using
:func:`numpy.asarray`) which are then assembled into an
`XyzComponents` object.
Parameters
----------
components : triple or pair of array_like
The values to be used as X, Y and Z arrays. Z is optional.
**kwargs
All further arguments are forwarded to :func:`numpy.asarray`,
which is applied to the elements of *components*.
"""
return XyzComponents([np.asarray(c, **kwargs) for c in components])
def as_delayed_signal(arg, **kwargs):
"""Make sure that the given argument can be used as a signal.
Parameters
----------
arg : sequence of 1 array_like followed by 1 or 2 scalars
The first element is converted to a NumPy array, the second
element is used as the sampling rate (in Hertz) and the optional
third element is used as the starting time of the signal (in
seconds). Default starting time is 0.
**kwargs
All keyword arguments are forwarded to :func:`numpy.asarray`.
Returns
-------
`DelayedSignal`
A named tuple consisting of a `numpy.ndarray` containing the
audio data, followed by the sampling rate (in Hertz) and the
starting time (in seconds) of the signal.
Examples
--------
Typically, this is used together with tuple unpacking to assign the
audio data, the sampling rate and the starting time to separate
variables:
>>> import sfs
>>> sig = [1], 44100
>>> data, fs, signal_offset = sfs.util.as_delayed_signal(sig)
>>> data
array([1])
>>> fs
44100
>>> signal_offset
0
"""
try:
data, samplerate, *time = arg
time, = time or [0]
except (IndexError, TypeError, ValueError):
pass
else:
valid_arguments = (not np.isscalar(data) and
np.isscalar(samplerate) and
np.isscalar(time))
if valid_arguments:
data = np.asarray(data, **kwargs)
return DelayedSignal(data, samplerate, time)
raise TypeError('expected audio data, samplerate, optional start time')
def strict_arange(start, stop, step=1, *, endpoint=False, dtype=None,
**kwargs):
"""Like :func:`numpy.arange`, but compensating numeric errors.
Unlike :func:`numpy.arange`, but similar to :func:`numpy.linspace`,
providing ``endpoint=True`` includes both endpoints.
Parameters
----------
start, stop, step, dtype
See :func:`numpy.arange`.
endpoint
See :func:`numpy.linspace`.
.. note:: With ``endpoint=True``, the difference between *start*
and *end* value must be an integer multiple of the
corresponding *spacing* value!
**kwargs
All further arguments are forwarded to :func:`numpy.isclose`.
Returns
-------
`numpy.ndarray`
Array of evenly spaced values. See :func:`numpy.arange`.
"""
remainder = (stop - start) % step
if np.any(np.isclose(remainder, (0.0, step), **kwargs)):
if endpoint:
stop += step * 0.5
else:
stop -= step * 0.5
elif endpoint:
raise ValueError("Invalid stop value for endpoint=True")
return np.arange(start, stop, step, dtype)
def xyz_grid(x, y, z, *, spacing, endpoint=True, **kwargs):
"""Create a grid with given range and spacing.
Parameters
----------
x, y, z : float or pair of float
Inclusive range of the respective coordinate or a single value
if only a slice along this dimension is needed.
spacing : float or triple of float
Grid spacing. If a single value is specified, it is used for
all dimensions, if multiple values are given, one value is used
per dimension. If a dimension (*x*, *y* or *z*) has only a
single value, the corresponding spacing is ignored.
endpoint : bool, optional
If ``True`` (the default), the endpoint of each range is
included in the grid. Use ``False`` to get a result similar to
:func:`numpy.arange`. See `strict_arange()`.
**kwargs
All further arguments are forwarded to `strict_arange()`.
Returns
-------
`XyzComponents`
A grid that can be used for sound field calculations.
See Also
--------
strict_arange, numpy.meshgrid
"""
if np.isscalar(spacing):
spacing = [spacing] * 3
ranges = []
scalars = []
for i, coord in enumerate([x, y, z]):
if np.isscalar(coord):
scalars.append((i, coord))
else:
start, stop = coord
ranges.append(strict_arange(start, stop, spacing[i],
endpoint=endpoint, **kwargs))
grid = np.meshgrid(*ranges, sparse=True, copy=False)
for i, s in scalars:
grid.insert(i, s)
return XyzComponents(grid)
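# A minimal usage sketch of the grid helper above (sparse meshgrid output):
#
#     >>> grid = xyz_grid([-1, 1], [0, 2], 0, spacing=1)
#     >>> grid.x.shape, grid.y.shape
#     ((1, 3), (3, 1))
#     >>> grid.z
#     0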
def normalize(p, grid, xnorm):
"""Normalize sound field wrt position *xnorm*."""
return p / np.abs(probe(p, grid, xnorm))
def probe(p, grid, x):
"""Determine the value at position *x* in the sound field *p*."""
grid = as_xyz_components(grid)
x = asarray_1d(x)
r = np.linalg.norm(grid - x)
idx = np.unravel_index(r.argmin(), r.shape)
return p[idx]
def broadcast_zip(*args):
"""Broadcast arguments to the same shape and then use :func:`zip`."""
return zip(*np.broadcast_arrays(*args))
def normalize_vector(x):
"""Normalize a 1D vector."""
x = asarray_1d(x)
return x / np.linalg.norm(x)
def db(x, *, power=False):
"""Convert *x* to decibel.
Parameters
----------
x : array_like
Input data. Values of 0 lead to negative infinity.
power : bool, optional
If ``power=False`` (the default), *x* is squared before
conversion.
"""
with np.errstate(divide='ignore'):
return (10 if power else 20) * np.log10(np.abs(x))
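# A short worked example (amplitude vs. power quantities):
#
#     >>> db(10)              # -> 20.0  (20 * log10(10))
#     >>> db(10, power=True)  # -> 10.0  (10 * log10(10))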
class XyzComponents(np.ndarray):
"""See __init__()."""
def __init__(self, components):
r"""Triple (or pair) of components: x, y, and optionally z.
Instances of this class can be used to store coordinate grids
(either regular grids like in `xyz_grid()` or arbitrary point
clouds) or vector fields (e.g. particle velocity).
This class is a subclass of `numpy.ndarray`. It is
one-dimensional (like a plain `list`) and has a length of 3 (or
2, if no z-component is available). It uses ``dtype=object`` in
order to be able to store other `numpy.ndarray`\s of arbitrary
shapes but also scalars, if needed. Because it is a NumPy array
subclass, it can be used in operations with scalars and "normal"
NumPy arrays, as long as they have a compatible shape. Like any
NumPy array, instances of this class are iterable and can be
used, e.g., in for-loops and tuple unpacking. If slicing or
broadcasting leads to an incompatible shape, a plain
`numpy.ndarray` with ``dtype=object`` is returned.
To make sure the *components* are NumPy arrays themselves, use
`as_xyz_components()`.
Parameters
----------
components : (3,) or (2,) array_like
The values to be used as X, Y and Z data. Z is optional.
"""
# This method does nothing, it's only here for the documentation!
def __new__(cls, components):
# object arrays cannot be created and populated in a single step:
obj = np.ndarray.__new__(cls, len(components), dtype=object)
for i, component in enumerate(components):
obj[i] = component
return obj
def __array_finalize__(self, obj):
if self.ndim == 0:
pass # this is allowed, e.g. for np.inner()
elif self.ndim > 1 or len(self) not in (2, 3):
raise ValueError("XyzComponents can only have 2 or 3 components")
def __array_prepare__(self, obj, context=None):
if obj.ndim == 1 and len(obj) in (2, 3):
return obj.view(XyzComponents)
return obj
def __array_wrap__(self, obj, context=None):
if obj.ndim != 1 or len(obj) not in (2, 3):
return obj.view(np.ndarray)
return obj
def __getitem__(self, index):
if isinstance(index, slice):
start, stop, step = index.indices(len(self))
if start == 0 and stop in (2, 3) and step == 1:
return np.ndarray.__getitem__(self, index)
# Slices other than xy and xyz are "downgraded" to ndarray
return np.ndarray.__getitem__(self.view(np.ndarray), index)
def __repr__(self):
return 'XyzComponents(\n' + ',\n'.join(
' {}={}'.format(name, repr(data).replace('\n', '\n '))
for name, data in zip('xyz', self)) + ')'
def make_property(index, doc):
def getter(self):
return self[index]
def setter(self, value):
self[index] = value
return property(getter, setter, doc=doc)
x = make_property(0, doc='x-component.')
y = make_property(1, doc='y-component.')
z = make_property(2, doc='z-component (optional).')
del make_property
def apply(self, func, *args, **kwargs):
"""Apply a function to each component.
The function *func* will be called once for each component,
passing the current component as first argument. All further
arguments are passed after that.
The results are returned as a new `XyzComponents` object.
"""
return XyzComponents([func(i, *args, **kwargs) for i in self])
DelayedSignal = collections.namedtuple('DelayedSignal', 'data samplerate time')
"""A tuple of audio data, sampling rate and start time.
This class (a `collections.namedtuple`) is not meant to be instantiated
by users.
To pass a signal to a function, just use a simple `tuple` or `list`
containing the audio data and the sampling rate (in Hertz), with an
optional starting time (in seconds) as a third item.
If you want to ensure that a given variable contains a valid signal, use
`sfs.util.as_delayed_signal()`.
"""
def image_sources_for_box(x, L, N, *, prune=True):
"""Image source method for a cuboid room.
The classical method by Allen and Berkley :cite:`Allen1979`.
Parameters
----------
x : (D,) array_like
Original source location within box.
Values between 0 and corresponding side length.
L : (D,) array_like
        Side lengths of the room.
N : int
Maximum number of reflections per image source, see below.
prune : bool, optional
        Selection of image sources:
- If True (default):
Returns all images reflected up to N times.
This is the usual interpretation of N as "maximum order".
        - If False:
            Returns images reflected up to N times between individual
            wall pairs, for a total of :math:`M := (2N+1)^D` sources.
This larger set is useful e.g. to select image sources based on
distance to listener, as suggested by :cite:`Borish1984`.
Returns
-------
xs : (M, D) `numpy.ndarray`
original & image source locations.
wall_count : (M, 2D) `numpy.ndarray`
number of reflections at individual walls for each source.
"""
def _images_1d_unit_box(x, N):
result = np.arange(-N, N + 1, dtype=x.dtype)
result[N % 2::2] += x
result[1 - (N % 2)::2] += 1 - x
return result
def _count_walls_1d(a):
b = np.floor(a/2)
c = np.ceil((a-1)/2)
return np.abs(np.stack([b, c], axis=1)).astype(int)
L = asarray_1d(L)
x = asarray_1d(x)/L
D = len(x)
xs = [_images_1d_unit_box(coord, N) for coord in x]
xs = np.reshape(np.transpose(np.meshgrid(*xs, indexing='ij')), (-1, D))
wall_count = np.concatenate([_count_walls_1d(d) for d in xs.T], axis=1)
xs *= L
if prune is True:
N_mask = np.sum(wall_count, axis=1) <= N
xs = xs[N_mask, :]
wall_count = wall_count[N_mask, :]
return xs, wall_count
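# A minimal sketch for a 2D room (D = 2), using the default ``prune=True``:
#
#     >>> xs, wall_count = image_sources_for_box([1, 1], [4, 3], 1)
#     >>> xs.shape            # original source plus the four first-order images
#     (5, 2)
#     >>> wall_count.shape    # reflection counts at the 2*D = 4 walls
#     (5, 4)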
def spherical_hn2(n, z):
r"""Spherical Hankel function of 2nd kind.
Defined as https://dlmf.nist.gov/10.47.E6,
.. math::
\hankel{2}{n}{z} = \sqrt{\frac{\pi}{2z}}
\Hankel{2}{n + \frac{1}{2}}{z},
where :math:`\Hankel{2}{n}{\cdot}` is the Hankel function of the
second kind and n-th order, and :math:`z` its complex argument.
Parameters
----------
n : array_like
Order of the spherical Hankel function (n >= 0).
z : array_like
Argument of the spherical Hankel function.
"""
return spherical_jn(n, z) - 1j * spherical_yn(n, z)
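# A quick numerical check (assuming ``np`` and the SciPy special functions
# imported above); for n = 0 the closed form is 1j * exp(-1j*z) / z:
#
#     >>> np.allclose(spherical_hn2(0, 2.0), 1j * np.exp(-2.0j) / 2.0)  # -> True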
def source_selection_plane(n0, n):
"""Secondary source selection for a plane wave.
Eq.(13) from :cite:`Spors2008`
"""
n0 = asarray_of_rows(n0)
n = normalize_vector(n)
return np.inner(n, n0) >= default.selection_tolerance
def source_selection_point(n0, x0, xs):
"""Secondary source selection for a point source.
Eq.(15) from :cite:`Spors2008`
"""
n0 = asarray_of_rows(n0)
x0 = asarray_of_rows(x0)
xs = asarray_1d(xs)
ds = x0 - xs
return inner1d(ds, n0) >= default.selection_tolerance
def source_selection_line(n0, x0, xs):
"""Secondary source selection for a line source.
compare Eq.(15) from :cite:`Spors2008`
"""
return source_selection_point(n0, x0, xs)
def source_selection_focused(ns, x0, xs):
"""Secondary source selection for a focused source.
Eq.(2.78) from :cite:`Wierstorf2014`
"""
x0 = asarray_of_rows(x0)
xs = asarray_1d(xs)
ns = normalize_vector(ns)
ds = xs - x0
return inner1d(ns, ds) >= default.selection_tolerance
def source_selection_all(N):
"""Select all secondary sources."""
return np.ones(N, dtype=bool)
def max_order_circular_harmonics(N):
r"""Maximum order of 2D/2.5D HOA.
It returns the maximum order for which no spatial aliasing appears.
It is given on page 132 of :cite:`Ahrens2012` as
.. math::
\mathtt{max\_order} =
\begin{cases}
N/2 - 1 & \text{even}\;N \\
(N-1)/2 & \text{odd}\;N,
\end{cases}
which is equivalent to
.. math::
\mathtt{max\_order} = \big\lfloor \frac{N - 1}{2} \big\rfloor.
Parameters
----------
N : int
Number of secondary sources.
"""
return (N - 1) // 2
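# A short worked example of the formula above:
#
#     >>> max_order_circular_harmonics(64)  # even N: N/2 - 1
#     31
#     >>> max_order_circular_harmonics(65)  # odd N: (N - 1)/2
#     32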
def max_order_spherical_harmonics(N):
r"""Maximum order of 3D HOA.
.. math::
\mathtt{max\_order} = \lfloor \sqrt{N} \rfloor - 1.
Parameters
----------
N : int
Number of secondary sources.
"""
return int(np.sqrt(N) - 1)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import common as net_common
from tempest.common import debug
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest.test import attr
from tempest.test import services
LOG = logging.getLogger(__name__)
class TestNetworkBasicOps(manager.NetworkScenarioTest):
"""
This smoke test suite assumes that Nova has been configured to
    boot VMs with Neutron-managed networking, and attempts to
verify network connectivity as follows:
* For a freshly-booted VM with an IP address ("port") on a given network:
- the Tempest host can ping the IP address. This implies, but
does not guarantee (see the ssh check that follows), that the
VM has been assigned the correct IP address and has
connectivity to the Tempest host.
- the Tempest host can perform key-based authentication to an
ssh server hosted at the IP address. This check guarantees
that the IP address is associated with the target VM.
# TODO(mnewby) - Need to implement the following:
- the Tempest host can ssh into the VM via the IP address and
successfully execute the following:
- ping an external IP address, implying external connectivity.
- ping an external hostname, implying that dns is correctly
configured.
- ping an internal IP address, implying connectivity to another
VM on the same network.
There are presumed to be two types of networks: tenant and
public. A tenant network may or may not be reachable from the
Tempest host. A public network is assumed to be reachable from
the Tempest host, and it should be possible to associate a public
('floating') IP address with a tenant ('fixed') IP address to
facilitate external connectivity to a potentially unroutable
tenant IP address.
This test suite can be configured to test network connectivity to
a VM via a tenant network, a public network, or both. If both
networking types are to be evaluated, tests that need to be
executed remotely on the VM (via ssh) will only be run against
one of the networks (to minimize test execution time).
Determine which types of networks to test as follows:
* Configure tenant network checks (via the
'tenant_networks_reachable' key) if the Tempest host should
have direct connectivity to tenant networks. This is likely to
be the case if Tempest is running on the same host as a
single-node devstack installation with IP namespaces disabled.
* Configure checks for a public network if a public network has
been configured prior to the test suite being run and if the
Tempest host should have connectivity to that public network.
Checking connectivity for a public network requires that a
value be provided for 'public_network_id'. A value can
optionally be provided for 'public_router_id' if tenants will
use a shared router to access a public network (as is likely to
be the case when IP namespaces are not enabled). If a value is
not provided for 'public_router_id', a router will be created
for each tenant and use the network identified by
'public_network_id' as its gateway.
"""
CONF = config.TempestConfig()
@classmethod
def check_preconditions(cls):
super(TestNetworkBasicOps, cls).check_preconditions()
cfg = cls.config.network
if not (cfg.tenant_networks_reachable or cfg.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
@classmethod
def setUpClass(cls):
super(TestNetworkBasicOps, cls).setUpClass()
cls.check_preconditions()
# TODO(mnewby) Consider looking up entities as needed instead
# of storing them as collections on the class.
cls.keypairs = {}
cls.security_groups = {}
cls.networks = []
cls.subnets = []
cls.routers = []
cls.servers = []
cls.floating_ips = {}
def _get_router(self, tenant_id):
"""Retrieve a router for the given tenant id.
If a public router has been configured, it will be returned.
If a public router has not been configured, but a public
network has, a tenant router will be created and returned that
routes traffic to the public network.
"""
router_id = self.config.network.public_router_id
network_id = self.config.network.public_network_id
if router_id:
result = self.network_client.show_router(router_id)
return net_common.AttributeDict(**result['router'])
elif network_id:
router = self._create_router(tenant_id)
router.add_gateway(network_id)
return router
else:
raise Exception("Neither of 'public_router_id' or "
"'public_network_id' has been defined.")
def _create_router(self, tenant_id, namestart='router-smoke-'):
name = data_utils.rand_name(namestart)
body = dict(
router=dict(
name=name,
admin_state_up=True,
tenant_id=tenant_id,
),
)
result = self.network_client.create_router(body=body)
router = net_common.DeletableRouter(client=self.network_client,
**result['router'])
self.assertEqual(router.name, name)
self.set_resource(name, router)
return router
def _create_keypairs(self):
self.keypairs[self.tenant_id] = self.create_keypair(
name=data_utils.rand_name('keypair-smoke-'))
def _create_security_groups(self):
self.security_groups[self.tenant_id] = self._create_security_group()
def _create_networks(self):
network = self._create_network(self.tenant_id)
router = self._get_router(self.tenant_id)
subnet = self._create_subnet(network)
subnet.add_to_router(router.id)
self.networks.append(network)
self.subnets.append(subnet)
self.routers.append(router)
def _check_networks(self):
# Checks that we see the newly created network/subnet/router via
# checking the result of list_[networks,routers,subnets]
seen_nets = self._list_networks()
seen_names = [n['name'] for n in seen_nets]
seen_ids = [n['id'] for n in seen_nets]
for mynet in self.networks:
self.assertIn(mynet.name, seen_names)
self.assertIn(mynet.id, seen_ids)
seen_subnets = self._list_subnets()
seen_net_ids = [n['network_id'] for n in seen_subnets]
seen_subnet_ids = [n['id'] for n in seen_subnets]
for mynet in self.networks:
self.assertIn(mynet.id, seen_net_ids)
for mysubnet in self.subnets:
self.assertIn(mysubnet.id, seen_subnet_ids)
seen_routers = self._list_routers()
seen_router_ids = [n['id'] for n in seen_routers]
seen_router_names = [n['name'] for n in seen_routers]
for myrouter in self.routers:
self.assertIn(myrouter.name, seen_router_names)
self.assertIn(myrouter.id, seen_router_ids)
def _create_server(self, name, network):
tenant_id = network.tenant_id
keypair_name = self.keypairs[tenant_id].name
security_groups = [self.security_groups[tenant_id].name]
create_kwargs = {
'nics': [
{'net-id': network.id},
],
'key_name': keypair_name,
'security_groups': security_groups,
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
return server
def _create_servers(self):
for i, network in enumerate(self.networks):
name = data_utils.rand_name('server-smoke-%d-' % i)
server = self._create_server(name, network)
self.servers.append(server)
def _check_tenant_network_connectivity(self):
if not self.config.network.tenant_networks_reachable:
msg = 'Tenant networks not configured to be reachable.'
LOG.info(msg)
return
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
ssh_login = self.config.compute.image_ssh_user
private_key = self.keypairs[self.tenant_id].private_key
for server in self.servers:
for net_name, ip_addresses in server.networks.iteritems():
for ip_address in ip_addresses:
self._check_vm_connectivity(ip_address, ssh_login,
private_key)
def _assign_floating_ips(self):
public_network_id = self.config.network.public_network_id
for server in self.servers:
floating_ip = self._create_floating_ip(server, public_network_id)
self.floating_ips.setdefault(server, [])
self.floating_ips[server].append(floating_ip)
def _check_public_network_connectivity(self):
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
ssh_login = self.config.compute.image_ssh_user
private_key = self.keypairs[self.tenant_id].private_key
try:
for server, floating_ips in self.floating_ips.iteritems():
for floating_ip in floating_ips:
ip_address = floating_ip.floating_ip_address
self._check_vm_connectivity(ip_address,
ssh_login,
private_key)
except Exception as exc:
LOG.exception(exc)
debug.log_ip_ns()
raise exc
@attr(type='smoke')
@services('compute', 'network')
def test_network_basic_ops(self):
self._create_keypairs()
self._create_security_groups()
self._create_networks()
self._check_networks()
self._create_servers()
self._assign_floating_ips()
self._check_public_network_connectivity()
self._check_tenant_network_connectivity()
|
|
import ctypes
import torch
from . import cudart
SUCCESS = 0
ERROR_NOT_READY = 34
class CudaError(RuntimeError):
def __init__(self, code):
msg = cudart().cudaGetErrorString(code).decode('utf-8')
super(CudaError, self).__init__('{0} ({1})'.format(msg, code))
def check_error(res):
if res != SUCCESS:
raise CudaError(res)
class Stream(torch._C._CudaStreamBase):
"""Wrapper around a CUDA stream.
Arguments:
device(int, optional): a device on which to allocate the Stream.
priority(int, optional): priority of the stream. Lower numbers
represent higher priorities.
"""
def __new__(cls, device=-1, priority=0, **kwargs):
with torch.cuda.device(device):
return super(Stream, cls).__new__(cls, priority=priority, **kwargs)
def wait_event(self, event):
"""Makes all future work submitted to the stream wait for an event.
Arguments:
event (Event): an event to wait for.
"""
check_error(cudart().cudaStreamWaitEvent(self, event, ctypes.c_int(0)))
def wait_stream(self, stream):
"""Synchronizes with another stream.
All future work submitted to this stream will wait until all kernels
submitted to a given stream at the time of call complete.
Arguments:
stream (Stream): a stream to synchronize.
"""
self.wait_event(stream.record_event())
def record_event(self, event=None):
"""Records an event.
Arguments:
event (Event, optional): event to record. If not given, a new one
will be allocated.
Returns:
Recorded event.
"""
if event is None:
event = Event()
check_error(cudart().cudaEventRecord(event, self))
return event
def query(self):
"""Checks if all the work submitted has been completed.
Returns:
A boolean indicating if all kernels in this stream are completed.
"""
res = cudart().cudaStreamQuery(self)
if res == ERROR_NOT_READY:
return False
check_error(res)
return True
def synchronize(self):
"""Wait for all the kernels in this stream to complete."""
check_error(cudart().cudaStreamSynchronize(self))
@staticmethod
def priority_range():
least_priority = ctypes.c_int()
greatest_priority = ctypes.c_int()
check_error(cudart().cudaDeviceGetStreamPriorityRange(
ctypes.byref(least_priority), ctypes.byref(greatest_priority)))
return (least_priority.value, greatest_priority.value)
@property
def priority(self):
priority = ctypes.c_int()
check_error(cudart().cudaStreamGetPriority(self, ctypes.byref(priority)))
return priority.value
@property
def _as_parameter_(self):
return ctypes.c_void_p(self.cuda_stream)
def __eq__(self, o):
if isinstance(o, Stream):
return o.device == self.device and o.cuda_stream == self.cuda_stream
return False
def __hash__(self):
return hash((self.cuda_stream, self.device))
def __repr__(self):
return ('<torch.cuda.Stream device={0} cuda_stream={1:#x}>'
.format(self.device, self.cuda_stream))
class EventHandle(ctypes.Structure):
IPC_HANDLE_SIZE = 64
_fields_ = [('reserved', ctypes.c_char * IPC_HANDLE_SIZE)]
class Event(object):
"""Wrapper around CUDA event.
Arguments:
enable_timing (bool): indicates if the event should measure time
(default: False)
blocking (bool): if true, :meth:`wait` will be blocking (default: False)
interprocess (bool): if true, the event can be shared between processes
(default: False)
"""
DEFAULT = 0x0
BLOCKING_SYNC = 0x1
DISABLE_TIMING = 0x2
INTERPROCESS = 0x4
def __init__(self, enable_timing=False, blocking=False, interprocess=False,
_handle=None):
flags = Event.DEFAULT
if not enable_timing:
flags |= Event.DISABLE_TIMING
if blocking:
flags |= Event.BLOCKING_SYNC
if interprocess:
flags |= Event.INTERPROCESS
ptr = ctypes.c_void_p()
self._cudart = cudart()
if _handle:
check_error(self._cudart.cudaIpcOpenEventHandle(ctypes.byref(ptr), _handle))
else:
check_error(self._cudart.cudaEventCreateWithFlags(ctypes.byref(ptr), ctypes.c_uint(flags)))
self._as_parameter_ = ptr
def __del__(self):
if hasattr(self, '_as_parameter_'):
check_error(self._cudart.cudaEventDestroy(self._as_parameter_))
del self._as_parameter_
def record(self, stream=None):
"""Records the event in a given stream."""
if stream is None:
stream = torch.cuda.current_stream()
stream.record_event(self)
def wait(self, stream=None):
"""Makes a given stream wait for the event."""
if stream is None:
stream = torch.cuda.current_stream()
stream.wait_event(self)
def query(self):
"""Checks if the event has been recorded.
Returns:
A boolean indicating if the event has been recorded.
"""
res = cudart().cudaEventQuery(self)
if res == ERROR_NOT_READY:
return False
check_error(res)
return True
def elapsed_time(self, end_event):
"""Returns the time elapsed before the event was recorded."""
time_ms = ctypes.c_float()
check_error(cudart().cudaEventElapsedTime(
ctypes.byref(time_ms), self, end_event))
return time_ms.value
def synchronize(self):
"""Synchronizes with the event."""
check_error(cudart().cudaEventSynchronize(self))
def ipc_handle(self):
"""Returns an IPC handle of this event."""
handle = EventHandle()
check_error(cudart().cudaIpcGetEventHandle(ctypes.byref(handle), self))
return handle
def __repr__(self):
return '<torch.cuda.Event {0:#x}>'.format(self._as_parameter_.value)
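# A minimal timing sketch using the wrappers above:
#
#     start = Event(enable_timing=True)
#     end = Event(enable_timing=True)
#     start.record()
#     # ... queue CUDA work on the current stream ...
#     end.record()
#     end.synchronize()
#     elapsed_ms = start.elapsed_time(end)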
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.SqlDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import sql_dataset_test_base
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class SqlDatasetTest(sql_dataset_test_base.SqlDatasetTestBase):
# Test that SqlDataset can read from a database table.
def testReadResultSet(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string), 2)
with self.cached_session() as sess:
for _ in range(2): # Run twice to verify statelessness of db operations.
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC"
})
for _ in range(2): # Dataset is repeated. See setUp.
self.assertEqual((b"John", b"Doe", b"Hi!"), self.evaluate(get_next))
self.assertEqual((b"Jane", b"Moe", b"Hi again!"),
self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
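  # For reference, a minimal sketch of the public API exercised by
  # `_createSqlDataset` (assuming `import tensorflow as tf`, TF 1.x graph
  # mode, and a SQLite file at a hypothetical path; the test base wires up
  # the placeholders used above):
  #
  #     dataset = tf.data.experimental.SqlDataset(
  #         "sqlite", "/path/to/students.db",
  #         "SELECT first_name, last_name, motto FROM students",
  #         (dtypes.string, dtypes.string, dtypes.string))
  #     iterator = dataset.make_initializable_iterator()
  #     init_op, get_next = iterator.initializer, iterator.get_next()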
# Test that SqlDataset works on a join query.
def testReadResultSetJoinQuery(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT students.first_name, state, motto FROM students "
"INNER JOIN people "
"ON students.first_name = people.first_name "
"AND students.last_name = people.last_name"
})
self.assertEqual((b"John", b"California", b"Hi!"),
self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that SqlDataset can read a database entry with a null-terminator
# in the middle of the text and place the entry in a `string` tensor.
def testReadResultSetNullTerminator(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, last_name, favorite_nonsense_word "
"FROM students ORDER BY first_name DESC"
})
self.assertEqual((b"John", b"Doe", b"n\0nsense"), self.evaluate(get_next))
self.assertEqual((b"Jane", b"Moe", b"nonsense\0"),
self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that SqlDataset works when used on two different queries.
# Because the output types of the dataset must be determined at graph-creation
# time, the two queries must have the same number and types of columns.
def testReadResultSetReuseSqlDataset(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", b"Doe", b"Hi!"), self.evaluate(get_next))
self.assertEqual((b"Jane", b"Moe", b"Hi again!"), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, last_name, state FROM people "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", b"Doe", b"California"),
self.evaluate(get_next))
self.assertEqual((b"Benjamin", b"Franklin", b"Pennsylvania"),
self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
  # Test that an `OutOfRangeError` is raised on the first call to
  # `get_next` if the result set is empty.
def testReadEmptyResultSet(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, last_name, motto FROM students "
"WHERE first_name = 'Nonexistent'"
})
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that an error is raised when `driver_name` is invalid.
def testReadResultSetWithInvalidDriverName(self):
init_op = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))[0]
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(
init_op,
feed_dict={
self.driver_name: "sqlfake",
self.query: "SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC"
})
# Test that an error is raised when a column name in `query` is nonexistent
def testReadResultSetWithInvalidColumnName(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, last_name, fake_column FROM students "
"ORDER BY first_name DESC"
})
with self.assertRaises(errors.UnknownError):
self.evaluate(get_next)
# Test that an error is raised when there is a syntax error in `query`.
def testReadResultSetOfQueryWithSyntaxError(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELEmispellECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC"
})
with self.assertRaises(errors.UnknownError):
self.evaluate(get_next)
# Test that an error is raised when the number of columns in `query`
# does not match the length of `output_types`.
def testReadResultSetWithMismatchBetweenColumnsAndOutputTypes(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, last_name FROM students "
"ORDER BY first_name DESC"
})
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next)
# Test that no results are returned when `query` is an insert query rather
# than a select query. In particular, the error refers to the number of
# output types passed to the op not matching the number of columns in the
# result set of the query (namely, 0 for an insert statement.)
def testReadResultSetOfInsertQuery(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"INSERT INTO students (first_name, last_name, motto) "
"VALUES ('Foo', 'Bar', 'Baz'), ('Fizz', 'Buzz', 'Fizzbuzz')"
})
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int8` tensor.
def testReadResultSetInt8(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int8))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), self.evaluate(get_next))
self.assertEqual((b"Jane", 127), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int8` tensor.
def testReadResultSetInt8NegativeAndZero(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int8,
dtypes.int8))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC"
})
self.assertEqual((b"John", 0, -2), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int8` tensor.
def testReadResultSetInt8MaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.int8, dtypes.int8))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT desk_number, favorite_negative_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((9, -2), self.evaluate(get_next))
# Max and min values of int8
self.assertEqual((127, -128), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int16` tensor.
def testReadResultSetInt16(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int16))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), self.evaluate(get_next))
self.assertEqual((b"Jane", 127), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int16` tensor.
def testReadResultSetInt16NegativeAndZero(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int16,
dtypes.int16))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC"
})
self.assertEqual((b"John", 0, -2), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int16` tensor.
def testReadResultSetInt16MaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int16))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, favorite_medium_sized_number "
"FROM students ORDER BY first_name DESC"
})
# Max value of int16
self.assertEqual((b"John", 32767), self.evaluate(get_next))
# Min value of int16
self.assertEqual((b"Jane", -32768), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int32` tensor.
def testReadResultSetInt32(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int32))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), self.evaluate(get_next))
self.assertEqual((b"Jane", 127), self.evaluate(get_next))
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int32` tensor.
def testReadResultSetInt32NegativeAndZero(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int32))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, income FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 0), self.evaluate(get_next))
self.assertEqual((b"Jane", -20000), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int32` tensor.
def testReadResultSetInt32MaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int32))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, favorite_number FROM students "
"ORDER BY first_name DESC"
})
# Max value of int32
self.assertEqual((b"John", 2147483647), self.evaluate(get_next))
# Min value of int32
self.assertEqual((b"Jane", -2147483648), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a numeric `varchar` from a SQLite database
# table and place it in an `int32` tensor.
def testReadResultSetInt32VarCharColumnAsInt(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int32))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, school_id FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 123), self.evaluate(get_next))
self.assertEqual((b"Jane", 1000), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table
# and place it in an `int64` tensor.
def testReadResultSetInt64(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int64))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), self.evaluate(get_next))
self.assertEqual((b"Jane", 127), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int64` tensor.
def testReadResultSetInt64NegativeAndZero(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int64))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, income FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 0), self.evaluate(get_next))
self.assertEqual((b"Jane", -20000), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int64` tensor.
def testReadResultSetInt64MaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int64))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, favorite_big_number FROM students "
"ORDER BY first_name DESC"
})
# Max value of int64
self.assertEqual((b"John", 9223372036854775807), self.evaluate(get_next))
# Min value of int64
self.assertEqual((b"Jane", -9223372036854775808), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in a `uint8` tensor.
def testReadResultSetUInt8(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.uint8))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), self.evaluate(get_next))
self.assertEqual((b"Jane", 127), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read the minimum and maximum uint8 values from a
# SQLite database table and place them in `uint8` tensors.
def testReadResultSetUInt8MinAndMaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.uint8))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, brownie_points FROM students "
"ORDER BY first_name DESC"
})
# Min value of uint8
self.assertEqual((b"John", 0), self.evaluate(get_next))
# Max value of uint8
self.assertEqual((b"Jane", 255), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table
# and place it in a `uint16` tensor.
def testReadResultSetUInt16(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.uint16))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), self.evaluate(get_next))
self.assertEqual((b"Jane", 127), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read the minimum and maximum uint16 values from a
# SQLite database table and place them in `uint16` tensors.
def testReadResultSetUInt16MinAndMaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.uint16))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, account_balance FROM students "
"ORDER BY first_name DESC"
})
# Min value of uint16
self.assertEqual((b"John", 0), self.evaluate(get_next))
# Max value of uint16
self.assertEqual((b"Jane", 65535), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a 0-valued and 1-valued integer from a
# SQLite database table and place them as `True` and `False` respectively
# in `bool` tensors.
def testReadResultSetBool(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.bool))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, registration_complete FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", True), self.evaluate(get_next))
self.assertEqual((b"Jane", False), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read an integer that is not 0-valued or 1-valued
# from a SQLite database table and place it as `True` in a `bool` tensor.
def testReadResultSetBoolNotZeroOrOne(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.bool))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, favorite_medium_sized_number "
"FROM students ORDER BY first_name DESC"
})
self.assertEqual((b"John", True), self.evaluate(get_next))
self.assertEqual((b"Jane", True), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a float from a SQLite database table
# and place it in a `float64` tensor.
def testReadResultSetFloat64(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.float64))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, last_name, victories FROM townspeople "
"ORDER BY first_name"
})
self.assertEqual((b"George", b"Washington", 20.0),
self.evaluate(get_next))
self.assertEqual((b"John", b"Adams", -19.95), self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a float from a SQLite database table beyond
# the precision of 64-bit IEEE, without throwing an error. Test that
# `SqlDataset` identifies such a value as equal to itself.
def testReadResultSetFloat64OverlyPrecise(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.float64))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, last_name, accolades FROM townspeople "
"ORDER BY first_name"
})
self.assertEqual(
(b"George", b"Washington",
1331241.321342132321324589798264627463827647382647382643874),
self.evaluate(get_next))
self.assertEqual(
(b"John", b"Adams",
1331241321342132321324589798264627463827647382647382643874.0),
self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Test that `SqlDataset` can read a float from a SQLite database table,
# representing the largest integer representable as a 64-bit IEEE float
# such that the previous integer is also representable as a 64-bit IEEE float.
# Test that `SqlDataset` can distinguish these two numbers.
def testReadResultSetFloat64LargestConsecutiveWholeNumbersNotEqual(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.float64))
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, last_name, triumphs FROM townspeople "
"ORDER BY first_name"
})
self.assertNotEqual((b"George", b"Washington", 9007199254740992.0),
self.evaluate(get_next))
self.assertNotEqual((b"John", b"Adams", 9007199254740991.0),
self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
if __name__ == "__main__":
test.main()
|
|
from ..predicate import (KeyIndex, ClassIndex, PredicateRegistry,
match_instance, match_key)
from ..error import RegistrationError
import pytest
def test_key_index_permutations():
i = KeyIndex()
assert list(i.permutations('GET')) == ['GET']
def test_class_index_permutations():
class Foo(object):
pass
class Bar(Foo):
pass
class Qux:
pass
i = ClassIndex()
assert list(i.permutations(Foo)) == [Foo, object]
assert list(i.permutations(Bar)) == [Bar, Foo, object]
assert list(i.permutations(Qux)) == [Qux, object]
def test_multi_class_predicate_permutations():
class ABase(object):
pass
class ASub(ABase):
pass
class BBase(object):
pass
class BSub(BBase):
pass
    i = PredicateRegistry(match_instance('a'), match_instance('b'))
assert list(i.permutations([ASub, BSub])) == [
(ASub, BSub),
(ASub, BBase),
(ASub, object),
(ABase, BSub),
(ABase, BBase),
(ABase, object),
(object, BSub),
(object, BBase),
(object, object),
]
def test_multi_key_predicate_permutations():
i = PredicateRegistry(
match_key('a'),
match_key('b'),
match_key('c'),
)
assert list(i.permutations(['A', 'B', 'C'])) == [
('A', 'B', 'C')]
def test_registry_single_key_predicate():
r = PredicateRegistry(match_key('a'))
r.register(('A',), 'A value')
assert r.component(('A',)) == 'A value'
assert r.component(('B',)) is None
assert list(r.all(('A',))) == ['A value']
assert list(r.all(('B',))) == []
def test_registry_single_class_predicate():
r = PredicateRegistry(match_instance('a'))
class Foo(object):
pass
class FooSub(Foo):
pass
class Qux(object):
pass
r.register((Foo,), 'foo')
assert r.component((Foo,)) == 'foo'
assert r.component((FooSub,)) == 'foo'
assert r.component((Qux,)) is None
assert list(r.all((Foo,))) == ['foo']
assert list(r.all((FooSub,))) == ['foo']
assert list(r.all((Qux,))) == []
def test_registry_single_classic_class_predicate():
r = PredicateRegistry(match_instance('a'))
class Foo:
pass
class FooSub(Foo):
pass
class Qux:
pass
r.register((Foo,), 'foo')
assert r.component((Foo,)) == 'foo'
assert r.component((FooSub,)) == 'foo'
assert r.component((Qux,)) is None
assert list(r.all((Foo,))) == ['foo']
assert list(r.all((FooSub,))) == ['foo']
assert list(r.all((Qux,))) == []
def test_registry_single_class_predicate_also_sub():
r = PredicateRegistry(match_instance('a'))
class Foo(object):
pass
class FooSub(Foo):
pass
class Qux(object):
pass
r.register((Foo,), 'foo')
r.register((FooSub,), 'sub')
assert r.component((Foo,)) == 'foo'
assert r.component((FooSub,)) == 'sub'
assert r.component((Qux,)) is None
assert list(r.all((Foo,))) == ['foo']
assert list(r.all((FooSub,))) == ['sub', 'foo']
assert list(r.all((Qux,))) == []
def test_registry_multi_class_predicate():
r = PredicateRegistry(
match_instance('a'),
match_instance('b'),
)
class A(object):
pass
class AA(A):
pass
class B(object):
pass
class BB(B):
pass
r.register((A, B), 'foo')
assert r.component((A, B)) == 'foo'
assert r.component((AA, BB)) == 'foo'
assert r.component((AA, B)) == 'foo'
assert r.component((A, BB)) == 'foo'
assert r.component((A, object)) is None
assert r.component((object, B)) is None
assert list(r.all((A, B))) == ['foo']
assert list(r.all((AA, BB))) == ['foo']
assert list(r.all((AA, B))) == ['foo']
assert list(r.all((A, BB))) == ['foo']
assert list(r.all((A, object))) == []
assert list(r.all((object, B))) == []
def test_registry_multi_mixed_predicate_class_key():
r = PredicateRegistry(
match_instance('a'),
match_key('b'),
)
class A(object):
pass
class AA(A):
pass
class Unknown(object):
pass
r.register((A, 'B'), 'foo')
assert r.component((A, 'B')) == 'foo'
assert r.component((A, 'unknown')) is None
assert r.component((AA, 'B')) == 'foo'
assert r.component((AA, 'unknown')) is None
assert r.component((Unknown, 'B')) is None
assert list(r.all((A, 'B'))) == ['foo']
assert list(r.all((A, 'unknown'))) == []
assert list(r.all((AA, 'B'))) == ['foo']
assert list(r.all((AA, 'unknown'))) == []
assert list(r.all((Unknown, 'B'))) == []
def test_registry_multi_mixed_predicate_key_class():
r = PredicateRegistry(
match_key('a'),
match_instance('b'),
)
class B(object):
pass
class BB(B):
pass
class Unknown(object):
pass
r.register(('A', B), 'foo')
assert r.component(('A', B)) == 'foo'
assert r.component(('A', BB)) == 'foo'
assert r.component(('A', Unknown)) is None
assert r.component(('unknown', Unknown)) is None
assert list(r.all(('A', B))) == ['foo']
assert list(r.all(('A', BB))) == ['foo']
assert list(r.all(('A', Unknown))) == []
assert list(r.all(('unknown', Unknown))) == []
def test_single_predicate_get_key():
def get_key(foo):
return foo
p = match_key('a', get_key)
assert p.get_key({'foo': 'value'}) == 'value'
def test_multi_predicate_get_key():
def a_key(**d):
return d['a']
def b_key(**d):
return d['b']
p = PredicateRegistry(
match_key('a', a_key),
match_key('b', b_key))
assert p.key(a='A', b='B') == ('A', 'B')
def test_single_predicate_fallback():
r = PredicateRegistry(match_key('a', fallback='fallback'))
r.register(('A',), 'A value')
assert r.component(('A',)) == 'A value'
assert r.component(('B',)) is None
assert r.fallback(('B',)) == 'fallback'
def test_multi_predicate_fallback():
r = PredicateRegistry(
match_key('a', fallback='fallback1'),
match_key('b', fallback='fallback2'))
r.register(('A', 'B'), 'value')
assert r.component(('A', 'B')) == 'value'
assert r.component(('A', 'C')) is None
assert r.fallback(('A', 'C')) == 'fallback2'
assert r.component(('C', 'B')) is None
assert r.fallback(('C', 'B')) == 'fallback1'
assert list(r.all(('A', 'B'))) == ['value']
assert list(r.all(('A', 'C'))) == []
assert list(r.all(('C', 'B'))) == []
def test_predicate_self_request():
m = PredicateRegistry(
match_key('a'),
match_key('b', fallback='registered for all'))
m.register(('foo', 'POST'), 'registered for post')
assert m.component(('foo', 'GET')) is None
assert m.fallback(('foo', 'GET')) == 'registered for all'
assert m.component(('foo', 'POST')) == 'registered for post'
assert m.fallback(('foo', 'POST')) is None
assert m.component(('bar', 'GET')) is None
# XXX using an incomplete key returns undefined results
def test_predicate_duplicate_key():
m = PredicateRegistry(
match_key('a'),
match_key('b', fallback='registered for all'))
m.register(('foo', 'POST'), 'registered for post')
with pytest.raises(RegistrationError):
m.register(('foo', 'POST'), 'registered again')
def test_name_request_method_body_model_registered_for_base():
m = PredicateRegistry(
match_key('name', fallback='name fallback'),
match_key('request_method', fallback='request_method fallback'),
match_instance('body_model', fallback='body_model fallback'))
class Foo(object):
pass
class Bar(Foo):
pass
m.register(('foo', 'POST', Foo), 'post foo')
assert m.component(('bar', 'GET', object)) is None
assert m.fallback(('bar', 'GET', object)) == 'name fallback'
assert m.component(('foo', 'GET', object)) is None
assert m.fallback(('foo', 'GET', object)) == 'request_method fallback'
assert m.component(('foo', 'POST', object)) is None
assert m.fallback(('foo', 'POST', object)) == 'body_model fallback'
assert m.component(('foo', 'POST', Foo)) == 'post foo'
assert m.component(('foo', 'POST', Bar)) == 'post foo'
def test_name_request_method_body_model_registered_for_base_and_sub():
m = PredicateRegistry(
match_key('name', fallback='name fallback'),
        match_key('request_method', fallback='request_method fallback'),
match_instance('body_model', fallback='body_model fallback'))
class Foo(object):
pass
class Bar(Foo):
pass
m.register(('foo', 'POST', Foo), 'post foo')
m.register(('foo', 'POST', Bar), 'post bar')
assert m.component(('bar', 'GET', object)) is None
assert m.fallback(('bar', 'GET', object)) == 'name fallback'
assert m.component(('foo', 'GET', object)) is None
assert m.fallback(('foo', 'GET', object)) == 'request_method fallback'
assert m.component(('foo', 'POST', object)) is None
assert m.fallback(('foo', 'POST', object)) == 'body_model fallback'
assert m.component(('foo', 'POST', Foo)) == 'post foo'
assert m.component(('foo', 'POST', Bar)) == 'post bar'
def test_key_by_predicate_name():
p = match_key('foo', default='default')
assert p.key_by_predicate_name({'foo': 'value'}) == 'value'
assert p.key_by_predicate_name({}) == 'default'
def test_multi_key_by_predicate_name():
p = PredicateRegistry(
match_key('foo', default='default foo'),
match_key('bar', default='default bar'))
assert p.key_dict_to_predicate_key(
{'foo': 'FOO', 'bar': 'BAR'}) == ('FOO', 'BAR')
assert p.key_dict_to_predicate_key({}) == ('default foo', 'default bar')
def test_nameless_predicate_key():
p = match_key('a')
assert p.key_by_predicate_name({}) is None
|
|
from __future__ import absolute_import
from collections import namedtuple
from django.conf import settings
from surround.django.logging import setupModuleLogger
setupModuleLogger(globals())
class Result(namedtuple('Result', ['value', 'exception'])):
__slots__ = ()
def return_result(self):
self.throw_if_exception()
return self.value
def return_result_or_none(self):
if not self.success:
return None
return self.value
def throw_if_exception(self):
if self.exception is not None:
raise self.exception
@property
def success(self):
return self.exception is None
def map_state(self, on_success, *exception_clauses):
if self.success:
return on_success
for exceptions, result in exception_clauses:
if isinstance(self.exception, exceptions):
return result
self.throw_if_exception()
class Parameters(namedtuple('Parameters', ['args', 'kwargs'])):
__slots__ = ()
def __str__(self):
return ', '.join(list(map(str, self.args)) + ['%s=%r' % (k, v) for k, v in self.kwargs.items()])
def parameters(*args, **kwargs):
return Parameters(args, kwargs)
def execute(func, parameters):
try:
return Result(func(*parameters.args, **parameters.kwargs), None)
except Exception as e:
# logger.exception('during execution of %s(%s) exception occurred: %s', func, parameters, e)
if settings.SURROUND_EXECUTION_DEBUG:
raise
return Result(None, e)
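# A minimal usage sketch of the helpers above:
#
#     result = execute(int, parameters('42'))
#     result.success          # -> True
#     result.return_result()  # -> 42
#
#     failed = execute(int, parameters('oops'))
#     failed.success          # -> False (unless SURROUND_EXECUTION_DEBUG re-raises)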
class ExecutionException(Exception):
pass
class MultiParameters(dict):
def bind(self, name, *args, **kwargs):
self[name] = Parameters(args, kwargs)
def add(self, name, parameters):
self[name] = parameters
class MultiResult(object):
def __init__(self, results):
self.results = results
def __getitem__(self, key):
try:
return self.results[key].return_result()
except KeyError as e:
raise ExecutionException('failed to find key "%s" in %s: %s' % (key, self, e))
def get_result(self, key, throw_if_exception=True):
result = self.results[key]
if throw_if_exception:
result.throw_if_exception()
return result
def items(self):
return self.results.items()
def keys(self):
return self.results.keys()
class LazyFactory(object):
def __init__(self, class_owner, multi_func_name, const_attributes={}):
self.class_owner = class_owner
self.multi_func_name = multi_func_name
self.const_attributes = const_attributes
@property
def multi_func(self):
return getattr(self.class_owner, self.multi_func_name)
def __str__(self):
return '%s.%s.%s' % (self.class_owner.__module__, self.class_owner.__name__, self.multi_func_name)
def __call__(self, *args, **kwargs):
return LazyObject(self, Parameters(args, kwargs), self.const_attributes)
class LazyObject(object):
def __init__(self, factory, parameters, const_attributes):
self._factory = factory
self._parameters = parameters
self._execution_result = None
self._const_attributes = const_attributes
for k, v in parameters.kwargs.items():
setattr(self, k, v)
for k, v in const_attributes.items():
setattr(self, k, v)
@property
def _filled(self):
return self._execution_result is not None
def _fill(self, value):
self._execution_result = value
@property
def _auto_filled(self):
if not self._filled:
# import traceback ; traceback.print_stack()
self._fill(self._factory.multi_func.single(self._parameters))
return self._execution_result.return_result()
@property
def _filled_value(self):
return self._execution_result.return_result()
@property
def _success(self):
return self._execution_result.success
def __getattr__(self, name):
return getattr(self._auto_filled, name)
def __str__(self):
return 'LazyObject(%s, %s, %s)' % (self._factory, self._parameters, 'filled' if self._filled else 'empty')
__repr__ = __str__
def __reduce__(self):
return (self.__class__, (self._factory, self._parameters, self._const_attributes))
def __dir__(self):
return dir(self._auto_filled)
def multi_lazy_resolve(lazy_objects, final_throw_if_any=True, accumulate_successes=False):
multi_parameters = MultiParameters()
multi_func = None
not_filled = []
not_filled_number = 0
if accumulate_successes:
successes = []
else:
successes = None
for lazy in lazy_objects:
if lazy._filled:
if accumulate_successes:
successes.append(lazy._filled_value)
continue
not_filled.append(lazy)
multi_parameters.add(not_filled_number, lazy._parameters)
not_filled_number += 1
next_multi_func = lazy._factory.multi_func
if multi_func is None:
multi_func = next_multi_func
else:
if multi_func != next_multi_func:
raise ExecutionException('inconsistent multi functions stored in lazy objects')
if multi_func is None:
return successes
multi_results = multi_func.multi(multi_parameters)
for num, lazy in enumerate(not_filled):
result = multi_results.get_result(num, throw_if_exception=False)
lazy._fill(result)
if result.success:
if accumulate_successes:
successes.append(lazy._filled_value)
else:
if final_throw_if_any:
result.throw_if_exception()
return successes
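# Illustrative sketch (not part of the original module): the minimal shape of a
# "multi function" owner that LazyFactory/LazyObject expect, i.e. an attribute
# exposing .single(Parameters) -> Result and .multi(MultiParameters) -> MultiResult.
# The fetcher class, its lookup table and the keys 'a'/'b' are hypothetical examples.
class _ExampleBatchFetcher(object):
    class _fetch(object):
        @staticmethod
        def single(params):
            return execute(lambda key: {'a': 1, 'b': 2}[key], params)
        @staticmethod
        def multi(multi_params):
            return MultiResult(dict((name, _ExampleBatchFetcher._fetch.single(p))
                                    for name, p in multi_params.items()))
def _example_lazy_usage():
    factory = LazyFactory(_ExampleBatchFetcher, '_fetch')
    lazies = [factory('a'), factory('b')]
    # Resolve every pending LazyObject through one batched .multi() call.
    return multi_lazy_resolve(lazies, accumulate_successes=True)  # -> [1, 2]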
|
|
# -*- coding: utf-8 -*-
"""
Objects representing pages used with the ProofreadPage extension.
The extension is supported by MW 1.21+.
This module includes objects:
* ProofreadPage(Page)
* FullHeader
"""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import re
import json
import pywikibot
class FullHeader(object):
"""Header of a ProofreadPage object."""
p_header = re.compile(
r'<pagequality level="(?P<ql>\d)" user="(?P<user>.*?)" />'
r'<div class="pagetext">(?P<header>.*)',
re.DOTALL)
_template = ('<pagequality level="{0.ql}" user="{0.user}" />'
'<div class="pagetext">{0.header}\n\n\n')
def __init__(self, text=None):
"""Constructor."""
self._text = text or ''
m = self.p_header.search(self._text)
if m:
self.ql = int(m.group('ql'))
self.user = m.group('user')
self.header = m.group('header')
else:
self.ql = ProofreadPage.NOT_PROOFREAD
self.user = ''
self.header = ''
def __str__(self):
"""Return a string representation."""
return self._template.format(self)
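# Illustrative sketch (not part of the original module): FullHeader parses a raw
# page header and re-serializes it through its template. The quality level, user
# name and header text below are hypothetical examples.
def _example_full_header():
    raw = ('<pagequality level="3" user="ExampleUser" />'
           '<div class="pagetext">Running header')
    header = FullHeader(raw)
    # header.ql == 3, header.user == 'ExampleUser', header.header == 'Running header'
    return str(header)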
class ProofreadPage(pywikibot.Page):
"""ProofreadPage page used in Mediawiki ProofreadPage extension."""
WITHOUT_TEXT = 0
NOT_PROOFREAD = 1
PROBLEMATIC = 2
PROOFREAD = 3
VALIDATED = 4
open_tag = '<noinclude>'
close_tag = '</noinclude>'
p_open = re.compile(r'<noinclude>')
p_close = re.compile(r'(</div>|\n\n\n)?</noinclude>')
def __init__(self, source, title=''):
"""Instantiate a ProofreadPage object.
Raises UnknownExtension if source Site has no ProofreadPage Extension.
"""
if not isinstance(source, pywikibot.site.BaseSite):
site = source.site
else:
site = source
ns = site.proofread_page_ns
super(ProofreadPage, self).__init__(source, title, ns=ns)
if self.namespace() != site.proofread_page_ns:
raise ValueError('Page %s must belong to %s namespace'
% (self.title(), ns))
def decompose(fn):
"""Decorator.
Decompose text if needed and recompose text.
"""
def wrapper(obj, *args, **kwargs):
if not hasattr(obj, '_full_header'):
obj._decompose_page()
_res = fn(obj, *args, **kwargs)
obj._compose_page()
return _res
return wrapper
@property
@decompose
def ql(self):
"""Return page quality level."""
return self._full_header.ql
@ql.setter
@decompose
def ql(self, value):
"""Set page quality level."""
if value not in self.site.proofread_levels:
raise ValueError('Not valid QL value: %s (legal values: %s)'
% (value, self.site.proofread_levels))
# TODO: add logic to validate ql value change, considering
# site.proofread_levels.
self._full_header.ql = value
@property
@decompose
def user(self):
"""Return user in page header."""
return self._full_header.user
@user.setter
@decompose
def user(self, value):
"""Set user in page header."""
self._full_header.user = value
@property
@decompose
def status(self):
"""Return Proofread Page status."""
try:
return self.site.proofread_levels[self.ql]
except KeyError:
pywikibot.warning('Not valid status set for %s: quality level = %s'
% (self.title(asLink=True), self.ql))
return None
def without_text(self):
"""Set Page QL to "Without text"."""
self.ql = self.WITHOUT_TEXT
def problematic(self):
"""Set Page QL to "Problematic"."""
self.ql = self.PROBLEMATIC
def not_proofread(self):
"""Set Page QL to "Not Proofread"."""
self.ql = self.NOT_PROOFREAD
def proofread(self):
"""Set Page QL to "Proofread"."""
# TODO: check should be made to be consistent with Proofread Extension
self.ql = self.PROOFREAD
def validate(self):
"""Set Page QL to "Validated"."""
# TODO: check should be made to be consistent with Proofread Extension
self.ql = self.VALIDATED
@property
@decompose
def header(self):
"""Return editable part of Page header."""
return self._full_header.header
@header.setter
@decompose
def header(self, value):
"""Set editable part of Page header."""
self._full_header.header = value
@property
@decompose
def body(self):
"""Return Page body."""
return self._body
@body.setter
@decompose
def body(self, value):
"""Set Page body."""
self._body = value
@property
@decompose
def footer(self):
"""Return Page footer."""
return self._footer
@footer.setter
@decompose
def footer(self, value):
"""Set Page footer."""
self._footer = value
def _create_empty_page(self):
"""Create empty page."""
self._full_header = FullHeader()
self._body = ''
self._footer = ''
self.user = self.site.username() # Fill user field in empty header.
self._compose_page()
@property
def text(self):
"""Override text property.
Preload text returned by EditFormPreloadText to preload non-existing
pages.
"""
# Text is already cached.
if hasattr(self, '_text'):
return self._text
        # Load the text of an existing page, or preload a non-existing one.
        if self.exists():
            # If page exists, load it.
super(ProofreadPage, self).text
else:
self._text = self.preloadText()
self.user = self.site.username() # Fill user field in empty header.
return self._text
@text.setter
def text(self, value):
"""Update current text.
Mainly for use within the class, called by other methods.
        Use self.header, self.body and self.footer to set page content.
        @param value: New value or None
        @type value: basestring
Raises:
exception Error: the page is not formatted according to ProofreadPage
extension.
"""
self._text = value
if self._text:
self._decompose_page()
else:
self._create_empty_page()
@text.deleter
def text(self):
"""Delete current text."""
if hasattr(self, '_text'):
del self._text
def _decompose_page(self):
"""Split Proofread Page text in header, body and footer.
Raises:
exception Error: the page is not formatted according to ProofreadPage
extension.
"""
if not self.text: # Property force page text loading.
self._create_empty_page()
return
open_queue = list(self.p_open.finditer(self._text))
close_queue = list(self.p_close.finditer(self._text))
len_oq = len(open_queue)
len_cq = len(close_queue)
if (len_oq != len_cq) or (len_oq < 2 or len_cq < 2):
raise pywikibot.Error('ProofreadPage %s: invalid format'
% self.title(asLink=True))
f_open, f_close = open_queue[0], close_queue[0]
self._full_header = FullHeader(self._text[f_open.end():f_close.start()])
l_open, l_close = open_queue[-1], close_queue[-1]
self._footer = self._text[l_open.end():l_close.start()]
self._body = self._text[f_close.end():l_open.start()]
def _compose_page(self):
"""Compose Proofread Page text from header, body and footer."""
fmt = ('{0.open_tag}{0._full_header}{0.close_tag}'
'{0._body}'
'{0.open_tag}{0._footer}</div>{0.close_tag}')
self._text = fmt.format(self)
return self._text
def _page_to_json(self):
"""Convert page text to json format.
This is the format accepted by action=edit specifying
contentformat=application/json. This format is recommended to save the
page, as it is not subject to possible errors done in composing the
wikitext header and footer of the page or changes in the ProofreadPage
extension format.
"""
page_dict = {'header': self.header,
'body': self.body,
'footer': self.footer,
'level': {'level': self.ql, 'user': self.user},
}
# ensure_ascii=False returns a unicode
return json.dumps(page_dict, ensure_ascii=False)
def save(self, *args, **kwargs): # see Page.save()
"""Save page content after recomposing the page."""
summary = kwargs.pop('summary', '')
summary = self.pre_summary + summary
# Save using contentformat='application/json'
kwargs['contentformat'] = 'application/json'
kwargs['contentmodel'] = 'proofread-page'
text = self._page_to_json()
super(ProofreadPage, self).save(*args, text=text, summary=summary,
**kwargs)
@property
def pre_summary(self):
"""Return trailing part of edit summary.
The edit summary shall be appended to pre_summary to highlight
Status in the edit summary on wiki.
"""
return '/* {0.status} */ '.format(self)
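# Illustrative sketch (not part of the original module): a typical edit cycle on
# a Page: namespace page. It assumes a site running the ProofreadPage extension;
# the site code and page title below are hypothetical examples.
def _example_proofread_workflow():
    site = pywikibot.Site('en', 'wikisource')
    page = ProofreadPage(site, 'Page:Example.djvu/12')
    page.body = page.body.replace('teh', 'the')  # edit only the page body
    page.proofread()                             # raise quality level to PROOFREAD
    page.save(summary='typo fix')                # saved via contentformat=application/json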
|
|
# Performs extraction of data that matches extraction rules.
# This is automatically invoked by core.module code if extraction has been
# enabled by the user; other modules need not reference this module directly.
import os
import re
import sys
import stat
import shlex
import tempfile
import subprocess
import binwalk.core.common
from binwalk.core.compat import *
from binwalk.core.module import Module, Option, Kwarg
from binwalk.core.common import file_size, file_md5, unique_file_name, BlockFile
class ExtractDetails(object):
def __init__(self, **kwargs):
for (k, v) in iterator(kwargs):
setattr(self, k, v)
class ExtractInfo(object):
def __init__(self):
self.carved = {}
self.extracted = {}
self.directory = None
class Extractor(Module):
'''
Extractor class, responsible for extracting files from the target file and executing external applications, if requested.
'''
# Extract rules are delimited with a colon.
# <case insensitive matching string>:<file extension>[:<command to run>]
RULE_DELIM = ':'
# Comments in the extract.conf files start with a pound
COMMENT_DELIM = '#'
# Place holder for the extracted file name in the command
FILE_NAME_PLACEHOLDER = '%e'
# Unique path delimiter, used for generating unique output file/directory names.
# Useful when, for example, extracting two squashfs images (squashfs-root,
# squashfs-root-0).
UNIQUE_PATH_DELIMITER = '%%'
TITLE = 'Extraction'
ORDER = 9
PRIMARY = False
CLI = [
Option(short='e',
long='extract',
kwargs={'load_default_rules': True, 'enabled': True},
description='Automatically extract known file types'),
Option(short='D',
long='dd',
type=list,
dtype='type:ext:cmd',
kwargs={'manual_rules': [], 'enabled': True},
description='Extract <type> signatures, give the files an extension of <ext>, and execute <cmd>'),
Option(short='M',
long='matryoshka',
kwargs={'matryoshka': 8},
description='Recursively scan extracted files'),
Option(short='d',
long='depth',
type=int,
kwargs={'matryoshka': 0},
description='Limit matryoshka recursion depth (default: 8 levels deep)'),
Option(short='C',
long='directory',
type=str,
kwargs={'base_directory': 0},
description='Extract files/folders to a custom directory (default: current working directory)'),
Option(short='j',
long='size',
type=int,
kwargs={'max_size': 0},
description='Limit the size of each extracted file'),
Option(short='n',
long='count',
type=int,
kwargs={'max_count': 0},
description='Limit the number of extracted files'),
#Option(short='u',
# long='limit',
# type=int,
# kwargs={'recursive_max_size': 0},
# description="Limit the total size of all extracted files"),
Option(short='r',
long='rm',
kwargs={'remove_after_execute': True},
description='Delete carved files after extraction'),
Option(short='z',
long='carve',
kwargs={'run_extractors': False},
description="Carve data from files, but don't execute extraction utilities"),
Option(short='V',
long='subdirs',
kwargs={'extract_into_subdirs': True},
description="Extract into sub-directories named by the offset"),
]
KWARGS = [
Kwarg(name='max_size', default=None),
Kwarg(name='recursive_max_size', default=None),
Kwarg(name='max_count', default=None),
Kwarg(name='base_directory', default=None),
Kwarg(name='remove_after_execute', default=False),
Kwarg(name='load_default_rules', default=False),
Kwarg(name='run_extractors', default=True),
Kwarg(name='extract_into_subdirs', default=False),
Kwarg(name='manual_rules', default=[]),
Kwarg(name='matryoshka', default=0),
Kwarg(name='enabled', default=False),
]
def load(self):
# Holds a list of extraction rules loaded either from a file or when
# manually specified.
self.extract_rules = []
# The input file specific output directory path (default to CWD)
if self.base_directory:
self.directory = os.path.realpath(self.base_directory)
if not os.path.exists(self.directory):
os.makedirs(self.directory)
else:
self.directory = os.getcwd()
# Key value pairs of input file path and output extraction path
self.output = {}
# Number of extracted files
self.extraction_count = 0
# Override the directory name used for extraction output directories
self.output_directory_override = None
if self.load_default_rules:
self.load_defaults()
for manual_rule in self.manual_rules:
self.add_rule(manual_rule)
if self.matryoshka:
self.config.verbose = True
def add_pending(self, f):
# Ignore symlinks
if os.path.islink(f):
return
# Get the file mode to check and see if it's a block/char device
try:
file_mode = os.stat(f).st_mode
except OSError as e:
return
# Only add this to the pending list of files to scan
# if the file is a regular file. Special files (block/character
# devices) can be tricky; they may fail to open, or worse, simply
# hang when an attempt to open them is made. So for recursive
# extraction purposes, they are ignored, albeit with a warning to
# the user.
if stat.S_ISREG(file_mode):
# Make sure we can open the file too...
try:
fp = binwalk.core.common.BlockFile(f)
fp.close()
self.pending.append(f)
except IOError as e:
binwalk.core.common.warning("Ignoring file '%s': %s" % (f, str(e)))
else:
binwalk.core.common.warning("Ignoring file '%s': Not a regular file" % f)
def reset(self):
# Holds a list of pending files that should be scanned; only populated
# if self.matryoshka == True
self.pending = []
# Holds a dictionary of extraction directories created for each scanned
# file.
self.extraction_directories = {}
# Holds a dictionary of the last directory listing for a given directory; used for identifying
# newly created/extracted files that need to be appended to
# self.pending.
self.last_directory_listing = {}
def callback(self, r):
# Make sure the file attribute is set to a compatible instance of
# binwalk.core.common.BlockFile
try:
r.file.size
except KeyboardInterrupt as e:
pass
except Exception as e:
return
if not r.size:
size = r.file.size - r.offset
else:
size = r.size
# Only extract valid results that have been marked for extraction and displayed to the user.
# Note that r.display is still True even if --quiet has been specified; it is False if the result has been
# explicitly excluded via the -y/-x options.
if r.valid and r.extract and r.display and (not self.max_count or self.extraction_count < self.max_count):
                # Create some extract output for this file, if it doesn't already
# exist
if not binwalk.core.common.has_key(self.output, r.file.path):
self.output[r.file.path] = ExtractInfo()
# Attempt extraction
binwalk.core.common.debug("Extractor callback for %s @%d [%s]" % (r.file.name,
r.offset,
r.description))
(extraction_directory, dd_file, scan_extracted_files, extraction_utility) = self.extract(r.offset,
r.description,
r.file.path,
size,
r.name)
# If the extraction was successful, self.extract will have returned
# the output directory and name of the dd'd file
if extraction_directory and dd_file:
# Track the number of extracted files
self.extraction_count += 1
# Get the full path to the dd'd file and save it in the output
# info for this file
dd_file_path = os.path.join(extraction_directory, dd_file)
self.output[r.file.path].carved[r.offset] = dd_file_path
self.output[r.file.path].extracted[r.offset] = ExtractDetails(files=[], command=extraction_utility)
# Do a directory listing of the output directory
directory_listing = set(os.listdir(extraction_directory))
# If this is a newly created output directory, self.last_directory_listing won't have a record of it.
# If we've extracted other files to this directory before, it
# will.
if not has_key(self.last_directory_listing, extraction_directory):
self.last_directory_listing[extraction_directory] = set()
# Loop through a list of newly created files (i.e., files that
# weren't listed in the last directory listing)
for f in directory_listing.difference(self.last_directory_listing[extraction_directory]):
# Build the full file path and add it to the extractor
# results
file_path = os.path.join(extraction_directory, f)
real_file_path = os.path.realpath(file_path)
self.result(description=file_path, display=False)
# Also keep a list of files created by the extraction
# utility
if real_file_path != dd_file_path:
self.output[r.file.path].extracted[r.offset].files.append(real_file_path)
# If recursion was specified, and the file is not the same
# one we just dd'd
if (self.matryoshka and
file_path != dd_file_path and
scan_extracted_files and
self.directory in real_file_path):
# If the recursion level of this file is less than or
# equal to our desired recursion level
if len(real_file_path.split(self.directory)[1].split(os.path.sep)) <= self.matryoshka:
# If this is a directory and we are supposed to process directories for this extractor,
# then add all files under that directory to the
# list of pending files.
if os.path.isdir(file_path):
for root, dirs, files in os.walk(file_path):
for f in files:
full_path = os.path.join(root, f)
self.add_pending(full_path)
                                # If it's just a file, add it to the list of pending
# files
else:
self.add_pending(file_path)
# Update the last directory listing for the next time we
# extract a file to this same output directory
self.last_directory_listing[
extraction_directory] = directory_listing
def append_rule(self, r):
self.extract_rules.append(r.copy())
def prepend_rule(self, r):
self.extract_rules = [r] + self.extract_rules
def add_rule(self, txtrule=None, regex=None, extension=None, cmd=None, codes=[0, None], recurse=True, prepend=False):
rules = self.create_rule(txtrule, regex, extension, cmd, codes, recurse)
for r in rules:
if prepend:
self.prepend_rule(r)
else:
self.append_rule(r)
def create_rule(self, txtrule=None, regex=None, extension=None, cmd=None, codes=[0, None], recurse=True):
'''
Adds a set of rules to the extraction rule list.
@txtrule - Rule string, or list of rule strings, in the format <regular expression>:<file extension>[:<command to run>]
@regex - If rule string is not specified, this is the regular expression string to use.
@extension - If rule string is not specified, this is the file extension to use.
@cmd - If rule string is not specified, this is the command to run.
Alternatively a callable object may be specified, which will be passed one argument: the path to the file to extract.
@codes - A list of valid return codes for the extractor.
@recurse - If False, extracted directories will not be recursed into when the matryoshka option is enabled.
Returns None.
'''
rules = []
created_rules = []
match = False
r = {
'extension': '',
'cmd': '',
'regex': None,
'codes': codes,
'recurse': recurse,
}
# Process single explicitly specified rule
if not txtrule and regex and extension:
r['extension'] = extension
r['regex'] = re.compile(regex)
if cmd:
r['cmd'] = cmd
return [r]
# Process rule string, or list of rule strings
if not isinstance(txtrule, type([])):
rules = [txtrule]
else:
rules = txtrule
        for rule in rules:
            # Reset per-rule state so a failed parse doesn't reuse the previous match.
            match = False
            r['cmd'] = ''
            r['extension'] = ''
try:
values = self._parse_rule(rule)
match = values[0]
r['regex'] = re.compile(values[0])
r['extension'] = values[1]
r['cmd'] = values[2]
r['codes'] = values[3]
r['recurse'] = values[4]
except KeyboardInterrupt as e:
raise e
except Exception:
pass
# Verify that the match string was retrieved.
            if match:
                # Append a copy so later iterations don't mutate stored rules.
                created_rules.append(r.copy())
return created_rules
def remove_rules(self, description):
'''
Remove all rules that match a specified description.
@description - The description to match against.
Returns the number of rules removed.
'''
rm = []
description = description.lower()
for i in range(0, len(self.extract_rules)):
if self.extract_rules[i]['regex'].search(description):
rm.append(i)
        # Pop in reverse order so earlier pops don't shift the later indices.
        for i in sorted(rm, reverse=True):
            self.extract_rules.pop(i)
return len(rm)
def edit_rules(self, description, key, value):
'''
Edit all rules that match a specified description.
@description - The description to match against.
@key - The key to change for each matching rule.
@value - The new key value for each matching rule.
Returns the number of rules modified.
'''
count = 0
description = description.lower()
for i in range(0, len(self.extract_rules)):
if self.extract_rules[i]['regex'].search(description):
if has_key(self.extract_rules[i], key):
self.extract_rules[i][key] = value
count += 1
return count
def clear_rules(self):
'''
Deletes all extraction rules.
Returns None.
'''
self.extract_rules = []
def get_rules(self, description=None):
'''
Returns a list of extraction rules that match a given description.
@description - The description to match against.
Returns a list of extraction rules that match the given description.
If no description is provided, a list of all rules are returned.
'''
if description:
rules = []
description = description.lower()
for i in range(0, len(self.extract_rules)):
if self.extract_rules[i]['regex'].search(description):
rules.append(self.extract_rules[i])
else:
rules = self.extract_rules
return rules
def load_from_file(self, fname):
'''
Loads extraction rules from the specified file.
@fname - Path to the extraction rule file.
Returns None.
'''
try:
# Process each line from the extract file, ignoring comments
with open(fname, 'r') as f:
for rule in f.readlines():
self.add_rule(rule.split(self.COMMENT_DELIM, 1)[0])
except KeyboardInterrupt as e:
raise e
except Exception as e:
raise Exception("Extractor.load_from_file failed to load file '%s': %s" % (fname, str(e)))
def load_defaults(self):
'''
Loads default extraction rules from the user and system extract.conf files.
Returns None.
'''
# Load the user extract file first to ensure its rules take precedence.
extract_files = [
self.config.settings.user.extract,
self.config.settings.system.extract,
]
for extract_file in extract_files:
if extract_file:
try:
self.load_from_file(extract_file)
except KeyboardInterrupt as e:
raise e
except Exception as e:
if binwalk.core.common.DEBUG:
raise Exception("Extractor.load_defaults failed to load file '%s': %s" % (extract_file, str(e)))
def get_output_directory_override(self):
'''
Returns the current output directory basename override value.
'''
return self.output_directory_override
def override_output_directory_basename(self, dirname):
'''
Allows the overriding of the default extraction directory basename.
@dirname - The directory base name to use.
Returns the current output directory basename override value.
'''
self.output_directory_override = dirname
return self.output_directory_override
def build_output_directory(self, path):
'''
Set the output directory for extracted files.
@path - The path to the file that data will be extracted from.
Returns None.
'''
# If we have not already created an output directory for this target
# file, create one now
if not has_key(self.extraction_directories, path):
basedir = os.path.dirname(path)
basename = os.path.basename(path)
if basedir != self.directory:
# During recursive extraction, extracted files will be in subdirectories
# of the CWD. This allows us to figure out the subdirectory by simply
# splitting the target file's base directory on our known CWD.
#
# However, the very *first* file being scanned is not necessarily in the
# CWD, so this will raise an IndexError. This is easy to handle though,
# since the very first file being scanned needs to have its contents
# extracted to ${CWD}/_basename.extracted, so we just set the subdir
# variable to a blank string when an IndexError is encountered.
try:
subdir = basedir.split(self.directory)[1][1:]
except IndexError as e:
subdir = ""
else:
subdir = ""
if self.output_directory_override:
output_directory = os.path.join(self.directory, subdir, self.output_directory_override)
else:
outdir = os.path.join(self.directory, subdir, '_' + basename)
output_directory = unique_file_name(outdir, extension='extracted')
if not os.path.exists(output_directory):
os.mkdir(output_directory)
self.extraction_directories[path] = output_directory
self.output[path].directory = os.path.realpath(output_directory) + os.path.sep
# Else, just use the already created directory
else:
output_directory = self.extraction_directories[path]
return output_directory
def cleanup_extracted_files(self, tf=None):
'''
Set the action to take after a file is extracted.
@tf - If set to True, extracted files will be cleaned up after running a command against them.
If set to False, extracted files will not be cleaned up after running a command against them.
If set to None or not specified, the current setting will not be changed.
Returns the current cleanup status (True/False).
'''
if tf is not None:
self.remove_after_execute = tf
return self.remove_after_execute
def extract(self, offset, description, file_name, size, name=None):
'''
Extract an embedded file from the target file, if it matches an extract rule.
Called automatically by Binwalk.scan().
@offset - Offset inside the target file to begin the extraction.
@description - Description of the embedded file to extract, as returned by libmagic.
@file_name - Path to the target file.
@size - Number of bytes to extract.
@name - Name to save the file as.
Returns the name of the extracted file (blank string if nothing was extracted).
'''
fname = ''
rule = None
recurse = False
original_dir = os.getcwd()
rules = self.match(description)
file_path = os.path.realpath(file_name)
# No extraction rules for this file
if not rules:
return (None, None, False, str(None))
else:
binwalk.core.common.debug("Found %d matching extraction rules" % len(rules))
# Generate the output directory name where extracted files will be
# stored
output_directory = self.build_output_directory(file_name)
# Extract to end of file if no size was specified
if not size:
size = file_size(file_path) - offset
if os.path.isfile(file_path):
os.chdir(output_directory)
# Extract into subdirectories named by the offset
if self.extract_into_subdirs:
                # Format the offset as hex with %X; hex() would append a trailing 'L' for longs
offset_dir = "0x%X" % offset
os.mkdir(offset_dir)
os.chdir(offset_dir)
# Loop through each extraction rule until one succeeds
for i in range(0, len(rules)):
rule = rules[i]
# Make sure we don't recurse into any extracted directories if
# instructed not to
if rule['recurse'] in [True, False]:
recurse = rule['recurse']
else:
recurse = True
# Copy out the data to disk, if we haven't already
fname = self._dd(file_path, offset, size, rule['extension'], output_file_name=name)
# If there was a command specified for this rule, try to execute it.
# If execution fails, the next rule will be attempted.
if rule['cmd']:
# Note the hash of the original file; if --rm is specified and the
# extraction utility modifies the original file rather than creating
# a new one (AFAIK none currently do, but could happen in the future),
# we don't want to remove this file.
if self.remove_after_execute:
fname_md5 = file_md5(fname)
# Execute the specified command against the extracted file
if self.run_extractors:
extract_ok = self.execute(rule['cmd'], fname, rule['codes'])
else:
extract_ok = True
# Only clean up files if remove_after_execute was specified.
                    # Only clean up files if the file was extracted successfully, or if we've run
# out of extractors.
if self.remove_after_execute and (extract_ok == True or i == (len(rules) - 1)):
# Remove the original file that we extracted,
# if it has not been modified by the extractor.
try:
if file_md5(fname) == fname_md5:
os.unlink(fname)
except KeyboardInterrupt as e:
raise e
except Exception as e:
pass
# If the command executed OK, don't try any more rules
if extract_ok == True:
break
# Else, remove the extracted file if this isn't the last rule in the list.
# If it is the last rule, leave the file on disk for the
# user to examine.
elif i != (len(rules) - 1):
try:
os.unlink(fname)
except KeyboardInterrupt as e:
raise e
except Exception as e:
pass
# If there was no command to execute, just use the first rule
else:
break
os.chdir(original_dir)
if rule is not None:
return (output_directory, fname, recurse, str(rule['cmd']))
else:
return (output_directory, fname, recurse, '')
def _entry_offset(self, index, entries, description):
'''
Gets the offset of the first entry that matches the description.
@index - Index into the entries list to begin searching.
@entries - Dictionary of result entries.
@description - Case insensitive description.
Returns the offset, if a matching description is found.
Returns -1 if a matching description is not found.
'''
description = description.lower()
for (offset, infos) in entries[index:]:
for info in infos:
if info['description'].lower().startswith(description):
return offset
return -1
def match(self, description):
'''
Check to see if the provided description string matches an extract rule.
Called internally by self.extract().
@description - Description string to check.
Returns the associated rule dictionary if a match is found.
Returns None if no match is found.
'''
rules = []
ordered_rules = []
description = description.lower()
for rule in self.extract_rules:
if rule['regex'].search(description):
rules.append(rule)
# Plugin rules should take precedence over external extraction commands.
for rule in rules:
if callable(rule['cmd']):
ordered_rules.append(rule)
for rule in rules:
if not callable(rule['cmd']):
ordered_rules.append(rule)
return ordered_rules
def _parse_rule(self, rule):
'''
Parses an extraction rule.
@rule - Rule string.
Returns an array of ['<case insensitive matching string>', '<file extension>', '<command to run>', '<comma separated return codes>', <recurse into extracted directories: True|False>].
'''
values = rule.strip().split(self.RULE_DELIM, 4)
if len(values) >= 4:
codes = values[3].split(',')
for i in range(0, len(codes)):
try:
codes[i] = int(codes[i], 0)
except ValueError as e:
binwalk.core.common.warning("The specified return code '%s' for extractor '%s' is not a valid number!" % (codes[i], values[0]))
values[3] = codes
if len(values) >= 5:
values[4] = (values[4].lower() == 'true')
return values
def _dd(self, file_name, offset, size, extension, output_file_name=None):
'''
Extracts a file embedded inside the target file.
@file_name - Path to the target file.
@offset - Offset inside the target file where the embedded file begins.
@size - Number of bytes to extract.
        @extension - The file extension to assign to the extracted file on disk.
@output_file_name - The requested name of the output file.
Returns the extracted file name.
'''
total_size = 0
# Default extracted file name is <displayed hex offset>.<extension>
default_bname = "%X" % (offset + self.config.base)
if self.max_size and size > self.max_size:
size = self.max_size
if not output_file_name or output_file_name is None:
bname = default_bname
else:
# Strip the output file name of invalid/dangerous characters (like
# file paths)
bname = os.path.basename(output_file_name)
fname = unique_file_name(bname, extension)
try:
# If byte swapping is enabled, we need to start reading at a swap-size
# aligned offset, then index in to the read data appropriately.
if self.config.swap_size:
adjust = offset % self.config.swap_size
else:
adjust = 0
offset -= adjust
# Open the target file and seek to the offset
fdin = self.config.open_file(file_name)
fdin.seek(offset)
# Open the output file
try:
fdout = BlockFile(fname, 'w')
except KeyboardInterrupt as e:
raise e
except Exception as e:
# Fall back to the default name if the requested name fails
fname = unique_file_name(default_bname, extension)
fdout = BlockFile(fname, 'w')
while total_size < size:
(data, dlen) = fdin.read_block()
if dlen < 1:
break
else:
total_size += (dlen - adjust)
if total_size > size:
dlen -= (total_size - size)
fdout.write(str2bytes(data[adjust:dlen]))
adjust = 0
# Cleanup
fdout.close()
fdin.close()
except KeyboardInterrupt as e:
raise e
except Exception as e:
raise Exception("Extractor.dd failed to extract data from '%s' to '%s': %s" %
(file_name, fname, str(e)))
binwalk.core.common.debug("Carved data block 0x%X - 0x%X from '%s' to '%s'" %
(offset, offset + size, file_name, fname))
return fname
def execute(self, cmd, fname, codes=[0, None]):
'''
Execute a command against the specified file.
@cmd - Command to execute.
@fname - File to run command against.
@codes - List of return codes indicating cmd success.
Returns True on success, False on failure, or None if the external extraction utility could not be found.
'''
tmp = None
rval = 0
retval = True
binwalk.core.common.debug("Running extractor '%s'" % str(cmd))
try:
if callable(cmd):
try:
retval = cmd(fname)
except KeyboardInterrupt as e:
raise e
except Exception as e:
binwalk.core.common.warning("Internal extractor '%s' failed with exception: '%s'" % (str(cmd), str(e)))
elif cmd:
# If not in debug mode, create a temporary file to redirect
# stdout and stderr to
if not binwalk.core.common.DEBUG:
tmp = tempfile.TemporaryFile()
# Generate unique file paths for all paths in the current
# command that are surrounded by UNIQUE_PATH_DELIMITER
while self.UNIQUE_PATH_DELIMITER in cmd:
need_unique_path = cmd.split(self.UNIQUE_PATH_DELIMITER)[
1].split(self.UNIQUE_PATH_DELIMITER)[0]
unique_path = binwalk.core.common.unique_file_name(need_unique_path)
cmd = cmd.replace(self.UNIQUE_PATH_DELIMITER + need_unique_path + self.UNIQUE_PATH_DELIMITER, unique_path)
# Execute.
for command in cmd.split("&&"):
# Replace all instances of FILE_NAME_PLACEHOLDER in the
# command with fname
command = command.strip().replace(self.FILE_NAME_PLACEHOLDER, fname)
binwalk.core.common.debug("subprocess.call(%s, stdout=%s, stderr=%s)" % (command, str(tmp), str(tmp)))
rval = subprocess.call(shlex.split(command), stdout=tmp, stderr=tmp)
if rval in codes:
retval = True
else:
retval = False
binwalk.core.common.debug('External extractor command "%s" completed with return code %d (success: %s)' % (cmd, rval, str(retval)))
# TODO: Should errors from all commands in a command string be checked? Currently we only support
# specifying one set of error codes, so at the moment, this is not done; it is up to the
# final command to return success or failure (which presumably it will if previous necessary
# commands were not successful, but this is an assumption).
# if retval == False:
# break
except KeyboardInterrupt as e:
raise e
except Exception as e:
binwalk.core.common.warning("Extractor.execute failed to run external extractor '%s': %s, '%s' might not be installed correctly" % (str(cmd), str(e), str(cmd)))
retval = None
if tmp is not None:
tmp.close()
return retval
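# Illustrative sketch (not part of the original module): registering a custom
# extraction rule on an Extractor instance, equivalent to the rule string form
# '<regex>:<extension>:<command>'. The regex, extension and external command
# below are hypothetical examples; %e is the extracted file name placeholder.
def _example_add_custom_rule(extractor):
    extractor.add_rule(regex='^my custom header', extension='bin',
                       cmd='mytool %e', codes=[0], recurse=False)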
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SM3 optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import tensorflow.compat.v1 as tf
from sm3 import sm3
class SM3Test(tf.test.TestCase):
def setUp(self):
super(SM3Test, self).setUp()
self._learning_rate = 0.1
self._momentum = 0.9
def testDenseScalarLayer(self):
"""Test a single dense scalar layer."""
with self.cached_session() as sess:
var = tf.Variable(0.5)
grad_np = 0.1
grad = tf.Variable(grad_np)
opt = sm3.SM3Optimizer(
learning_rate=self._learning_rate, momentum=self._momentum)
step = opt.apply_gradients([(grad, var)])
sess.run(tf.global_variables_initializer())
# Check that variable and momentum are as expected before starting
# training.
var_np = sess.run(var)
gbar_np = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(0.5, var_np)
self.assertAllClose(0.0, gbar_np)
accumulator = numpy.zeros_like(gbar_np)
for _ in range(2):
# Run a step of training.
step.run()
# Expected preconditioned gradient, momentum, and parameter.
accumulator += numpy.square(grad_np)
exp_p_grad = grad_np / numpy.sqrt(accumulator)
exp_gbar_np = (
self._momentum * gbar_np + (1 - self._momentum) * exp_p_grad)
exp_var = var_np - self._learning_rate * exp_gbar_np
# Check that variable and momentum are as expected after one step of
# training.
var_np = sess.run(var)
gbar_np = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(exp_var, var_np)
self.assertAllClose(exp_gbar_np, gbar_np)
def testDenseVectorLayer(self):
"""Test a single dense vector layer."""
with self.cached_session() as sess:
var = tf.Variable([0.5, 0.3])
grad_np = [0.1, 0.1]
grad = tf.Variable(grad_np)
opt = sm3.SM3Optimizer(
learning_rate=self._learning_rate, momentum=self._momentum)
step = opt.apply_gradients([(grad, var)])
sess.run(tf.global_variables_initializer())
# Check that variable and momentum are as expected before starting
# training.
var_np = sess.run(var)
gbar_np = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose([0.5, 0.3], var_np)
self.assertAllClose([0.0, 0.0], gbar_np)
accumulator = numpy.zeros_like(gbar_np)
for _ in range(2):
# Run a step of training.
step.run()
# Expected preconditioned gradient, momentum, and parameter.
accumulator += numpy.square(grad_np)
exp_p_grad = grad_np / numpy.sqrt(accumulator)
exp_gbar_np = (
self._momentum * gbar_np + (1 - self._momentum) * exp_p_grad)
exp_var = var_np - self._learning_rate * exp_gbar_np
# Check that variable and momentum are as expected after one step of
# training.
var_np = sess.run(var)
gbar_np = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(exp_var, var_np)
self.assertAllClose(exp_gbar_np, gbar_np)
def testDenseLayerMatrix(self):
"""Test a single dense matrix layer."""
with self.cached_session() as sess:
var = tf.Variable([[0.5, 0.5], [0.5, 0.5]])
grad_np = [[0.1, 0.05], [0.03, 0.02]]
grad = tf.Variable(grad_np)
opt = sm3.SM3Optimizer(
learning_rate=self._learning_rate, momentum=self._momentum)
step = opt.apply_gradients([(grad, var)])
sess.run(tf.global_variables_initializer())
# Check that variable and momentum are as expected before starting
# training.
var_np = sess.run(var)
gbar_np = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(var_np, [[0.5, 0.5], [0.5, 0.5]])
self.assertAllClose([[0.0, 0.0], [0.0, 0.0]], gbar_np)
row_accumulator = numpy.zeros([2, 1])
col_accumulator = numpy.zeros([1, 2])
accumulator = numpy.zeros_like(gbar_np)
for _ in range(2):
# Run a step of training.
step.run()
accumulator = numpy.minimum(row_accumulator, col_accumulator)
# Expected preconditioned gradient, momentum, and parameter.
accumulator += numpy.square(grad_np)
# Update SM3 accumulators.
row_accumulator = numpy.amax(accumulator, axis=1, keepdims=True)
col_accumulator = numpy.amax(accumulator, axis=0, keepdims=True)
exp_p_grad = grad_np / numpy.sqrt(accumulator)
exp_gbar_np = (
self._momentum * gbar_np + (1 - self._momentum) * exp_p_grad)
exp_var = var_np - self._learning_rate * exp_gbar_np
# Check that variable and momentum are as expected after one step of
# training.
var_np = sess.run(var)
gbar_np = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(exp_var, var_np)
self.assertAllClose(exp_gbar_np, gbar_np)
def testZeroGradientNoOpAtFirstStep(self):
"""Test that checks that epsilon handling is unncessary."""
with self.cached_session() as sess:
var = tf.Variable(0.5)
grad = tf.Variable(0.0)
opt = sm3.SM3Optimizer(
learning_rate=self._learning_rate, momentum=self._momentum)
step = opt.apply_gradients([(grad, var)])
sess.run(tf.global_variables_initializer())
# Check that variable and momentum are as expected before starting
# training.
var_np = sess.run(var)
gbar_np = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(0.5, var_np)
self.assertAllClose(0.0, gbar_np)
# Run one step of training.
step.run()
var_np = sess.run(var)
gbar_np = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(0.5, var_np)
self.assertAllClose(0.0, gbar_np)
def testSparseUpdates(self):
"""Test that checks sparse updates."""
with self.cached_session() as sess:
var = tf.Variable([[0.5, 0.05], [0.05, 1.0], [0.15, 3.0], [0.35, 2.0]])
# A sparse gradient that updates index 1, and 3.
grad_np = [[0.1, 0.05], [0.01, 1.5]]
indices_np = [1, 3]
shape = [2, 2]
grad = tf.IndexedSlices(
tf.constant(grad_np, shape=shape),
tf.constant(indices_np), # indices
tf.constant(shape)) # shape
opt = sm3.SM3Optimizer(
learning_rate=self._learning_rate, momentum=self._momentum)
step = opt.apply_gradients([(grad, var)])
sess.run(tf.global_variables_initializer())
# Check that variable and momentum are as expected before starting
# training.
var_np = sess.run(var)
self.assertAllClose([[0.5, 0.05], [0.05, 1.0], [0.15, 3.0], [0.35, 2.0]],
var_np)
# Run one step of training.
step.run()
accumulator = numpy.zeros_like(var_np)
accumulator[indices_np, :] += numpy.square(grad_np)
row_accumulator = numpy.amax(accumulator, axis=1, keepdims=True)
# Update SM3 accumulators.
exp_p_grad = grad_np / numpy.sqrt(accumulator[indices_np, :])
exp_var_np = var_np
exp_var_np[indices_np, :] = var_np[
indices_np, :] - self._learning_rate * exp_p_grad
var_np = sess.run(var)
self.assertAllClose(exp_var_np, var_np)
row_accumulator_var = numpy.reshape(
sess.run(opt.get_slot(var, 'accumulator_0')), [4, 1])
self.assertAllClose(row_accumulator_var, row_accumulator)
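# Illustrative reference sketch (not part of the original tests): the dense
# scalar SM3-with-momentum step that testDenseScalarLayer verifies, written in
# plain NumPy. The argument names and defaults are local to this example.
def _sm3_scalar_reference_step(var, gbar, accumulator, grad,
                               learning_rate=0.1, momentum=0.9):
  accumulator = accumulator + numpy.square(grad)      # accumulate squared gradient
  p_grad = grad / numpy.sqrt(accumulator)             # preconditioned gradient
  gbar = momentum * gbar + (1.0 - momentum) * p_grad  # momentum buffer
  var = var - learning_rate * gbar                    # parameter update
  return var, gbar, accumulator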
if __name__ == '__main__':
tf.test.main()
|
|
""" Module to perform a trapezoid model fit to flux time seres data
Author: Christopher J Burke
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
import sys
def phaseData(t, per, to):
"""Phase the data at period per and centered at to
INPUT:
t - time of data
per - period to phase time data period and t should
be in same units
to - epoch of phase zero
OUTPUT:
phi - data phased running from -0.5<phi<=0.5
"""
phi = np.mod(t - to, per) / per
phi = np.where(phi > 0.5, phi - 1.0, phi)
return phi
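# Illustrative sketch (not part of the original module): phaseData folds times
# onto the range -0.5 < phi <= 0.5. The period, epoch and sample times below are
# hypothetical examples.
def _example_phase_data():
    t = np.array([1.0, 1.2, 0.8])
    # to=1.0 is phase zero; 1.2 is 10% of a period later, 0.8 is 10% earlier.
    return phaseData(t, per=2.0, to=1.0)  # -> approx array([0.0, 0.1, -0.1])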
class trp_parameters:
"""Storage class for the parameters of the trapezoid fit algorithms
CONTENTS:
samplen - [Int] subsampling of LC model data
                    ***MUST BE ODD*** (no check is performed)
       likehoodmoddisplay - [Int] If debugLevel >= 3, display likelihood call
model and residual every iteration mod of
this parameter
cadlen - [Days] Cadence duration
fitregion - [float] Factor of duration around midpoint to actually
fit to data.
"""
def __init__(self):
self.samplen = 15
self.likehoodmoddisplay = 10
self.cadlen = 29.424/60.0/24.0 #Kepler cadence
self.fitregion = 4.0
self.debugLevel = 4
def __str__(self):
for k in self.__dict__:
print k, self.__dict__[k]
return ''
class trp_originalestimates:
"""Storage class for the original parameter estimations
CONTENTS:
period [Days] - Initial orbital period
***By default this is fixed during fitting***
epoch [Days] - Initial epoch
duration [Hours] - Initial duration fitted **In Hours**
depth [ppm] - Initial depth
"""
def __init__(self):
self.period = 1.0
self.epoch = 0.1
self.duration = 3.0
self.depth = 100.0
def __str__(self):
for k in self.__dict__:
print k, self.__dict__[k]
return ''
class trp_planetestimates:
"""Storage class for estimating a planet model based
upon the trapezoid fit solution. See Carter et al. 2008
CONTENTS:
u1 - quadratic limb darkening parameters to use
u2 - ''
period [Days] - Resulting period currently not fit
radiusRatio - from purely geometric depth=(Rp/Rstar)^2
impactParameter - Impact parameter
tauzero - [Days] - transit timescale constant
semiaxisRatio - Semi-major axis to stellar radius ratio
surfaceBright - Limb darkened surface brightness at crossing
impact parameter
equivRadiusRatio - Crude approximation to radius ratio
taking into account limb darkening
that works better than the purely geometric
radius ratio
minDepth [ppm] - minimum depth from model
avgDepth [ppm] - mean depth across transit
epoch - epoch of fit midpoint
bigT [day] - trapezoid model full duration
littleT [day] - trapezoid model ingress/egress duration
depth [ppm] - trapezoid model depth parameter
"""
def __init__(self):
self.u1 = 0.40 # limb darkening for Sun in Kepler passband
self.u2 = 0.27
self.period = 1.0
self.radiusRatio = 0.0
self.impactParameter = 0.5
self.tauzero = 0.1
self.semiaxisRatio = 20.0
self.surfaceBright = 0.5
self.equivRadiusRatio = 0.0
self.minDepth = 0.0
self.epoch = 0.0
self.bigT = 0.0
self.littleT = 0.0
self.depth = 0.0
def __str__(self):
for k in self.__dict__:
print k, self.__dict__[k]
return ''
class trp_ioblk:
"""Define a class that contains all the data needed to perform
       trapezoid fits. Numerous functions will use this class as input.
       This is purely a storage class.
       See trp_setup for an illustration of how to initialize these
storage classes
CONTENTS:
parm - [class] Storage class trp_parameters for algorithm parameters
origests - [class] Storage class trp_originalestimates for initial
parameter estimates
"""
def __init__(self):
self.parm = trp_parameters()
self.origests = trp_originalestimates()
self.planetests = trp_planetestimates()
self.physval_names = ['']
self.fixed = np.array([0])
self.nparm = 0
self.physval_mins = np.array([0.0])
self.physval_maxs = np.array([0.0])
self.physvals = np.array([0.0])
self.physvalsavs = np.array([0.0])
self.bestphysvals = np.array([0.0])
self.boundedvals = np.array([0.0])
self.boundedvalsavs = np.array([0.0])
self.bestboundedvals = np.array([0.0])
self.model = np.array([0.0])
self.errscl = 1.0
self.chi2min = 0.0
self.minimized = False
self.sampleit = np.array([0.0])
self.fitdata = np.array(0, dtype=np.bool)
self.normlc = np.array([0.0])
self.normes = np.array([0.0])
self.normts = np.array([0.0])
self.normots = np.array([0.0])
self.timezpt = 0.0
def __str__(self):
for k in self.__dict__:
print k, self.__dict__[k]
return ''
def boundedvals(ioblk):
"""Convert parameters to bounded versions that the minimzer will use
INPUT:
ioblk - [class] trp_ioblk class
OUTPUT:
ioblk - [class]
err - [0 ok ; 1 not ok]
"""
err = 0 # Error flag
maxmindelta = ioblk.physval_maxs - ioblk.physval_mins
datamindelta = ioblk.physvals - ioblk.physval_mins
ioblk.boundedvals = -np.log( maxmindelta / datamindelta - 1.0)
if ~np.isfinite(ioblk.boundedvals).all():
print "Bounded Vals Bad"
print ioblk.boundedvals
print ioblk.physvals
err = 1
return ioblk, err
def unboundedvals(ioblk):
"""Convert bounded parameter values that the minimizer uses to physvals
INPUT:
ioblk - [class] trp_ioblk class
OUTPUT:
ioblk - [class]
err - [0 ok ; 1 not ok]
"""
err = 0 # Error flag
maxmindelta = ioblk.physval_maxs - ioblk.physval_mins
ioblk.physvals = ioblk.physval_mins + \
(maxmindelta / (1.0 + np.exp( -ioblk.boundedvals )))
#if np.sum( np.isfinite(ioblk.physvals) ) != np.size(ioblk.boundedvals) :
if ~np.isfinite(ioblk.physvals).all():
print "UnBounded Vals Bad"
print ioblk.boundedvals
print ioblk.physvals
err = 1
return ioblk, err
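# Illustrative sketch (not part of the original module): boundedvals() and
# unboundedvals() are inverse logistic transforms, so a round trip recovers the
# physical parameters. The two-parameter limits below are hypothetical examples.
def _example_bounded_roundtrip():
    ioblk = trp_ioblk()
    ioblk.physval_mins = np.array([0.0, 0.0])
    ioblk.physval_maxs = np.array([1.0, 10.0])
    ioblk.physvals = np.array([0.25, 4.0])
    ioblk, err = boundedvals(ioblk)    # map physvals onto the unbounded space
    ioblk, err = unboundedvals(ioblk)  # map back; physvals is again ~[0.25, 4.0]
    return ioblk.physvals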
def trapezoid(t, depth, bigT, littleT):
"""Trapezoid shape for model
INPUT:
t - [float] vector of independent values to evaluate
trapezoid model
depth - [float] depth of trapezoid
bigT - [float] full trapezoid duration
littleT - [float] 'ingress/egress' duration
OUTPUT:
output - [float] vector of trapezoid model values
"""
output = np.full_like(t, 1.0)
t = np.abs(t)
output = np.where(t <= bigT/2.0 - littleT/2.0, 1.0 - depth, output)
output = np.where(np.logical_and(t > bigT/2.0 - littleT/2.0, \
t < bigT/2.0 + littleT/2.0), \
1.0 - depth + ((depth/littleT)* \
(t-bigT/2.0 + littleT/2.0)), output)
return output
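# Illustrative sketch (not part of the original module): evaluate the trapezoid
# shape at a few characteristic times. With the hypothetical values depth=0.01,
# bigT=0.2 and littleT=0.05, the flat bottom covers |t| <= 0.075, the ingress
# midpoint t=0.1 sits at half depth, and |t| >= 0.125 is out of transit.
def _example_trapezoid_shape():
    t = np.array([0.0, 0.05, 0.1, 0.2])
    return trapezoid(t, depth=0.01, bigT=0.2, littleT=0.05)
    # -> approx array([0.99, 0.99, 0.995, 1.0])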
def trapezoid_model_onemodel(ts, period, epoch, depth, bigT, littleT, subsamplen):
"""Make a trapezoid model at the given input parameters. This routine
generates the ioblk class which is used in the transit model.
You can save time if you want to generate many models by
calling this function once to generate the ioblk and then call
trapezoid_model_raw() to generate the models at other inputs
bypassing some of the setup routines in this function.
INPUT:
ts - Mid cadence time stamps
      period - Period of signal ***assumed fixed during model generation***
epoch - Estimated epoch of signal. Must be on same system
as ts
depth [ppm] - Model depth
      bigT [hr] - full transit duration in hours
littleT [hr] - ingress time in hours
      subsamplen - Subsample each cadence by this factor
OUTPUT:
     ioblk - structure class containing model light curve
located at ioblk.modellc
"""
# Instantiate trp_ioblk class and fill in values
ioblk = trp_ioblk()
ioblk.parm.debugLevel = 0
ioblk.parm.samplen = subsamplen
ioblk.normots = ts
ioblk.origests.period = period
ioblk.origests.epoch = epoch
ioblk.origests.depth = depth
ioblk.origests.duration = bigT
# Calculate this from timeSeries
ioblk.parm.cadlen = np.median(np.diff(ts))
ioblk = trp_setup(ioblk)
# update the tratio
ioblk.physvals[3] = littleT / bigT
ioblk, err = boundedvals(ioblk)
ioblk.physvalsavs = ioblk.physvals
ioblk.boundedvalsavs = ioblk.boundedvals
ioblk, err = trapezoid_model(ioblk)
return ioblk
def trapezoid_model_raw(ioblk, epoch, depth, bigT, littleT):
"""If you have a preexisting ioblk from fit or trapezoid_model_onemodel()
You can just call this function to get another model
at a different epoch depth duration and ingress time
       ***period is not variable at this point; call
trapezoid_model_onemodel() instead
INPUT:
ioblk - pre-existing ioblk from fitting or trapezoid_model_onemodel()
epoch - Estimated epoch of signal. Must be on same system
as ts
depth [ppm] - Model depth
      bigT [hr] - full transit duration in hours
      littleT [hr] - ingress time in hours
OUTPUT:
     ioblk - structure class containing model light curve
located at ioblk.modellc
"""
ioblk.physvals[0] = epoch - ioblk.origests.epoch
ioblk.physvals[1] = depth / 1.0e6
ioblk.physvals[2] = bigT / 24.0
ioblk.physvals[3] = littleT / bigT
ioblk, err = boundedvals(ioblk)
ioblk, err = trapezoid_model(ioblk)
return ioblk
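# Illustrative sketch (not part of the original module): build one trapezoid
# model light curve, then reuse the returned ioblk for cheaper re-evaluations,
# as the docstrings above describe. The cadence grid and signal parameters are
# hypothetical examples.
def _example_model_generation():
    ts = np.arange(0.0, 27.0, 29.424 / 60.0 / 24.0)  # ~1 month of Kepler cadences
    ioblk = trapezoid_model_onemodel(ts, period=3.5, epoch=1.0, depth=500.0,
                                     bigT=3.0, littleT=0.5, subsamplen=15)
    first_model = ioblk.modellc
    # Re-evaluate at a deeper depth without repeating the setup step.
    ioblk = trapezoid_model_raw(ioblk, epoch=1.0, depth=650.0, bigT=3.0,
                                littleT=0.5)
    return first_model, ioblk.modellc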
def trapezoid_model(ioblk):
"""Generate a subsampled model at the current parameters
INPUT:
ioblk - [class] trp_ioblk class structure
OUTPUT:
ioblk - [class] modified ioblk
err - [0 ok; 1 not ok] Error flag
"""
err = 0
to = ioblk.physvals[0]
depth = ioblk.physvals[1]
bigT = ioblk.physvals[2]
tRatio = ioblk.physvals[3]
littleT = tRatio * bigT
per = ioblk.origests.period
ts = ioblk.normts
phi = phaseData(ts, per, to)
lc = np.ones_like(ioblk.normts)
cadlen = ioblk.parm.cadlen
samplen = ioblk.parm.samplen
# Call trapezoid model for data points without any subsampling needed
idx = np.where(np.logical_and(ioblk.fitdata, ioblk.sampleit == 1))[0]
if idx.size > 0:
ztmp = phi[idx] * per
lctmp = trapezoid(ztmp, depth, bigT, littleT)
lc[idx] = lctmp
# Call trapezoid model for data points that need subsampling
idx = np.where(np.logical_and(ioblk.fitdata, ioblk.sampleit > 1))[0]
if idx.size > 0:
ztmp = phi[idx] * per
deltaXSmall = cadlen / np.float(samplen)
smallBlock = np.linspace(-cadlen/2.0 + deltaXSmall/2.0,
cadlen/2.0 - deltaXSmall/2.0, samplen)
oN = ztmp.size
ztmp_highres = np.tile(ztmp, samplen)
ztmp_highres = np.reshape(ztmp_highres, (samplen, oN))
smallBlock_highres = np.tile(smallBlock, oN)
smallBlock_highres = np.reshape(smallBlock_highres, (oN, samplen))
smallBlock_highres = np.transpose(smallBlock_highres)
ztmp_highres = ztmp_highres + smallBlock_highres
ztmp_highres = ztmp_highres.ravel(order='F')
lctmp_highres = trapezoid(ztmp_highres, depth, bigT, littleT)
nN = ztmp_highres.size
lctmp = lctmp_highres.reshape([oN, nN/oN]).mean(1)
lc[idx] = lctmp
ioblk.modellc = lc
if np.sum(np.isfinite(lc)) != lc.size:
err = 1
return ioblk, err
def trp_setup(ioblk):
"""Setup various data products before minimizing
INPUT:
ioblk - [class] trp_ioblk class structure
OUTPUT:
ioblk - [class] modified ioblk
"""
per = ioblk.origests.period
eph = ioblk.origests.epoch
dur = ioblk.origests.duration
depth = ioblk.origests.depth / 1.0e6
durday = dur / 24.0
phidur = dur / 24.0 / per
# Normalize the time series
ts = ioblk.normots
medianEvent = np.median(np.round((ts - eph)/per))
ioblk.timezpt = eph + (medianEvent * per)
ioblk.normts = ioblk.normots - ioblk.timezpt
# identify in transit data to over sample and fitting region
phi = phaseData(ioblk.normts, per, 0.0)
ioblk.sampleit = np.where(abs(phi) < (phidur * 1.5), ioblk.parm.samplen, 1)
ioblk.fitdata = np.where(abs(phi) < (phidur * ioblk.parm.fitregion),\
True, False)
# always fit less than a 0.25 of phase space for stability
# and efficiency reasons
ioblk.fitdata = np.where(abs(phi) > 0.25, False, ioblk.fitdata)
# Set parameters and bounds
ioblk.physval_names = ['To', 'Depth', 'BigT', 'TRatio']
ioblk.physval_mins = np.array([-durday*1.5, 1.0e-6, 0.0, 1.0e-10])
ioblk.physval_maxs = np.array([ durday*1.5, depth*5.0, durday*3.0, 1.0])
ioblk.fixed = np.array([0, 0, 0, 0])
ioblk.physvals = np.array([0.0, depth, durday, 0.2])
ioblk.nparm = np.size(ioblk.fixed)
# Validate trapezoid fit inputs look reasonable
trp_validate(ioblk)
ioblk.modellc = np.full_like(ioblk.normlc, 1.0)
ioblk.chi2min = ioblk.normlc.size * 2000.0
ioblk.likecount = 0
ioblk, err = boundedvals(ioblk)
# physvalsavs and boundedvalsavs are used to store parameters
# that are fixed during the calculation
# ***They must be populated with fixed values before moving forward
ioblk.physvalsavs = ioblk.physvals
ioblk.boundedvalsavs = ioblk.boundedvals
ioblk.bestphysvals = ioblk.physvals
ioblk.bestboundedvals = ioblk.boundedvals
ioblk.minimized = False
return ioblk
def trp_validate(ioblk):
# Check that physvals are within limits
if (np.any(np.greater_equal(ioblk.physvals, ioblk.physval_maxs))):
print 'physvals: {} is greater than physval_maxs: {}'.format( \
ioblk.physvals,ioblk.physval_maxs)
raise ValueError("TrapFit: physvals has value greater than physval_maxs")
if (np.any(np.less_equal(ioblk.physvals, ioblk.physval_mins))):
print 'physvals: {} is less than physval_mins: {}'.format( \
ioblk.physvals,ioblk.physval_mins)
raise ValueError("TrapFit: physvals has value less than physval_mins")
# Check for NaNs in input data series
if (np.any(np.isnan(ioblk.normlc))):
raise ValueError("TrapFit: Input light curve contains NaN")
if (np.any(np.isnan(ioblk.normes))):
raise ValueError("TrapFit: Input uncertainty estimate contains NaN")
if (np.any(np.isnan(ioblk.normots))):
raise ValueError("TrapFit: Input time data contains NaN")
# Check for input data series that has negative flux data should be
# normalized to 1.0
if (np.any(np.less(ioblk.normlc,0.0))):
raise ValueError("TrapFit: Negative Flux in light curve")
def trp_likehood(pars,ioblk):
"""Return a residual time series of data minus model
trp_setup(ioblk) should be called before this function is called
INPUT:
pars - [numpy array] vector of parameter values
ioblk - [class] trp_ioblk class structure
OUTPUT:
residuals - sum of squares of residuals of data - model
ioblk - [class] modified ioblk
"""
ioblk.likecount += 1
# Update parameters into bounded values
idx = np.where(ioblk.fixed == 0)[0]
ioblk.boundedvals[idx] = pars
ioblk.boundedvals = np.where(ioblk.fixed == 1, ioblk.boundedvalsavs,
ioblk.boundedvals)
# Convert to unbounded values
ioblk, err = unboundedvals(ioblk)
# Generate Model
ioblk, err = trapezoid_model(ioblk)
# Calculate residuals
idx = np.where(ioblk.fitdata)[0]
residuals = (ioblk.normlc[idx] - ioblk.modellc[idx])/(ioblk.normes[idx] * ioblk.errscl)
# Return scalar summed residuals
residuals = np.sum(residuals**2)
# Do plotting
if ioblk.parm.debugLevel > 2:
if ioblk.likecount == 1: # Setup figures for first time
ioblk.fighandle = plt.figure(figsize=(3,2),dpi=300,
facecolor='white')
ioblk.axhandle = plt.gca()
ioblk.axhandle.set_position([0.125, 0.125, 0.825, 0.825])
ioblk.axhandle.set_axis_bgcolor('white')
if np.mod(ioblk.likecount, ioblk.parm.likehoodmoddisplay) == 0 \
or ioblk.likecount == 1:
plt.figure(ioblk.fighandle.number)
plt.cla()
period = ioblk.origests.period
tzero = ioblk.physvals[0]
ts = ioblk.normts
phi = phaseData(ts, period, tzero)
plt.plot(phi,ioblk.normlc,'.',markersize=0.6)
plt.plot(phi,ioblk.modellc,'.r',markersize=0.6)
plt.pause(0.0001) # This line forces a draw it seems
# getting matplotlib to plot in a non blocking
# manner has a storied history on the web
# this method may fail in later versions
if ioblk.parm.debugLevel > 3:
raw_input("Press [ENTER]")
return residuals
def trp_iterate_solution(ioblk, nIter):
"""Peform multiple iterations starting from random initial conditions
return the best solution in a chi2 sense among the nIter iterations
"""
bestChi2s = np.zeros(nIter)
bestParameters = np.zeros((ioblk.physvals.size, nIter))
gdFits = np.zeros(nIter, dtype=np.bool)
depth = ioblk.origests.depth / 1.0e6
for i in range(nIter):
ioblk.physvals = ioblk.physval_mins + \
np.random.rand(ioblk.physvals.size) * \
(ioblk.physval_maxs - ioblk.physval_mins)
# Force depth parameter to start at minimum half the depth
if ioblk.physvals[1] < np.abs(depth/2.0):
ioblk.physvals[1] = depth / 2.0
# Replace random starts with parameters values that are fixed
ioblk.physvals = np.where(ioblk.fixed == 1, ioblk.physvalsavs, \
ioblk.physvals)
ioblk, err = boundedvals(ioblk)
freeidx = np.where(ioblk.fixed == 0)[0]
startParameters = ioblk.boundedvals[freeidx]
#usemethod = 'Nelder-Mead'
usemethod = 'Powell'
useoptions = {'xtol': 1e-5, 'ftol': 1e-5, 'maxiter': 2000, 'maxfev': 2000}
#usemethod = 'CG'
#useoptions = {'gtol': 1e-5, 'maxiter': 2000}
allOutput = opt.minimize(trp_likehood, startParameters, args=(ioblk,), \
method=usemethod, options=useoptions)
ioblk.boundedvals[freeidx] = allOutput['x']
ioblk.boundedvals = np.where(ioblk.fixed == 1, ioblk.boundedvalsavs, \
ioblk.boundedvals)
ioblk, err = unboundedvals(ioblk)
chi2min = allOutput['fun']
if ioblk.parm.debugLevel > 0:
strout = "%s %d %s %f" % ("It: ",i," Chi2: ",chi2min)
print strout
print ioblk.physvals
if np.isfinite(ioblk.physvals).all():
gdFits[i] = True
bestChi2s[i] = chi2min
bestParameters[:,i] = ioblk.physvals
    # Done with iterations; find the best one by chi2min
bestMaskedIdx = np.argmin(bestChi2s[gdFits])
ioblk.chi2min = bestChi2s[gdFits][bestMaskedIdx]
ioblk.bestphysvals = bestParameters[:,gdFits][:,bestMaskedIdx]
ioblk.physvals = ioblk.bestphysvals
ioblk, err = boundedvals(ioblk)
ioblk.bestboundedvals = ioblk.boundedvals
if ioblk.parm.debugLevel > 0:
strout = "%s %f" % ("Overall Best Chi2 Min: ",ioblk.chi2min)
print strout
print ioblk.physvals
ioblk.minimized = True
return ioblk
def trp_estimate_planet(ioblk):
"""Convert the trapezoid fit solution into a crude estimate
of a planet model that is close to trapezoid solution
This fills out values in trp_planetestimates class
"""
if not ioblk.minimized:
strout = "Warning getting planet estimates for non converged \
trapezoid fit. Do not trust results"
print strout
ioblk.planetests.period = ioblk.origests.period
ioblk.planetests.epoch = ioblk.timezpt + ioblk.bestphysvals[0]
ioblk.planetests.bigT = ioblk.bestphysvals[2]
ioblk.planetests.littleT = ioblk.bestphysvals[3] * \
ioblk.planetests.bigT
ioblk.planetests.depth = ioblk.bestphysvals[1]
# call likehood to get best transit model
idx = np.where(ioblk.fixed == 0)[0]
resids = trp_likehood(ioblk.bestboundedvals[idx], ioblk)
trapmodlc = ioblk.modellc
ioblk.planetests.minDepth = (1.0 - trapmodlc.min()) * 1.0e6
ioblk.planetests.radiusRatio = np.sqrt(ioblk.planetests.minDepth / 1.0e6)
ioblk.planetests.impactParameter = np.sqrt(1.0 - \
np.amin([ioblk.planetests.radiusRatio * \
ioblk.planetests.bigT/ioblk.planetests.littleT, 1.0]))
ioblk.planetests.tauzero = np.sqrt(ioblk.planetests.bigT * \
ioblk.planetests.littleT / 4.0 / \
ioblk.planetests.radiusRatio)
ioblk.planetests.semiaxisRatio = ioblk.planetests.period / 2.0 / \
np.pi / ioblk.planetests.tauzero
mu = np.sqrt(1.0 - ioblk.planetests.impactParameter**2)
ioblk.planetests.surfaceBright = 1.0 - ioblk.planetests.u1*(1.0-mu) - \
ioblk.planetests.u2*(1.0-mu)**2
ioblk.planetests.equivRadiusRatio = ioblk.planetests.radiusRatio / \
np.sqrt(ioblk.planetests.surfaceBright)
return ioblk
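# Rough numerical sketch of the estimates above (illustrative values only): a
# fitted minimum depth of 300 ppm gives
#   radiusRatio = sqrt(300 / 1.0e6) ~= 0.0173
# and, with bigT/littleT = 10, an impact parameter of
#   impactParameter = sqrt(1.0 - min(0.0173 * 10, 1.0)) ~= 0.909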
def trapezoid_fit(timeSeries, dataSeries, errorSeries, \
signalPeriod, signalEpoch, signalDuration, signalDepth, \
fitTrialN=13, fitRegion=4.0, errorScale=1.0, debugLevel=0,
sampleN=15, showFitInterval=30):
"""Perform a trapezoid fit to a normalized flux time series
Assumes all data has the same cadence duration
Period is fixed during the trapezoid fit
AUTHOR: Christopher J Burke
INPUT:
timeSeries - Mid cadence time stamps
dataSeries - Normalized time series
errorSeries - Error time series
signalPeriod - Period of signal ***assumed fixed during model fit**
signalEpoch - Estimated epoch of signal. Must be on same system
as timeSeries
signalDuration [hr] - Estimated signal duration ***In hours**
signalDepth [ppm] - Estimated signal depth
fitTrialN - How many trial fits to perform starting at random
initial locations. Increase this if you find the
minimization is returning local minima
fitRegion - Fit data within fitRegion*signalDuration of signalEpoch
errorScale - Default 1.0 - Scale the errorbars by this factor
debugLevel - 0 Show nothing; 1-Show some text about iterations
2 Show some more text; 3 - Show graphical fit in
progress; 4 - pause for each graphical fit
sampleN - Subsample each cadence by this factor
showFitInterval - If debugLevel >=3 the show every showFitInterval
function evaluation
OUTPUT:
ioblk - An instance of trp_ioblk which is a class used to store
all information pertaining to fit results
"""
# Instantiate trp_ioblk class and fill in values
ioblk = trp_ioblk()
ioblk.parm.debugLevel = debugLevel
ioblk.parm.samplen = sampleN
ioblk.parm.likehoodmoddisplay = showFitInterval
ioblk.fitregion = fitRegion
ioblk.normlc = dataSeries
ioblk.normes = errorSeries
ioblk.errscl = errorScale
ioblk.normots = timeSeries
ioblk.origests.period = signalPeriod
ioblk.origests.epoch = signalEpoch
ioblk.origests.duration = signalDuration # input duration is hours
ioblk.origests.depth = signalDepth
# Calculate this from timeSeries
ioblk.parm.cadlen = np.median(np.diff(timeSeries))
# setup some more variables
ioblk = trp_setup(ioblk)
# Find solution by trying random initial conditions
ioblk = trp_iterate_solution(ioblk,fitTrialN)
# Convert the trapezoid fit solution into a pseudo planet model parameters
ioblk = trp_estimate_planet(ioblk)
# Raise an exception if final model is consistent with flat
if (np.all(np.abs(ioblk.modellc - ioblk.modellc[0]) \
< (10.0 * sys.float_info.epsilon))):
raise ValueError("TrapFit: Model light curve is flat!")
# Check for NaNs in output model
if (np.any(np.isnan(ioblk.modellc))):
raise ValueError("TrapFit: Output Model light curve contains NaN")
return ioblk
# Run the test of a trapezoid model fit in gaussian noise
if __name__ == "__main__":
# Make some fake data
dataSpan = 80.0 # in Days
exposureLength = 1.0/48.0 # in Days simulating 48 cadences per day
    nData = int(dataSpan / exposureLength)
noiseLevel = 40.0 # noise per observation in ppm
signalDepth = 300.0 # signal depth in ppm
signalDuration = 5.0 / 24.0 # in Days
signalDurationHours = signalDuration * 24.0
signalPeriod = 10.4203 # in Days
signalEpoch = 5.1 # in Days
    timeSeries = np.linspace(0.0, dataSpan, nData)
dataSeries = 1.0 + np.random.randn(nData) / 1.0e6 * noiseLevel
errorSeries = np.full_like(dataSeries,noiseLevel/1.0e6)
# Instantiate trp_ioblk class and fill in values
ioblk = trp_ioblk()
ioblk.parm.samplen = 15
ioblk.parm.cadlen = exposureLength
ioblk.fitregion = 4.0
ioblk.normlc = dataSeries
ioblk.normes = errorSeries
ioblk.normots = timeSeries
ioblk.origests.period = signalPeriod
ioblk.origests.epoch = signalEpoch
ioblk.origests.duration = signalDurationHours # input duration is hours
ioblk.origests.depth = signalDepth
# setup some more variables
ioblk = trp_setup(ioblk)
ioblk.physvals = np.array([0.0, signalDepth/1.0e6, signalDuration, 0.1])
# Make a model trapezoid light curve
ioblk, err = trapezoid_model(ioblk)
#Phase data
phasedSeries = phaseData(timeSeries, signalPeriod, signalEpoch)
# Insert signal
phaseDuration = signalDuration / signalPeriod
dataSeries = dataSeries * ioblk.modellc
#plt.plot(phasedSeries, dataSeries, '.')
#plt.show()
#plt.plot(timeSeries, dataSeries, '.')
#plt.show()
# Test fitting
ioblk = trapezoid_fit(timeSeries, dataSeries, errorSeries, \
signalPeriod, signalEpoch+0.001, signalDurationHours*0.9, \
signalDepth*1.1, \
fitTrialN=2, fitRegion=4.0, errorScale=1.0, debugLevel=3,
sampleN=15, showFitInterval=30)
print ioblk
# test generating model
newioblk = trapezoid_model_onemodel(timeSeries, signalPeriod, \
signalEpoch, signalDepth, signalDurationHours, \
signalDurationHours*0.1, ioblk.parm.samplen)
plt.close('all')
plt.plot(phasedSeries, newioblk.modellc,'.b')
newioblk = trapezoid_model_raw(newioblk, signalEpoch+0.05, signalDepth*1.5, \
signalDurationHours*2.0, signalDurationHours*2.0*0.2)
plt.plot(phasedSeries, newioblk.modellc, '.r')
plt.show()
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import argparse
import os
import sys
import subprocess
import json
import tarfile
import contextlib
import re
from pwd import getpwuid
from os import listdir
from os.path import join, isfile, isdir, dirname
import datetime as dt
from cp3_llbb.SAMADhi.SAMADhi import Dataset, Sample, File, DbStore
def get_options():
"""
Parse and return the arguments provided by the user.
"""
username = getpwuid(os.stat(os.getcwd()).st_uid).pw_name
parser = argparse.ArgumentParser(description='Provide a list of things to be deleted in /storage/data/cms/store/user/')
parser.add_argument('--crabUsername', action='store', dest='crabUsername', default=username, type=str,
help='crab / storage username')
parser.add_argument('--ingridUsername', action='store', dest='ingridUsername', default=username, type=str,
help='ingrid username')
options = parser.parse_args()
return options
def get_dataset(inputDataset = None, inputID = None):
dbstore = DbStore()
if inputDataset is not None:
resultset = dbstore.find(Dataset, Dataset.name == inputDataset)
elif inputID is not None:
resultset = dbstore.find(Dataset, Dataset.dataset_id == inputID)
return list(resultset.values(Dataset.name, Dataset.dataset_id, Dataset.nevents, Dataset.process))
def main(crabUsername, ingridUsername):
dbstore = DbStore()
print "##### Get the list of potential DB samples of interest"
list_allDBsamples = []
results = dbstore.find(Sample)
for r in results:
if r.author is None:
continue
for f in r.files:
if crabUsername in f.lfn:
p = '/storage/data/cms' + re.sub('/output.*root', '', f.lfn)
if p not in list_allDBsamples:
list_allDBsamples.append(p)
if crabUsername in r.path or ingridUsername in r.author:
if r.path == '':
continue
if r.path not in list_allDBsamples:
list_allDBsamples.append(r.path)
# print r.path
print ""
storageDir = join('/storage/data/cms/store/user/', crabUsername)
print "##### Get the list of user paths in %s" % storageDir
list_allUserDirs = {}
currentTime = dt.datetime.now()
tcut = getDateMinusT(currentTime, month = 1)
for d in listdir(storageDir):
if not isdir(join(storageDir, d)):
continue
if 'CRAB_PrivateMC' in d or 'testFiles' in d :
continue
for subd in listdir(join(storageDir, d)):
if not isdir(join(storageDir, d, subd)):
continue
for taskStamp in listdir(join(storageDir, d, subd)):
if not isdir(join(storageDir, d, subd, taskStamp)):
continue
try:
ttask = int(taskStamp.replace('_', ''))
except ValueError:
print("Warning: could not interpret path {}, skipping it...".format(taskStamp))
continue
if ttask >= tcut:
continue
for taskID in listdir(join(storageDir, d, subd, taskStamp)):
if not isdir(join(storageDir, d, subd, taskStamp, taskID)):
continue
myPath = join(storageDir, d, subd, taskStamp, taskID)
if myPath in list_allDBsamples:
continue
# print isFramework(myPath), myPath
try:
mySize = subprocess.check_output(["du", '-s', myPath]).split()[0].decode('utf-8')
except subprocess.CalledProcessError:
print("Error while accessing file in path {}, skipping it!".format(myPath))
continue
list_allUserDirs[ttask] = {'path': myPath, 'size': int(mySize) * 1024, 'is CP3-llbb': isFramework(myPath)}
print '# Tasks older than 6 months'
print '# timestamp= ', getDateMinusT(currentTime, month = 6)
totalSize = 0
finalprint = ''
for t in list_allUserDirs:
if t < getDateMinusT(currentTime, month = 6) and list_allUserDirs[t]['is CP3-llbb']:
totalSize += list_allUserDirs[t]['size']
finalprint += "# size= %s\nrm -r %s\n" % (sizeof_fmt(list_allUserDirs[t]['size']), list_allUserDirs[t]['path'])
print '# totalSize= ', sizeof_fmt(totalSize)
print finalprint
print '# Tasks between 3 and 6 months old'
print '# timestamp= ', getDateMinusT(currentTime, month = 3)
totalSize = 0
finalprint = ''
for t in list_allUserDirs:
if getDateMinusT(currentTime, month = 6) < t < getDateMinusT(currentTime, month = 3) and list_allUserDirs[t]['is CP3-llbb']:
totalSize += list_allUserDirs[t]['size']
finalprint += "# size= %s\nrm -r %s\n" % (sizeof_fmt(list_allUserDirs[t]['size']), list_allUserDirs[t]['path'])
print '# totalSize= ', sizeof_fmt(totalSize)
print finalprint
print '# Tasks between 1 and 3 months old'
print '# timestamp= ', getDateMinusT(currentTime, month = 1)
totalSize = 0
finalprint = ''
for t in list_allUserDirs:
if getDateMinusT(currentTime, month = 3) < t < getDateMinusT(currentTime, month = 1) and list_allUserDirs[t]['is CP3-llbb']:
totalSize += list_allUserDirs[t]['size']
finalprint += "# size= %s\nrm -r %s\n" % (sizeof_fmt(list_allUserDirs[t]['size']), list_allUserDirs[t]['path'])
print '# totalSize= ', sizeof_fmt(totalSize)
print finalprint
print '# The following tasks could not be asserted to be cp3_llbb framework tasks or not... deal with them as you see fit:'
totalSize = 0
finalprint = ''
for t in list_allUserDirs:
if not list_allUserDirs[t]['is CP3-llbb']:
totalSize += list_allUserDirs[t]['size']
finalprint += "# size= %s\tpath= %s\n" % (sizeof_fmt(list_allUserDirs[t]['size']), list_allUserDirs[t]['path'])
print '# totalSize= ', sizeof_fmt(totalSize)
print finalprint
def getDateMinusT(currentTime, year = 0, month = 3, day = 0):
day = day + 365 * year
day = day + 31 * month
t = dt.timedelta(day)
t = currentTime - t
y = t.year - 2000
mo = t.month
d = t.day
h = t.hour
mi = t.minute
s = t.second
t = [y, mo, d, h, mi, s]
t = map(str, t)
t = [x.zfill(2) for x in t]
t = ''.join(t)
return int(t)
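# Worked example (illustrative date): getDateMinusT(dt.datetime(2016, 5, 10, 12, 0, 0),
# month=1) subtracts 31 days -> 2016-04-09 12:00:00, packed as the integer
# 160409120000, i.e. the same YYMMDDHHMMSS layout the task-stamp directories are
# parsed into in main().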
def sizeof_fmt(num, suffix='B'):
# Taken from http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
for unit in ['','k','M','G','T','P','E','Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
def isFramework(path, f = 'log/cmsRun_1.log.tar.gz'):
# Resurrecting some parsing code last seen in runPostCrab
# https://github.com/cp3-llbb/GridIn/commit/2c5b8b07b30206688d87dafb3b0a9dbfb61e71c7
# print path, f
tarLog = join(path, f)
if not isfile(tarLog):
# print "\t", isdir(dirname(tarLog)), dirname(tarLog)
if isdir(dirname(tarLog)):
logs = [x for x in listdir(dirname(tarLog)) if isfile(join(dirname(tarLog),x))]
if len(logs) < 1:
print("Warning: no log files found for this task, no way to tell if it was produced by our framework or not")
return False
return isFramework(dirname(tarLog), f = logs[0])
else:
if 'failed' not in f:
                # maybe the log does not exist because all tasks ran and failed?
return isFramework(path, f = 'failed/log/cmsRun_1.log.tar.gz')
else:
# impossible to assert if this is a FW task
return False
isFW = False
with tarfile.open(tarLog) as tar:
for tarFile in tar.getmembers():
if 'stdout' not in tarFile.name:
continue
# For some reason, even though we are using python 2.7, the with statement here seems broken... Using contextlib to handle the file opening / reading cleanly
with contextlib.closing(tar.extractfile(tarFile)) as file:
for line in file:
if ('cp3_llbb/Framework' in line
or 'HHAnalysis' in line or 'ZAAnalysis' in line or 'TTAnalysis' in line
or 'hh_analyzer' in line or 'za_analyzer' in line or 'tt_analyzer' in line):
isFW = True
break
return isFW
if __name__ == '__main__':
options = get_options()
main(options.crabUsername, options.ingridUsername)
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import uuid
import ldap
import ldap.filter
from keystone import clean
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import ldap as common_ldap
from keystone.common import models
from keystone.common import utils
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone import identity
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
@dependency.requires('assignment_api')
class Identity(identity.Driver):
def __init__(self, conf=None):
super(Identity, self).__init__()
if conf is None:
conf = CONF
self.user = UserApi(conf)
self.group = GroupApi(conf)
def default_assignment_driver(self):
return "keystone.assignment.backends.ldap.Assignment"
def is_domain_aware(self):
return False
def generates_uuids(self):
return False
# Identity interface
def authenticate(self, user_id, password):
try:
user_ref = self._get_user(user_id)
except exception.UserNotFound:
raise AssertionError(_('Invalid user / password'))
if not user_id or not password:
raise AssertionError(_('Invalid user / password'))
conn = None
try:
conn = self.user.get_connection(user_ref['dn'],
password, end_user_auth=True)
if not conn:
raise AssertionError(_('Invalid user / password'))
except Exception:
raise AssertionError(_('Invalid user / password'))
finally:
if conn:
conn.unbind_s()
return self.user.filter_attributes(user_ref)
def _get_user(self, user_id):
return self.user.get(user_id)
def get_user(self, user_id):
return self.user.get_filtered(user_id)
def list_users(self, hints):
return self.user.get_all_filtered()
def get_user_by_name(self, user_name, domain_id):
# domain_id will already have been handled in the Manager layer,
# parameter left in so this matches the Driver specification
return self.user.filter_attributes(self.user.get_by_name(user_name))
# CRUD
def create_user(self, user_id, user):
self.user.check_allow_create()
user_ref = self.user.create(user)
return self.user.filter_attributes(user_ref)
def update_user(self, user_id, user):
self.user.check_allow_update()
old_obj = self.user.get(user_id)
if 'name' in user and old_obj.get('name') != user['name']:
raise exception.Conflict(_('Cannot change user name'))
if self.user.enabled_mask:
self.user.mask_enabled_attribute(user)
self.user.update(user_id, user, old_obj)
return self.user.get_filtered(user_id)
def delete_user(self, user_id):
self.user.check_allow_delete()
self.assignment_api.delete_user(user_id)
user = self.user.get(user_id)
user_dn = user['dn']
groups = self.group.list_user_groups(user_dn)
for group in groups:
self.group.remove_user(user_dn, group['id'], user_id)
if hasattr(user, 'tenant_id'):
self.project.remove_user(user.tenant_id, user_dn)
self.user.delete(user_id)
def create_group(self, group_id, group):
self.group.check_allow_create()
group['name'] = clean.group_name(group['name'])
return common_ldap.filter_entity(self.group.create(group))
def get_group(self, group_id):
return self.group.get_filtered(group_id)
def update_group(self, group_id, group):
self.group.check_allow_update()
if 'name' in group:
group['name'] = clean.group_name(group['name'])
return common_ldap.filter_entity(self.group.update(group_id, group))
def delete_group(self, group_id):
self.group.check_allow_delete()
return self.group.delete(group_id)
def add_user_to_group(self, user_id, group_id):
user_ref = self._get_user(user_id)
user_dn = user_ref['dn']
self.group.add_user(user_dn, group_id, user_id)
def remove_user_from_group(self, user_id, group_id):
user_ref = self._get_user(user_id)
user_dn = user_ref['dn']
self.group.remove_user(user_dn, group_id, user_id)
def list_groups_for_user(self, user_id, hints):
user_ref = self._get_user(user_id)
user_dn = user_ref['dn']
return self.group.list_user_groups_filtered(user_dn)
def list_groups(self, hints):
return self.group.get_all_filtered()
def list_users_in_group(self, group_id, hints):
users = []
for user_dn in self.group.list_group_users(group_id):
user_id = self.user._dn_to_id(user_dn)
try:
users.append(self.user.get_filtered(user_id))
except exception.UserNotFound:
LOG.debug(("Group member '%(user_dn)s' not found in"
" '%(group_id)s'. The user should be removed"
" from the group. The user will be ignored."),
dict(user_dn=user_dn, group_id=group_id))
return users
def check_user_in_group(self, user_id, group_id):
user_refs = self.list_users_in_group(group_id, driver_hints.Hints())
for x in user_refs:
if x['id'] == user_id:
break
else:
# Try to fetch the user to see if it even exists. This
# will raise a more accurate exception.
self.get_user(user_id)
raise exception.NotFound(_("User '%(user_id)s' not found in"
" group '%(group_id)s'") %
{'user_id': user_id,
'group_id': group_id})
# TODO(termie): turn this into a data object and move logic to driver
class UserApi(common_ldap.EnabledEmuMixIn, common_ldap.BaseLdap):
DEFAULT_OU = 'ou=Users'
DEFAULT_STRUCTURAL_CLASSES = ['person']
DEFAULT_ID_ATTR = 'cn'
DEFAULT_OBJECTCLASS = 'inetOrgPerson'
NotFound = exception.UserNotFound
options_name = 'user'
attribute_options_names = {'password': 'pass',
'email': 'mail',
'name': 'name',
'enabled': 'enabled',
'default_project_id': 'default_project_id'}
immutable_attrs = ['id']
model = models.User
def __init__(self, conf):
super(UserApi, self).__init__(conf)
self.enabled_mask = conf.ldap.user_enabled_mask
self.enabled_default = conf.ldap.user_enabled_default
def _ldap_res_to_model(self, res):
obj = super(UserApi, self)._ldap_res_to_model(res)
if self.enabled_mask != 0:
enabled = int(obj.get('enabled', self.enabled_default))
obj['enabled'] = ((enabled & self.enabled_mask) !=
self.enabled_mask)
obj['dn'] = res[0]
return obj
def mask_enabled_attribute(self, values):
value = values['enabled']
values.setdefault('enabled_nomask', int(self.enabled_default))
if value != ((values['enabled_nomask'] & self.enabled_mask) !=
self.enabled_mask):
values['enabled_nomask'] ^= self.enabled_mask
values['enabled'] = values['enabled_nomask']
del values['enabled_nomask']
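    # Illustrative sketch of the masking above, assuming an Active Directory
    # style setup with user_enabled_mask = 2 (bit set means disabled) and
    # user_enabled_default = 512 (both values are assumptions, not defaults):
    #   {'enabled': True}  -> {'enabled': 512}   (mask bit left clear)
    #   {'enabled': False} -> {'enabled': 514}   (mask bit set)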
def create(self, values):
if self.enabled_mask:
orig_enabled = values['enabled']
self.mask_enabled_attribute(values)
values = super(UserApi, self).create(values)
if self.enabled_mask:
values['enabled'] = orig_enabled
return values
def check_password(self, user_id, password):
user = self.get(user_id)
return utils.check_password(password, user.password)
def get_filtered(self, user_id):
user = self.get(user_id)
return self.filter_attributes(user)
def get_all_filtered(self):
return [self.filter_attributes(user) for user in self.get_all()]
def filter_attributes(self, user):
return identity.filter_user(common_ldap.filter_entity(user))
class GroupApi(common_ldap.BaseLdap):
DEFAULT_OU = 'ou=UserGroups'
DEFAULT_STRUCTURAL_CLASSES = []
DEFAULT_OBJECTCLASS = 'groupOfNames'
DEFAULT_ID_ATTR = 'cn'
DEFAULT_MEMBER_ATTRIBUTE = 'member'
NotFound = exception.GroupNotFound
options_name = 'group'
attribute_options_names = {'description': 'desc',
'name': 'name'}
immutable_attrs = ['name']
model = models.Group
def _ldap_res_to_model(self, res):
model = super(GroupApi, self)._ldap_res_to_model(res)
model['dn'] = res[0]
return model
def __init__(self, conf):
super(GroupApi, self).__init__(conf)
self.member_attribute = (getattr(conf.ldap, 'group_member_attribute')
or self.DEFAULT_MEMBER_ATTRIBUTE)
def create(self, values):
data = values.copy()
if data.get('id') is None:
data['id'] = uuid.uuid4().hex
if 'description' in data and data['description'] in ['', None]:
data.pop('description')
return super(GroupApi, self).create(data)
def delete(self, group_id):
if self.subtree_delete_enabled:
super(GroupApi, self).deleteTree(group_id)
else:
# TODO(spzala): this is only placeholder for group and domain
# role support which will be added under bug 1101287
group_ref = self.get(group_id)
group_dn = group_ref['dn']
if group_dn:
self._delete_tree_nodes(group_dn, ldap.SCOPE_ONELEVEL)
super(GroupApi, self).delete(group_id)
def update(self, group_id, values):
old_obj = self.get(group_id)
return super(GroupApi, self).update(group_id, values, old_obj)
def add_user(self, user_dn, group_id, user_id):
group_ref = self.get(group_id)
group_dn = group_ref['dn']
try:
super(GroupApi, self).add_member(user_dn, group_dn)
except exception.Conflict:
raise exception.Conflict(_(
'User %(user_id)s is already a member of group %(group_id)s') %
{'user_id': user_id, 'group_id': group_id})
def remove_user(self, user_dn, group_id, user_id):
group_ref = self.get(group_id)
group_dn = group_ref['dn']
try:
super(GroupApi, self).remove_member(user_dn, group_dn)
except ldap.NO_SUCH_ATTRIBUTE:
raise exception.UserNotFound(user_id=user_id)
def list_user_groups(self, user_dn):
"""Return a list of groups for which the user is a member."""
user_dn_esc = ldap.filter.escape_filter_chars(user_dn)
query = '(&(objectClass=%s)(%s=%s)%s)' % (self.object_class,
self.member_attribute,
user_dn_esc,
self.ldap_filter or '')
return self.get_all(query)
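    # For illustration, with object_class='groupOfNames', member_attribute='member',
    # no extra ldap_filter, and user_dn='cn=jdoe,ou=Users,dc=example,dc=org'
    # (all example values), the query built above is:
    #   (&(objectClass=groupOfNames)(member=cn=jdoe,ou=Users,dc=example,dc=org))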
def list_user_groups_filtered(self, user_dn):
"""Return a filtered list of groups for which the user is a member."""
user_dn_esc = ldap.filter.escape_filter_chars(user_dn)
query = '(&(objectClass=%s)(%s=%s)%s)' % (self.object_class,
self.member_attribute,
user_dn_esc,
self.ldap_filter or '')
return self.get_all_filtered(query)
def list_group_users(self, group_id):
"""Return a list of user dns which are members of a group."""
group_ref = self.get(group_id)
group_dn = group_ref['dn']
try:
attrs = self._ldap_get_list(group_dn, ldap.SCOPE_BASE,
attrlist=(self.member_attribute,))
except ldap.NO_SUCH_OBJECT:
raise self.NotFound(group_id=group_id)
users = []
for dn, member in attrs:
user_dns = member.get(self.member_attribute, [])
for user_dn in user_dns:
if self._is_dumb_member(user_dn):
continue
users.append(user_dn)
return users
def get_filtered(self, group_id):
group = self.get(group_id)
return common_ldap.filter_entity(group)
def get_all_filtered(self, query=None):
return [common_ldap.filter_entity(group)
for group in self.get_all(query)]
|
|
#!/usr/bin/env python
import sys
import os
import itertools
import shutil
import getopt
import re
import time
import const
import pdb
import traceback
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import SubElement
from datetime import *
import metacomm.combinatorics.all_pairs2
import unittest
all_pairs = metacomm.combinatorics.all_pairs2.all_pairs2
Manifest_Row = 0
Device_Ip = ""
Device_Ip_List = []
Device_SSH_List = []
Pack_Type = "xpk"
Test_Flag = "positive"
Test_Device_Type = "ssh"
test_start_time = datetime.now().strftime('%m-%d-%H:%M:%S')
reload(sys)
sys.setdefaultencoding("utf-8")
def do_Selfcom(self_combin_file, out_file):
try:
file = open(self_combin_file)
allpairs_in = open(out_file, 'a+')
while True:
line = file.readline()
if not line:
break
allpairs_in.writelines(line + "\n")
file.close()
allpairs_in.close()
return
except Exception as e:
print Exception, ":", e
def fileline_count(fp):
return len(open(fp).readlines())
def del_Seed1(in_file):
try:
caseline = ""
row = 0
file = open(in_file)
items = []
self_file = []
s_name = p_name = ""
if (os.path.isdir("self")):
do_Clear(const.path + "/self")
os.mkdir(const.path + "/self")
while True:
p_name = s_name
line = file.readline()
if not line:
break
line = line.strip('\n\r')
items = line.split(":")
s_name = items[0].split("-")[0]
if ((p_name != s_name) and (p_name != "")):
fp = open(const.path + "/self/" + s_name + "_input.txt", 'a+')
fp.writelines(line + "\n")
else:
fp = open(const.path + "/self/" + s_name + "_input.txt", 'a+')
fp.writelines(line + "\n")
if (s_name != p_name):
self_file.append(s_name)
fp.close()
file.close()
if (os.path.isfile(const.selfcomb_file)):
os.remove(const.selfcomb_file)
for i in range(0, len(self_file)):
line_count = fileline_count(
const.path +
"/self/" +
self_file[i] +
"_input.txt")
if (line_count >= 2):
lists = [[] for m in range(line_count)]
open_input_file = open(
const.path +
"/self/" +
self_file[i] +
"_input.txt",
'a+')
while True:
line = open_input_file.readline()
if not line:
break
line = line.strip('\n\r')
items = line.split(":")
get_item = items[1].split(",")
for g in get_item:
lists[row].append(g)
row = row + 1
input_pair = all_pairs(lists)
open_input_file.close()
output_pair = open(
const.path +
"/self/" +
self_file[i] +
"_output.txt",
'a+')
for e, v in enumerate(input_pair):
for c in range(0, len(v)):
caseline = caseline + v[c]
caseline = caseline + ","
output_pair.writelines(self_file[i] + ":" + caseline[:-1])
output_pair.close()
else:
open_input_file = open(
const.path +
"/self/" +
self_file[i] +
"_input.txt",
'r')
output_pair = open(
const.path +
"/self/" +
self_file[i] +
"_output.txt",
'a+')
while True:
line = open_input_file.readline()
if not line:
break
line = line.strip('\n\r')
output_pair.writelines(line)
output_pair.close()
open_input_file .close()
            # 1*********input_seed -> selfcomb.txt
            # If there are several self-combination inputs, each one generates its
            # own output file; finally all of them are merged into one selfcomb.txt
do_Selfcom(
const.path +
"/self/" +
self_file[i] +
"_output.txt",
const.selfcomb_file)
row = 0
# 2*********selfcomb -> output file by allpairs
gen_selfcomb_File1(const.selfcomb_file, in_file)
# 3*********output -> manifest.json
gen_Manifest_Json1(const.output_file, in_file)
return "Manifest.json output ------------------------->O.K"
except Exception as e:
print Exception, ":", e
return "Manifest.json output ------------------------->Error"
def gen_Manifest_Json1(output_file, in_file):
try:
global Manifest_Row
global Pack_Type
manifest = "{\n "
file = open(output_file)
if (Test_Flag == "positive"):
testfile = open("test.py", 'w+')
testfile.writelines(
"#!/usr/bin/env python \n# coding=utf-8 \nimport random,os,sys,unittest,run_test,codecs \nreload(sys) \nsys.setdefaultencoding( \"utf-8\" ) \nclass TestCaseUnit(unittest.TestCase): \n ")
name_list = []
get_self = ""
line = file.readline().strip('\n\r')
items = line.split(" ")
counters = len(items)
try:
os.mkdir(const.path + "/tcs")
except:
print "make tcs folder error"
for i in items:
name_list.append(i)
while True:
line = file.readline()
if not line:
break
line = line.strip('\n\r')
items = line.split(" ")
counters = len(items)
os.mkdir(const.path +
"/tcs/Crosswalk-Manifest-Check" +
str(Manifest_Row +
1))
fp = open(const.path +
"/tcs/Crosswalk-Manifest-Check" +
str(Manifest_Row +
1) +
"/manifest.json", 'w')
for i in range(0, len(items)):
if ((name_list[i]) != "icons" and (name_list[i]) != "xwalk_permissions" and (
name_list[i]) != "xwalk_launch_screen"):
if (items[i].find("000") != -1):
items[i] = items[i].replace("000", " ")
get_self = get_self + "\"" + \
name_list[i] + "\"" + " : " + "\"" + \
items[i].replace("null", "") + "\",\n"
else:
get_self = get_self + "\"" + \
name_list[
i].strip() + "\"" + " : " + "\"" + items[i].replace("null", "") + "\",\n"
else:
items[i] = items[i].replace("comma", ",")
get_self = get_self + "\"" + \
name_list[i] + "\"" + " : " + \
items[i].replace("null", "") + ",\n"
get_self = "{\n" + get_self[:-2] + "\n}"
fp.writelines(get_self)
print "\n-----------------------------------------------------------", items[0]
print get_self
testfile.writelines("\n def test_case_" +
str(Manifest_Row +
1) +
"(self):\n self.assertEqual(\"Pass\", run_test.run_test_result(\"Crosswalk-Manifest-Check" +
str(Manifest_Row +
1) +
"\"," +
"\"" +
items[0].decode("utf-8") +
"\"" +
"))" +
"\n ")
Manifest_Row = Manifest_Row + 1
get_self = ""
testfile.writelines(
"\nif __name__ == '__main__':\n suite1 = unittest.TestLoader().loadTestsFromTestCase(TestCaseUnit)\n suite = unittest.TestSuite([suite1])\n unittest.TextTestRunner(verbosity=2).run(suite) ")
file.close()
testfile.close()
return "<--------------- Generate manifest.json O.K ------------------>"
except Exception as e:
print Exception, "------------------------->:", e
print traceback.format_exc()
return "Generate manifest.json error"
def gen_selfcomb_File1(comb_file, in_file):
try:
# if (os.path.isfile("./allpairs/output.txt") &
# (Test_Flag=="positive")):
do_Clear("./allpairs/output.txt")
# do_Clear("./allpairs/output_negative.txt")
if (Test_Flag == "negative"):
open_output_file = open(const.output_file_ne, 'a+')
else:
open_output_file = open(const.output_file, 'a+')
caseline = ""
get_items = ""
get_case = ""
row = 0
line_count = fileline_count(comb_file)
if (line_count >= 2):
lists = [[] for m in range(line_count)]
open_input_file = open(comb_file)
while True:
line = open_input_file.readline()
if not line:
break
line = line.strip('\n\r')
items = line.split(":")
get_items = get_items + items[0].split("-")[0] + "\t"
open_output_file.writelines(get_items.rstrip("\t") + "\n")
open_input_file.close()
open_input_file = open(comb_file)
for i in range(0, len(lists)):
line = open_input_file.readline()
if not line:
break
line = line.strip('\n\r')
items = line.split(":") # items[0]=field;#item[1]=value
value = line[len(items[0]) + 1:]
get_item = value.split(",")
for g in get_item:
lists[row].append(g)
row = row + 1
# print lists
input_pair = all_pairs(lists)
for e, v in enumerate(input_pair):
for c in range(0, len(v)):
get_case = get_case + v[c] + "\t"
open_output_file.writelines(get_case.rstrip("\t") + "\n")
get_case = ""
open_output_file.close()
return "Generate selfcombination file ------------------------->O.K"
except:
print traceback.format_exc()
def app_Folder(path_tcs):
try:
for file in os.listdir(path_tcs):
copy_Files(const.path_resource, os.getcwd() + "/tcs/" + file)
return "Webapp folder copy ------------------------->O.K", path_tcs
except Exception as e:
print Exception, ":", e
return "Webapp folder copy ------------------------->error", path_tcs
def copy_Files(sourceDir, targetDir):
try:
copyFileCounts = 0
for f in os.listdir(sourceDir):
sourceF = os.path.join(sourceDir, f)
targetF = os.path.join(targetDir, f)
if os.path.isfile(sourceF):
# create folder
if not os.path.exists(targetDir):
os.makedirs(targetDir)
copyFileCounts = copyFileCounts + 1
                # copy the file only if the target does not already exist
if not os.path.exists(targetF):
# copy file
open(targetF, "wb").write(open(sourceF, "rb").read())
# else:
# print ("file exist do not copy")
if os.path.isdir(sourceF):
copy_Files(sourceF, targetF)
return "Copy File O.k", sourceDir, "------------------------->", targetDir
except Exception as e:
print Exception, ":", e
return "Copy File error", sourceDir, "------------------------->", targetDir
def do_Clear(sourceDir):
try:
if (os.path.exists(sourceDir)):
if (os.path.isdir(sourceDir)):
shutil.rmtree(sourceDir)
else:
os.remove(sourceDir)
except IOError as e:
print Exception, "Clear :" + sourceDir + " ------------------------->error", e
def Usage():
print "<-------------------------test.py usage:------------------------->"
print "-h,--help: print help message"
print "-n, --negative seed test"
print "-o, --order: input allpairs order default 2"
print "-p, --pack: pack xpk or wgt default wgt"
print "--foo: Test option "
def main(argv):
try:
global Pack_Type
global Test_Flag
global Test_Device_Type
do_Clear(const.path_tcs)
do_Clear(const.path + "/self")
do_Clear(const.report_path + "/manifest_all_positive.txt")
do_Clear(const.report_path + "/manifest_all_negative.txt")
os.system("rm -f " + const.seed_negative + "/*~")
os.system("rm -f " + const.seed_positive + "/*~")
opts, args = getopt.getopt(
argv[
1:], 'h:o:p:n', [
'help', 'order=', 'pack='])
if (len(opts) == 0):
print "Auto generate manifest.json------------------------->", opts
# input_seed -> selfcomb.txt->manifest.json
del_Seed1(const.seed_file)
for o, a in opts:
if o in ('-h', '--help'):
Usage()
sys.exit(1)
elif o in ('-n'):
print ("**************negative**********")
Test_Flag = "negative"
if (Test_Flag == "negative"):
                    del_Seed1(const.seed_file_na)
else:
                    del_Seed1(const.seed_file)
elif o in ('-o', '--order'):
allpairs_order_get = a
print "Auto generate manifest.json------------------------->"
# create all manifest.json->positive.txt and nagative.txt
print "------------------>"
# del_Seed1(const.seed_file)
#import testfile
#suite1 = unittest.TestLoader().loadTestsFromTestCase(testfile.TestCaseUnit)
# manifest folder -> webapp
# app_Folder(const.path_tcs)
do_Clear(const.path + "/self")
elif o in ('--foo', ):
sys.exit(0)
elif o in ('-p', '--pack'):
print "Auto generate manifest.json------------------------->", opts
# input_seed -> selfcomb.txt->manifest.json
Pack_Type = a
print "Pack_Type------------------------->", Pack_Type
sys.exit(0)
else:
print "***unhandled option***"
sys.exit(3)
except Exception as e:
print Exception, ":", e
print traceback.format_exc()
Usage()
sys.exit(2)
finally:
do_Clear(const.path + "/opt")
do_Clear(const.path + "/self")
os.system("rm -rf *.zip")
os.system("rm -rf *.pem")
if __name__ == "__main__":
main(sys.argv)
|
|
from django.test import TestCase
from django.test.client import Client
try:
import json
except ImportError:
from django.utils import simplejson as json
from MacroExpansion import MacroExpansion
from KeyValueTree import KeyValueTree
from truth.models import Truth, KeyValue as TruthKeyValue
class TestMacroExpansion(TestCase):
fixtures = ['testdata.json']
def test_import(self):
try:
from MacroExpansion import MacroExpansion
except:
raise(BaseException('Unable to import Macro Expansion'))
try:
from KeyValueTree import KeyValueTree
except:
raise(BaseException('Unable to import KeyValueTree'))
def test_key_value_not_found(self):
m = MacroExpansion('host:fake-hostname2:ip_address')
self.assertEqual(m.output(),'10.99.32.1')
def test_key_value_found(self):
m = MacroExpansion('host:fake-hostname2:ip_address')
self.assertEqual(m.output(),'10.99.32.1')
# TODO: Add checks for setting every property of a system through the api
class SystemApi(TestCase):
fixtures = ['testdata.json']
new_hostname = 'new_hostname999'
new_host_id = 3
    def setUp(self):
self.client = Client()
def test_get_system_not_found_by_id(self):
resp = self.client.get('/api/v2/system/-1/', follow=True)
self.assertEqual(404, resp.status_code)
def test_get_system_by_id(self):
resp = self.client.get('/api/v2/system/1/', follow=True)
self.assertEqual(200, resp.status_code)
def test_get_system_by_hostname(self):
resp = self.client.get('/api/v2/system/asfdasfasfasdfasfasdfsadf/', follow=True)
self.assertEqual(404, resp.status_code)
resp = self.client.get('/api/v2/system/fake-hostname2/', follow=True)
self.assertEqual(200, resp.status_code)
def test_key_value_tree(self):
tree = KeyValueTree('fake-hostname2').final
self.assertEqual(tree['nic.0.ipv4_address.0'],'10.99.32.1')
def test_key_value_api(self):
resp = self.client.get('/api/v2/keyvalue/?keystore=fake-hostname2', follow=True)
self.assertEqual(json.loads(resp.content)['truth:test:cluster_name'], 'Test Cluster Name')
self.assertEqual(json.loads(resp.content)['host:fake-hostname1:nic.0.ipv4_address.0'], '10.99.32.3')
resp = self.client.put('/en-US/api/v2/keyvalue/5/', {'key':'nic.0.ipv4_address.0', 'value':'14.14.14.14', 'system_id':'1'})
resp = self.client.get('/api/v2/keyvalue/?keystore=fake-hostname2', follow=True)
self.assertEqual(json.loads(resp.content)['host:fake-hostname1:nic.0.ipv4_address.0'], '10.99.32.3')
resp = self.client.get('/api/v2/keyvalue/?key=cluster_owner', follow=True)
self.assertEqual(json.loads(resp.content)['truth:test:cluster_owner'], 'The Cluster Owner')
resp = self.client.get('/api/v2/keyvalue/?value=10.99.32.3', follow=True)
self.assertEqual(json.loads(resp.content)['host:fake-hostname1:nic.0.ipv4_address.0'], '10.99.32.3')
def test_search_by_asset_tag(self):
resp = self.client.get('/api/v2/system/3/', {'search':True, 'asset_tag':'65432'}, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(resp.content)[0]['asset_tag'], '65432')
self.assertEqual(json.loads(resp.content)[0]['hostname'], 'fake-hostname2')
def test_search_by_serial(self):
resp = self.client.get('/api/v2/system/3/', {'search':True, 'serial':'39993'}, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(resp.content)[0]['serial'], '39993')
self.assertEqual(json.loads(resp.content)[0]['hostname'], 'fake-hostname1')
def test_search_by_serial_and_asset_tag_not_found(self):
resp = self.client.get('/api/v2/system/3/', {'search':True, 'serial':'39993', 'asset_tag':'99999asdf'}, follow=True)
self.assertEqual(resp.status_code, 404)
def test_search_by_system_rack(self):
resp = self.client.get('/api/v2/system/3/', {'search':True, 'system_rack_id':'1'}, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(resp.content)[0]['hostname'], 'fake-hostname1')
def test_search_by_system_rack_and_rack_order(self):
resp = self.client.get('/api/v2/system/3/', {'search':True, 'system_rack_id':'1', 'rack_order':'1.00'}, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(resp.content)[0]['hostname'], 'fake-hostname1')
def test_search_by_system_rack_and_rack_order_not_found(self):
resp = self.client.get('/api/v2/system/3/', {'search':True, 'system_rack_id':'1', 'rack_order':'2.00'}, follow=True)
self.assertEqual(resp.status_code, 404)
def test_search_by_system_rack_and_serial(self):
resp = self.client.get('/api/v2/system/3/', {'search':True, 'system_rack_id':'1', 'serial':'39993'}, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(resp.content)[0]['hostname'], 'fake-hostname1')
def test_search_by_system_switch_ports(self):
resp = self.client.get('/api/v2/system/3/', {'search':True, 'switch_ports':'101.02'}, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(resp.content)[0]['hostname'], 'fake-hostname1')
def test_search_by_system_switch_ports_not_found(self):
resp = self.client.get('/api/v2/system/3/', {'search':True, 'switch_ports':'shouldnteverhavethisasaswitchport101.02'}, follow=True)
self.assertEqual(resp.status_code, 404)
def test_search_by_system_rack_and_serial_not_found(self):
resp = self.client.get('/api/v2/system/3/', {'search':True, 'system_rack_id':'1', 'serial':'39993asdf'}, follow=True)
self.assertEqual(resp.status_code, 404)
class DHCPApi(TestCase):
fixtures = ['testdata.json']
    def setUp(self):
self.client = Client()
def test_get_single_scope(self):
resp = self.client.get('/api/v2/keyvalue/?key_type=dhcp_scopes', follow=True)
scope_list = json.loads(resp.content)
self.assertEqual(scope_list[0]['dhcp.is_scope'], 'True')
self.assertEqual(scope_list[0]['dhcp.scope.start'], '10.0.1.0')
self.assertEqual(scope_list[0]['dhcp.scope.end'], '10.0.1.255')
self.assertEqual(scope_list[0]['dhcp.scope.name'], 'phx-vlan73')
def test_get_second_scope(self):
resp = self.client.get('/api/v2/keyvalue/?key_type=dhcp_scopes', follow=True)
scope_list = json.loads(resp.content)
"""self.assertEqual(scope_list[1]['dhcp.is_scope'], 'True')
self.assertEqual(scope_list[1]['dhcp.scope.start'], '10.0.0.0')
self.assertEqual(scope_list[1]['dhcp.scope.end'], '10.0.0.255')
self.assertEqual(scope_list[1]['dhcp.scope.name'], 'phx-vlan81')"""
def test_get_multiple_scopes(self):
resp = self.client.get('/api/v2/keyvalue/?key_type=dhcp_scopes', follow=True)
scope_list = json.loads(resp.content)
"""self.assertEqual(scope_list[0]['dhcp.is_scope'], 'True')
self.assertEqual(scope_list[0]['dhcp.scope.start'], '10.0.1.0')
self.assertEqual(scope_list[0]['dhcp.scope.end'], '10.0.1.255')
self.assertEqual(scope_list[0]['dhcp.scope.name'], 'phx-vlan73')
self.assertEqual(scope_list[1]['dhcp.is_scope'], 'True')
self.assertEqual(scope_list[1]['dhcp.scope.start'], '10.0.0.0')
self.assertEqual(scope_list[1]['dhcp.scope.end'], '10.0.0.255')
self.assertEqual(scope_list[1]['dhcp.scope.name'], 'phx-vlan81')"""
def test_get_system_by_scope(self):
resp = self.client.get('/api/v2/keyvalue/?key_type=system_by_scope&scope=phx-vlan73', follow=True)
system_list = json.loads(resp.content)
self.assertEqual(system_list[0]['nic.0.mac_address.0'],'00:00:00:00:00:AA')
self.assertEqual(system_list[0]['nic.0.ipv4_address.0'],'10.99.32.1')
self.assertEqual(system_list[0]['nic.1.mac_address.0'],'00:00:00:00:00:BB')
self.assertEqual(system_list[0]['nic.1.ipv4_address.0'],'10.99.32.2')
def test_get_adapters_by_system(self):
resp = self.client.get('/api/v2/keyvalue/?key_type=adapters_by_system&system=fake-hostname2', follow=True)
system_list = json.loads(resp.content)
#print system_list
def test_delete_network_adapter(self):
resp = self.client.delete('/en-US/api/v2/keyvalue/1/', {'system_hostname':'fake-hostname2', 'adapter_number':'0', 'key_type':'delete_network_adapter'})
#print "The content is %s" % resp.content
class TestReverseDNSApi(TestCase):
fixtures = ['testdata.json']
    def setUp(self):
self.client = Client()
def test_get_single_reverse_zone_names_with_descriptions(self):
resp = self.client.get('/api/v2/reverse_dns/1/get_reverse_dns_zones_with_names/', follow=True)
self.assertEqual(resp.status_code, 200)
scope_list = json.loads(resp.content)
self.assertEqual(len(scope_list), 2)
self.assertEqual(scope_list[0]['name'], 'phx1-32.8.10.in-addr.arpa')
self.assertEqual(scope_list[0]['description'], '10.99.32.0 reverse dns zone')
self.assertEqual(scope_list[1]['name'], 'phx1-33.8.10.in-addr.arpa')
self.assertEqual(scope_list[1]['description'], '10.99.33.0 reverse dns zone')
def test_get_system_by_reverse_dns_zone(self):
resp = self.client.get('/api/v2/keyvalue/?key_type=system_by_reverse_dns_zone&zone=phx1-32.8.10.in-addr.arpa', follow=True)
self.assertEqual(resp.status_code, 200)
system_list = json.loads(resp.content)
self.assertEqual(len(system_list), 2)
self.assertEqual(system_list[0]['nic.0.ipv4_address.0'],'10.99.32.1')
self.assertEqual(system_list[0]['hostname'],'fake-hostname2')
self.assertEqual(system_list[0]['nic.1.ipv4_address.0'],'10.99.32.2')
self.assertEqual(system_list[1]['nic.0.ipv4_address.0'],'10.99.32.3')
self.assertEqual(system_list[1]['hostname'],'fake-hostname1')
class KeyValueApi(TestCase):
fixtures = ['testdata.json']
    def setUp(self):
self.client = Client()
def test_get_adapters_by_system(self):
resp = self.client.get('/api/v2/keyvalue/3/', {'key_type':'adapters_by_system','system':'fake-hostname2'}, follow=True)
#print resp.content
def test_keyvalue_set_invalid_ip(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'1.1.1asdfasdf.1','key':'nic.0.ipv4_address.0'})
self.assertEqual(resp.status_code, 401)
def test_keyvalue_set_valid_ip(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'1', 'value':'10.99.32.1','key':'nic.0.ipv4_address.0'})
self.assertEqual(resp.status_code, 200)
def test_keyvalue_set_invalid_mac_address(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'asdfsadfsadf','key':'nic.0.mac_address.0'})
self.assertEqual(resp.status_code, 401)
def test_keyvalue_set_valid_mac_address(self):
resp = self.client.put('/en-US/api/v2/keyvalue/2/', {'system_id':'1', 'value':'00:00:00:00:00:00','key':'nic.0.mac_address.0'})
self.assertEqual(resp.status_code, 200)
def test_keyvalue_set_invalid_is_dhcp_scope(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'true','key':'is_dhcp_scope'})
self.assertEqual(resp.status_code, 401)
"""def test_keyvalue_set_valid_is_dhcp_scope(self):
resp = self.client.put('/en-US/api/v2/keyvalue/1/', {'system_id':'1', 'value':'True','key':'is_dhcp_scope'})
self.assertEqual(resp.status_code, 200)"""
def test_keyvalue_set_invalid_dhcp_scope_start(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'1.1.1asdfasdf.1','key':'dhcp.scope.start'})
self.assertEqual(resp.status_code, 401)
"""def test_keyvalue_set_valid_dhcp_scope_start(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'1', 'value':'10.99.32.1','key':'dhcp.scope.start'})
self.assertEqual(resp.status_code, 200)"""
def test_keyvalue_set_invalid_dhcp_scope_end(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'1.1.1asdfasdf.1','key':'dhcp.scope.end'})
self.assertEqual(resp.status_code, 401)
"""def test_keyvalue_set_valid_dhcp_scope_end(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'10.99.32.1','key':'dhcp.scope.end'})
self.assertEqual(resp.status_code, 200)"""
def test_keyvalue_set_invalid_dhcp_pool_start(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'1.1.1asdfasdf.1','key':'dhcp.pool.start'})
self.assertEqual(resp.status_code, 401)
"""def test_keyvalue_set_valid_dhcp_pool_start(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'10.99.32.1','key':'dhcp.pool.start'})
self.assertEqual(resp.status_code, 200)"""
def test_keyvalue_set_invalid_dhcp_pool_end(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'1.1.1asdfasdf.1','key':'dhcp.pool.end'})
self.assertEqual(resp.status_code, 401)
"""def test_keyvalue_set_valid_dhcp_pool_end(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'10.99.32.1','key':'dhcp.pool.end'})
self.assertEqual(resp.status_code, 200)"""
def test_keyvalue_set_invalid_dhcp_scope_netmask(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'1.1.1asdfasdf.1','key':'dhcp.scope.start'})
self.assertEqual(resp.status_code, 401)
"""def test_keyvalue_set_valid_dhcp_scope_netmask(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'1', 'value':'10.99.32.1','key':'dhcp.scope.start'})
self.assertEqual(resp.status_code, 200)"""
def test_keyvalue_set_invalid_dhcp_ntp_server(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'1.1.1asdfasdf.1','key':'dhcp.option.ntp_server.0'})
self.assertEqual(resp.status_code, 401)
"""def test_keyvalue_set_valid_dhcp_ntp_server(self):
resp = self.client.put('/en-US/api/v2/keyvalue/3/', {'system_id':'2', 'value':'10.99.32.1','key':'dhcp.option.ntp_server.0'})
self.assertEqual(resp.status_code, 200)"""
|
|
#!/usr/bin/env python3
import sys
import re
import heapq
def extract(s):
return [int(x) for x in re.findall(r'(-?\d+).?', s)]
def vadd(v1, v2):
return tuple(x + y for x, y in zip(v1, v2))
UP, RIGHT, DOWN, LEFT = VDIRS = (0, -1), (1, 0), (0, 1), (-1, 0),
def draw(painted):
minx = min(x for x, y in painted)
miny = min(y for x, y in painted)
maxx = max(x for x, y in painted)
maxy = max(y for x, y in painted)
l = ""
    for y in range(miny, maxy+1):
for x in range(minx, maxx+1):
l += painted.get((x,y), " ")
l += "\n"
print(l)
def draw2(package, state):
orig_map = package[-1]
d, tomove = state
m = orig_map.copy()
for kind, poses in d:
for pos in poses:
m[pos] = kind
draw(m)
print(d)
print("TO MOVE:", tomove)
def dijkstra(m, edges, start, heuristic=None, target=None):
cost = {start: 0}
path = {}
todo = [(0, 0, start)]
explored = 0
while todo and todo[0][-1] != target:
_, k, cur = heapq.heappop(todo)
if k != cost[cur]:
continue
explored += 1
if explored % 10000 == 0:
print("explored", explored)
draw2(m, cur)
print(cost[cur])
print()
# print("AT")
# draw2(m, cur)
# print()
nbrs = list(edges(m, cur))
for nbr, weight in nbrs:
ncost = cost[cur] + weight
if nbr not in cost or ncost < cost[nbr]:
# print("EXPLORING")
# print(draw2(m, nbr))
# print()
cost[nbr] = ncost
path[nbr] = cur
hcost = ncost if not heuristic else ncost + heuristic(nbr)
heapq.heappush(todo, (hcost, ncost, nbr))
# print("-----------------\n")
print("TOTAL EXPLORED", explored)
return cost, path
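# Minimal usage sketch of dijkstra() on a plain weighted graph (the puzzle below
# uses a much richer state + edges pair; this only shows the callback shape):
#   graph = {'a': [('b', 1), ('c', 4)], 'b': [('c', 1)], 'c': []}
#   cost, path = dijkstra(graph, lambda g, u: g[u], 'a', target='c')
#   assert cost['c'] == 2 and path['c'] == 'b'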
##############################
costs = {'A': 1, 'B': 10, 'C': 100, 'D': 1000}
target_cols = {'A': 3, 'B': 5, 'C': 7, 'D': 9}
target_col_set = set(target_cols.values())
def fromkey(d):
d, x = d
return dict(d), x
def tokey(d, x):
return tuple(sorted((k, frozenset(v)) for k, v in d.items())), x
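# tokey()/fromkey() freeze and unfreeze the mutable amphipod state so it can be
# used as a dict key, e.g.
#   tokey({'A': {(3, 2)}}, ()) == ((('A', frozenset({(3, 2)})),), ())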
def heuristic(state):
c = 0
for kind, poses in state[0]:
for pos in poses:
c += abs(target_cols[kind] - pos[0]) * costs[kind]
if pos[1] == 1:
c += costs[kind]
# XXX
return c
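# Worked example of the heuristic above: a 'D' amphipod in the hallway at (5, 1)
# is 4 columns from its target column 9, contributing abs(9 - 5) * 1000 + 1000 = 5000.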
def upd(t, i, x):
t = list(t)
t[i] = x
return tuple(t)
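# e.g. upd((1, 2, 3), 1, 9) == (1, 9, 3): a copy of the tuple with one slot replaced.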
def edges(z, state):
HALLWAY = 1
hallway, rooms, above, targets, _ = z
all = hallway|rooms
locs, tomove = state
allposes = {x for _, s in locs for x in s}
if tomove:
(tm_k, tm_pos, tm_stopped) = tomove
# stop the dude
if not tm_stopped:
if tm_pos[0] not in target_col_set or tm_pos in targets[tm_k]:
yield (state[0], ()), 0
if tm_pos in targets[tm_k]:
others = {x for k, p in locs if k != tm_k for x in p} # perf??
tm_occupado = (others & targets[tm_k])
if not tm_occupado:
yield (locs, ()), 0
else:
for kind, poses in locs:
for pos in poses:
occupado = (
{x for k, p in locs if k != kind for x in p} & targets[kind])
if (pos not in targets[kind]) or occupado:
if pos in hallway:
if occupado:
continue
tgt = target_cols[kind]
no = False
for i in range(min(pos[0], tgt), max(pos[0], tgt)+1):
if i != pos[0] and (i, 1) in allposes:
no = True
break
if no:
continue
yield (state[0], (kind, pos, pos in hallway)), 0
if not tomove:
return
for dir in VDIRS:
nbr = vadd(dir, tm_pos)
if nbr not in all:
continue
if nbr in allposes:
continue
if nbr[1] > tm_pos[1]:
if nbr[0] != target_cols[tm_k]:
continue
others = {x for k, p in locs if k != tm_k for x in p} # perf??
tm_occupado = (others & targets[tm_k])
if tm_occupado:
continue
idx = "ABCD".index(tm_k)
npos = locs[idx][1] - {tm_pos} | {nbr}
nlocs = upd(locs, idx, (tm_k, npos))
yield (nlocs, (tm_k, nbr, tm_stopped)), costs[tm_k]
EXTRA = '''\
#D#C#B#A#
#D#B#A#C#'''.split('\n')
def main(args):
PART2 = True
if args[1] == '-1':
PART2 = False
args.pop(1)
data = [s for s in open(args[1])]
if PART2:
data[3:3] = EXTRA
m = {(x, y): v for y, l in enumerate(data) for x, v in enumerate(l) if v != "\n"}
blank_map = {k: v if not v.isalpha() else " " for k, v in m.items()}
hallway = {(x, 1) for x in range(1, 12)}
noobs = {
noob: {k for k, v in m.items() if v == noob}
for noob in 'ABCD'
}
targets = {
k: {(target_cols[k], i) for i in range(2,3+2*PART2+1)}
for k in 'ABCD'
}
rooms = {v for s in targets.values() for v in s}
above = hallway & {vadd(UP, x) for x in rooms}
package = hallway, rooms, above, targets, blank_map
target = tokey(targets, ())
start = tokey(noobs, ())
cost, _ = dijkstra(package, edges, start, heuristic=heuristic, target=target)
print(cost[target])
if __name__ == '__main__':
main(sys.argv)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_utils import timeutils
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.aws.autoscaling import scaling_policy as aws_sp
from heat.engine import scheduler
from heat.tests.autoscaling import inline_templates
from heat.tests import common
from heat.tests import utils
as_template = inline_templates.as_template
as_params = inline_templates.as_params
class TestAutoScalingPolicy(common.HeatTestCase):
def setUp(self):
super(TestAutoScalingPolicy, self).setUp()
def create_scaling_policy(self, t, stack, resource_name):
rsrc = stack[resource_name]
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
def test_validate_scaling_policy_ok(self):
t = template_format.parse(inline_templates.as_template)
t['Resources']['WebServerScaleUpPolicy']['Properties'][
'ScalingAdjustment'] = 33
t['Resources']['WebServerScaleUpPolicy']['Properties'][
'AdjustmentType'] = 'PercentChangeInCapacity'
t['Resources']['WebServerScaleUpPolicy']['Properties'][
'MinAdjustmentStep'] = 2
stack = utils.parse_stack(t, params=as_params)
self.policy = stack['WebServerScaleUpPolicy']
self.assertIsNone(self.policy.validate())
def test_validate_scaling_policy_error(self):
t = template_format.parse(inline_templates.as_template)
t['Resources']['WebServerScaleUpPolicy']['Properties'][
'ScalingAdjustment'] = 1
t['Resources']['WebServerScaleUpPolicy']['Properties'][
'AdjustmentType'] = 'ChangeInCapacity'
t['Resources']['WebServerScaleUpPolicy']['Properties'][
'MinAdjustmentStep'] = 2
stack = utils.parse_stack(t, params=as_params)
self.policy = stack['WebServerScaleUpPolicy']
ex = self.assertRaises(exception.ResourcePropertyValueDependency,
self.policy.validate)
self.assertIn('MinAdjustmentStep property should only '
'be specified for AdjustmentType with '
'value PercentChangeInCapacity.', six.text_type(ex))
def test_scaling_policy_bad_group(self):
t = template_format.parse(inline_templates.as_template_bad_group)
stack = utils.parse_stack(t, params=as_params)
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
ex = self.assertRaises(exception.ResourceFailure, up_policy.signal)
self.assertIn('Alarm WebServerScaleUpPolicy could '
'not find scaling group', six.text_type(ex))
def test_scaling_policy_adjust_no_action(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
group = stack['WebServerGroup']
self.patchobject(group, 'adjust',
side_effect=exception.NoActionRequired())
mock_fin_scaling = self.patchobject(up_policy, '_finished_scaling')
with mock.patch.object(up_policy, '_is_scaling_allowed',
return_value=True) as mock_isa:
self.assertRaises(exception.NoActionRequired,
up_policy.handle_signal)
mock_isa.assert_called_once_with()
mock_fin_scaling.assert_called_once_with('ChangeInCapacity : 1',
size_changed=False)
def test_scaling_policy_adjust_size_changed(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
group = stack['WebServerGroup']
self.patchobject(group, 'adjust')
mock_fin_scaling = self.patchobject(up_policy, '_finished_scaling')
with mock.patch.object(up_policy, '_is_scaling_allowed',
return_value=True) as mock_isa:
self.assertIsNone(up_policy.handle_signal())
mock_isa.assert_called_once_with()
mock_fin_scaling.assert_called_once_with('ChangeInCapacity : 1',
size_changed=True)
def test_scaling_policy_not_alarm_state(self):
"""If the details don't have 'alarm' then don't progress."""
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
test = {'current': 'not_an_alarm'}
with mock.patch.object(pol, '_is_scaling_allowed',
side_effect=AssertionError()) as dont_call:
self.assertRaises(exception.NoActionRequired,
pol.handle_signal, details=test)
self.assertEqual([], dont_call.call_args_list)
def test_scaling_policy_cooldown_toosoon(self):
"""If _is_scaling_allowed() returns False don't progress."""
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
group = stack['WebServerGroup']
test = {'current': 'alarm'}
with mock.patch.object(group, 'adjust',
side_effect=AssertionError) as dont_call:
with mock.patch.object(pol, '_is_scaling_allowed',
return_value=False) as mock_isa:
self.assertRaises(exception.NoActionRequired,
pol.handle_signal, details=test)
mock_isa.assert_called_once_with()
self.assertEqual([], dont_call.call_args_list)
def test_scaling_policy_cooldown_ok(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
test = {'current': 'alarm'}
group = self.patchobject(pol.stack, 'resource_by_refid').return_value
group.name = 'fluffy'
with mock.patch.object(pol, '_is_scaling_allowed',
return_value=True) as mock_isa:
pol.handle_signal(details=test)
mock_isa.assert_called_once_with()
group.adjust.assert_called_once_with(1, 'ChangeInCapacity', None)
@mock.patch.object(aws_sp.AWSScalingPolicy, '_get_ec2_signed_url')
def test_scaling_policy_refid_signed_url(self, mock_get_ec2_url):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
rsrc = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
mock_get_ec2_url.return_value = 'http://signed_url'
self.assertEqual('http://signed_url', rsrc.FnGetRefId())
def test_scaling_policy_refid_rsrc_name(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
rsrc = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
rsrc.resource_id = None
self.assertEqual('WebServerScaleUpPolicy', rsrc.FnGetRefId())
def test_refid_convergence_cache_data(self):
t = template_format.parse(as_template)
cache_data = {'WebServerScaleUpPolicy': {
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'reference_id': 'http://convg_signed_url'
}}
stack = utils.parse_stack(t, cache_data=cache_data)
rsrc = stack['WebServerScaleUpPolicy']
self.assertEqual('http://convg_signed_url', rsrc.FnGetRefId())
class TestCooldownMixin(common.HeatTestCase):
def setUp(self):
super(TestCooldownMixin, self).setUp()
def create_scaling_policy(self, t, stack, resource_name):
rsrc = stack[resource_name]
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
def test_cooldown_is_in_progress_toosoon(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
now = timeutils.utcnow()
previous_meta = {'cooldown': {
now.isoformat(): 'ChangeInCapacity : 1'}}
self.patchobject(pol, 'metadata_get', return_value=previous_meta)
self.assertFalse(pol._is_scaling_allowed())
def test_cooldown_is_in_progress_scaling_unfinished(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
previous_meta = {'scaling_in_progress': True}
self.patchobject(pol, 'metadata_get', return_value=previous_meta)
self.assertFalse(pol._is_scaling_allowed())
def test_cooldown_not_in_progress(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
previous_meta = {
'cooldown': {
awhile_ago.isoformat(): 'ChangeInCapacity : 1'
},
'scaling_in_progress': False
}
self.patchobject(pol, 'metadata_get', return_value=previous_meta)
self.assertTrue(pol._is_scaling_allowed())
def test_scaling_policy_cooldown_zero(self):
t = template_format.parse(as_template)
# Create the scaling policy (with Cooldown=0) and scale up one
properties = t['Resources']['WebServerScaleUpPolicy']['Properties']
properties['Cooldown'] = '0'
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
now = timeutils.utcnow()
previous_meta = {now.isoformat(): 'ChangeInCapacity : 1'}
self.patchobject(pol, 'metadata_get', return_value=previous_meta)
self.assertTrue(pol._is_scaling_allowed())
def test_scaling_policy_cooldown_none(self):
t = template_format.parse(as_template)
        # Create the scaling policy with no Cooldown property; it should
        # behave the same as when Cooldown == 0.
properties = t['Resources']['WebServerScaleUpPolicy']['Properties']
del properties['Cooldown']
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
now = timeutils.utcnow()
previous_meta = {now.isoformat(): 'ChangeInCapacity : 1'}
self.patchobject(pol, 'metadata_get', return_value=previous_meta)
self.assertTrue(pol._is_scaling_allowed())
def test_metadata_is_written(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
nowish = timeutils.utcnow()
reason = 'cool as'
meta_set = self.patchobject(pol, 'metadata_set')
self.patchobject(timeutils, 'utcnow', return_value=nowish)
pol._finished_scaling(reason)
meta_set.assert_called_once_with(
{'cooldown': {nowish.isoformat(): reason},
'scaling_in_progress': False})
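    # Taken together, the tests above pin down the metadata contract the
    # cooldown mixin works with (the shape is inferred from these tests, not
    # from separate documentation):
    #
    #     {
    #         'cooldown': {<ISO-8601 timestamp>: <reason string>},
    #         'scaling_in_progress': <bool>,
    #     }
    #
    # _is_scaling_allowed() refuses to act while 'scaling_in_progress' is True
    # or while the newest cooldown entry is still inside the Cooldown window;
    # _finished_scaling() records a fresh timestamp and clears the flag.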
class ScalingPolicyAttrTest(common.HeatTestCase):
def setUp(self):
super(ScalingPolicyAttrTest, self).setUp()
t = template_format.parse(as_template)
self.stack = utils.parse_stack(t, params=as_params)
self.policy = self.stack['WebServerScaleUpPolicy']
self.assertIsNone(self.policy.validate())
scheduler.TaskRunner(self.policy.create)()
self.assertEqual((self.policy.CREATE, self.policy.COMPLETE),
self.policy.state)
def test_alarm_attribute(self):
self.assertIn("WebServerScaleUpPolicy",
self.policy.FnGetAtt('AlarmUrl'))
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.firestore_admin_v1.proto import (
field_pb2 as google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_field__pb2,
)
from google.cloud.firestore_admin_v1.proto import (
firestore_admin_pb2 as google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2,
)
from google.cloud.firestore_admin_v1.proto import (
index_pb2 as google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class FirestoreAdminStub(object):
"""Operations are created by service `FirestoreAdmin`, but are accessed via
service `google.longrunning.Operations`.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateIndex = channel.unary_unary(
"/google.firestore.admin.v1.FirestoreAdmin/CreateIndex",
request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.CreateIndexRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.ListIndexes = channel.unary_unary(
"/google.firestore.admin.v1.FirestoreAdmin/ListIndexes",
request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListIndexesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListIndexesResponse.FromString,
)
self.GetIndex = channel.unary_unary(
"/google.firestore.admin.v1.FirestoreAdmin/GetIndex",
request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.GetIndexRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2.Index.FromString,
)
self.DeleteIndex = channel.unary_unary(
"/google.firestore.admin.v1.FirestoreAdmin/DeleteIndex",
request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.DeleteIndexRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetField = channel.unary_unary(
"/google.firestore.admin.v1.FirestoreAdmin/GetField",
request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.GetFieldRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_field__pb2.Field.FromString,
)
self.UpdateField = channel.unary_unary(
"/google.firestore.admin.v1.FirestoreAdmin/UpdateField",
request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.UpdateFieldRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.ListFields = channel.unary_unary(
"/google.firestore.admin.v1.FirestoreAdmin/ListFields",
request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListFieldsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListFieldsResponse.FromString,
)
self.ExportDocuments = channel.unary_unary(
"/google.firestore.admin.v1.FirestoreAdmin/ExportDocuments",
request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ExportDocumentsRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.ImportDocuments = channel.unary_unary(
"/google.firestore.admin.v1.FirestoreAdmin/ImportDocuments",
request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ImportDocumentsRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
class FirestoreAdminServicer(object):
"""Operations are created by service `FirestoreAdmin`, but are accessed via
service `google.longrunning.Operations`.
"""
def CreateIndex(self, request, context):
"""Creates a composite index. This returns a [google.longrunning.Operation][google.longrunning.Operation]
which may be used to track the status of the creation. The metadata for
the operation will be the type [IndexOperationMetadata][google.firestore.admin.v1.IndexOperationMetadata].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListIndexes(self, request, context):
"""Lists composite indexes.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetIndex(self, request, context):
"""Gets a composite index.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteIndex(self, request, context):
"""Deletes a composite index.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetField(self, request, context):
"""Gets the metadata and configuration for a Field.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateField(self, request, context):
"""Updates a field configuration. Currently, field updates apply only to
single field index configuration. However, calls to
[FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField] should provide a field mask to avoid
changing any configuration that the caller isn't aware of. The field mask
should be specified as: `{ paths: "index_config" }`.
This call returns a [google.longrunning.Operation][google.longrunning.Operation] which may be used to
track the status of the field update. The metadata for
the operation will be the type [FieldOperationMetadata][google.firestore.admin.v1.FieldOperationMetadata].
To configure the default field settings for the database, use
the special `Field` with resource name:
`projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListFields(self, request, context):
"""Lists the field configuration and metadata for this database.
Currently, [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] only supports listing fields
that have been explicitly overridden. To issue this query, call
[FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] with the filter set to
`indexConfig.usesAncestorConfig:false`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ExportDocuments(self, request, context):
"""Exports a copy of all or a subset of documents from Google Cloud Firestore
to another storage system, such as Google Cloud Storage. Recent updates to
documents may not be reflected in the export. The export occurs in the
background and its progress can be monitored and managed via the
Operation resource that is created. The output of an export may only be
used once the associated operation is done. If an export operation is
cancelled before completion it may leave partial data behind in Google
Cloud Storage.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ImportDocuments(self, request, context):
"""Imports documents into Google Cloud Firestore. Existing documents with the
same name are overwritten. The import occurs in the background and its
progress can be monitored and managed via the Operation resource that is
created. If an ImportDocuments operation is cancelled, it is possible
that a subset of the data has already been imported to Cloud Firestore.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_FirestoreAdminServicer_to_server(servicer, server):
rpc_method_handlers = {
"CreateIndex": grpc.unary_unary_rpc_method_handler(
servicer.CreateIndex,
request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.CreateIndexRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"ListIndexes": grpc.unary_unary_rpc_method_handler(
servicer.ListIndexes,
request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListIndexesRequest.FromString,
response_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListIndexesResponse.SerializeToString,
),
"GetIndex": grpc.unary_unary_rpc_method_handler(
servicer.GetIndex,
request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.GetIndexRequest.FromString,
response_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2.Index.SerializeToString,
),
"DeleteIndex": grpc.unary_unary_rpc_method_handler(
servicer.DeleteIndex,
request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.DeleteIndexRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"GetField": grpc.unary_unary_rpc_method_handler(
servicer.GetField,
request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.GetFieldRequest.FromString,
response_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_field__pb2.Field.SerializeToString,
),
"UpdateField": grpc.unary_unary_rpc_method_handler(
servicer.UpdateField,
request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.UpdateFieldRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"ListFields": grpc.unary_unary_rpc_method_handler(
servicer.ListFields,
request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListFieldsRequest.FromString,
response_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListFieldsResponse.SerializeToString,
),
"ExportDocuments": grpc.unary_unary_rpc_method_handler(
servicer.ExportDocuments,
request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ExportDocumentsRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"ImportDocuments": grpc.unary_unary_rpc_method_handler(
servicer.ImportDocuments,
request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ImportDocumentsRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.firestore.admin.v1.FirestoreAdmin", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
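# A minimal, hedged usage sketch for the client side of this stub (the channel
# setup, project and collection-group names are illustrative only, and
# authentication is elided; real applications normally go through the
# google-cloud-firestore-admin wrapper). As the class docstrings note,
# long-running methods such as CreateIndex return a
# google.longrunning.Operation that is tracked via the Operations service:
#
#     channel = grpc.secure_channel("firestore.googleapis.com:443",
#                                   grpc.ssl_channel_credentials())
#     stub = FirestoreAdminStub(channel)
#     parent = ("projects/my-project/databases/(default)/"
#               "collectionGroups/my-collection")
#     request = (google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2
#                .ListIndexesRequest(parent=parent))
#     response = stub.ListIndexes(request)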
|
|
import json
import io
MOCK_EMPTY_RESPONSE = {"kind": "tm:asm:policies:host-names:host-namecollectionstate",
"selfLink": "https://localhost/mgmt/tm/asm/policies/0000/host-names",
"totalItems": 0,
"items": []
}
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
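# All tests below follow the same pattern: build a bare mocker.Mock() client,
# point the relevant client method at a canned fixture from test_data/ (or at
# MOCK_EMPTY_RESPONSE), run the F5_ASM command function under test, and assert
# on the result's .outputs (or, for the empty-response cases, on the
# HumanReadable section of .to_context()).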
def test_f5_get_md5_command(mocker):
from F5_ASM import f5_get_policy_md5_command
mocked_client = mocker.Mock()
mocked_client.get_policy_md5.return_value = util_load_json('test_data/get_md5.json')
result = f5_get_policy_md5_command(mocked_client, 'Test_Policy').outputs
assert result.get('md5') == 'md5-4321'
def test_f5_create_policy_command(mocker):
from F5_ASM import f5_create_policy_command
mocked_client = mocker.Mock()
mocked_client.create_policy.return_value = util_load_json('test_data/create_policy.json')
result = f5_create_policy_command(mocked_client, 'New_Policy', 'parent', 'transparent', False).outputs
assert result.get('name') == 'New_Policy'
assert result.get('id') == '0000'
assert result.get('description') == 'This is a description!'
def test_f5_apply_policy_command(mocker):
from F5_ASM import f5_apply_policy_command
mocked_client = mocker.Mock()
mocked_client.apply_policy.return_value = util_load_json('test_data/apply_policy.json')
result = f5_apply_policy_command(mocked_client, 'https://New_Policy.com').outputs
assert result.get('status') == 'NEW'
assert result.get('id') == '0000'
def test_f5_export_policy_command(mocker):
from F5_ASM import f5_export_policy_command
mocked_client = mocker.Mock()
mocked_client.export_policy.return_value = util_load_json('test_data/export_policy.json')
result = f5_export_policy_command(mocked_client, 'exported_file.xml', 'https://New_Policy.com', False).outputs
assert result.get('status') == 'NEW'
assert result.get('id') == '0000'
def test_f5_delete_policy_command(mocker):
from F5_ASM import f5_delete_policy_command
mocked_client = mocker.Mock()
mocked_client.delete_policy.return_value = util_load_json('test_data/delete_policy.json')
result = f5_delete_policy_command(mocked_client, 'policy_md5').outputs
assert result.get('name') == 'New_Policy'
assert result.get('id') == '0000'
def test_f5_list_policies_command(mocker):
from F5_ASM import f5_list_policies_command
mocked_client = mocker.Mock()
mocked_client.list_policies.return_value = MOCK_EMPTY_RESPONSE
assert f5_list_policies_command(mocked_client).outputs == []
mocked_client.list_policies.return_value = util_load_json('test_data/list_policies.json')
result = f5_list_policies_command(mocked_client).outputs
assert result[0].get('name') == 'Test_Policy'
assert result[0].get('id') == '1234'
assert result[1].get('name') == 'Common_copy_2'
assert result[1].get('id') == '9876'
def test_f5_list_policy_methods_command(mocker):
from F5_ASM import f5_list_policy_methods_command
mocked_client = mocker.Mock()
mocked_client.list_policy_methods.return_value = util_load_json('test_data/list_methods.json')
result = f5_list_policy_methods_command(mocked_client, '0000').outputs
assert result[0].get('name') == 'posty'
def test_f5_add_policy_methods_command(mocker):
from F5_ASM import f5_add_policy_method_command
mocked_client = mocker.Mock()
mocked_client.add_policy_method.return_value = util_load_json('test_data/add_methods.json')
result = f5_add_policy_method_command(mocked_client, '0000', 'posty', 'POST').outputs
assert result.get('name') == 'posty'
assert result.get('id') == 'md5-1234'
assert result.get('actAsMethod') == 'POST'
def test_f5_update_policy_methods_command(mocker):
from F5_ASM import f5_update_policy_method_command
mocked_client = mocker.Mock()
mocked_client.update_policy_method.return_value =\
util_load_json('test_data/update_methods.json')
result = f5_update_policy_method_command(mocked_client, '0000', 'id123', 'posty', 'GET').outputs
assert result.get('name') == 'posty'
assert result.get('id') == 'md5-1234'
assert result.get('actAsMethod') == 'GET'
def test_f5_delete_policy_methods_command(mocker):
from F5_ASM import f5_delete_policy_method_command
mocked_client = mocker.Mock()
mocked_client.delete_policy_method.return_value =\
util_load_json('test_data/delete_methods.json')
result = f5_delete_policy_method_command(mocked_client, '0000', 'id123', 'posty').outputs
assert result.get('name') == 'posty'
assert result.get('id') == 'md5-1234'
assert result.get('actAsMethod') == 'GET'
def test_f5_list_policy_file_types_command(mocker):
from F5_ASM import f5_list_policy_file_types_command
mocked_client = mocker.Mock()
mocked_client.list_policy_file_types.return_value = util_load_json('test_data/list_file_types.json')
result = f5_list_policy_file_types_command(mocked_client, '0000').outputs
assert result[0].get('name') == 'csv'
def test_f5_add_policy_file_types_command(mocker):
from F5_ASM import f5_add_policy_file_type_command
mocked_client = mocker.Mock()
mocked_client.add_policy_file_type.return_value =\
util_load_json('test_data/add_file_type.json')
result = f5_add_policy_file_type_command(mocked_client, '0000', 'cs', 100, True, True,
True, 100, True).outputs
assert result.get('name') == 'cs'
assert result.get('id') == 'md5-1234'
def test_f5_update_policy_file_types_command(mocker):
from F5_ASM import f5_update_policy_file_type_command
mocked_client = mocker.Mock()
mocked_client.update_policy_file_type.return_value = util_load_json('test_data/update_file_type.json')
result = f5_update_policy_file_type_command(mocked_client, '0000', 'id123', 'cs', 100, True, True,
True, 100, True).outputs
assert result.get('name') == 'cs'
assert result.get('id') == 'md5-1234'
def test_f5_delete_policy_file_types_command(mocker):
from F5_ASM import f5_delete_policy_file_type_command
mocked_client = mocker.Mock()
mocked_client.delete_policy_file_type.return_value = util_load_json('test_data/delete_file_type.json')
result = f5_delete_policy_file_type_command(mocked_client, '0000', 'id123', 'cs').outputs
assert result.get('name') == 'cs'
assert result.get('id') == 'md5-1234'
def test_f5_list_policy_cookies_command(mocker):
from F5_ASM import f5_list_policy_cookies_command
mocked_client = mocker.Mock()
mocked_client.list_policy_cookies.return_value = util_load_json('test_data/list_cookies.json')
result = f5_list_policy_cookies_command(mocked_client, '0000').outputs
assert result[0].get('name') == 'yummy'
assert result[0].get('id') == 'cookie-md5'
assert result[1].get('name') == 'cookie'
assert result[1].get('id') == 'cookie-md5-2'
def test_f5_add_policy_cookie_command(mocker):
from F5_ASM import f5_add_policy_cookie_command
mocked_client = mocker.Mock()
mocked_client.add_policy_cookie.return_value = util_load_json('test_data/add_cookie.json')
result = f5_add_policy_cookie_command(mocked_client, '0000', 'new_cookie', True, 'wildcard', 'allow', True).outputs
assert result.get('name') == 'new_cookie'
assert result.get('id') == 'cookie-md5'
def test_f5_update_policy_cookie_command(mocker):
from F5_ASM import f5_update_policy_cookie_command
mocked_client = mocker.Mock()
mocked_client.update_policy_cookie.return_value = util_load_json('test_data/update_cookie.json')
result = f5_update_policy_cookie_command(mocked_client, '0000', 'id123', 'new_cookie', True, 'wildcard',
'allow', True).outputs
assert result.get('name') == 'new_cookie'
assert result.get('id') == 'cookie-md5'
def test_f5_delete_policy_cookie_command(mocker):
from F5_ASM import f5_delete_policy_cookie_command
mocked_client = mocker.Mock()
mocked_client.delete_policy_cookie.return_value = util_load_json('test_data/delete_cookie.json')
result = f5_delete_policy_cookie_command(mocked_client, '0000', 'id123', 'new_cookie').outputs
assert result.get('name') == 'new_cookie'
assert result.get('id') == 'cookie-md5'
def test_f5_list_policy_hostname_command(mocker):
from F5_ASM import f5_list_policy_hostnames_command
mocked_client = mocker.Mock()
mocked_client.list_policy_hostnames.return_value = util_load_json('test_data/list_hostname.json')
result = f5_list_policy_hostnames_command(mocked_client, '0000').outputs
assert result[0].get('name') == 'example.com'
assert result[0].get('id') == 'hostname-md5-1'
assert result[1].get('name') == 'qmasters.co.il'
assert result[1].get('id') == 'hostname-md5-2'
def test_f5_add_policy_hostname_command(mocker):
from F5_ASM import f5_add_policy_hostname_command
mocked_client = mocker.Mock()
mocked_client.add_policy_hostname.return_value = util_load_json('test_data/add_hostname.json')
result = f5_add_policy_hostname_command(mocked_client, '0000', 'example.co.il', True).outputs
assert result.get('name') == 'example.co.il'
assert result.get('id') == 'hostname-md5'
def test_f5_update_policy_hostname_command(mocker):
from F5_ASM import f5_update_policy_hostname_command
mocked_client = mocker.Mock()
mocked_client.update_policy_hostname.return_value = util_load_json('test_data/update_hostname.json')
result = f5_update_policy_hostname_command(mocked_client, '0000', 'id123', 'example.co.il', True).outputs
assert result.get('name') == 'example.co.il'
assert result.get('id') == 'hostname-md5'
def test_f5_delete_policy_hostname_command(mocker):
from F5_ASM import f5_delete_policy_hostname_command
mocked_client = mocker.Mock()
mocked_client.delete_policy_hostname.return_value = util_load_json('test_data/delete_hostname.json')
result = f5_delete_policy_hostname_command(mocked_client, '0000', 'id123', 'example.co.il').outputs
assert result.get('name') == 'example.co.il'
assert result.get('id') == 'hostname-md5'
def test_f5_list_policy_urls_command(mocker):
from F5_ASM import f5_list_policy_urls_command
mocked_client = mocker.Mock()
mocked_client.list_policy_urls.return_value = util_load_json('test_data/list_urls.json')
result = f5_list_policy_urls_command(mocked_client, '0000').outputs
assert result[0].get('name') == '/http_example_1'
assert result[0].get('id') == 'url-md5-1'
assert result[1].get('name') == '/http_example_2'
assert result[1].get('id') == 'url-md5-2'
def test_f5_add_policy_url_command(mocker):
from F5_ASM import f5_add_policy_url_command
mocked_client = mocker.Mock()
mocked_client.add_policy_url.return_value = util_load_json('test_data/add_url.json')
result = f5_add_policy_url_command(mocked_client, '0000', 'new_url', 'http', 'Explicit', True).outputs
assert result.get('name') == '/new_url'
assert result.get('id') == 'url-md5'
def test_f5_update_policy_url_command(mocker):
from F5_ASM import f5_update_policy_url_command
mocked_client = mocker.Mock()
mocked_client.update_policy_url.return_value = util_load_json('test_data/update_url.json')
result = f5_update_policy_url_command(mocked_client, '0000', 'id123', 'new_url', True).outputs
assert result.get('name') == '/new_url'
assert result.get('id') == 'url-md5'
def test_f5_delete_policy_url_command(mocker):
from F5_ASM import f5_delete_policy_url_command
mocked_client = mocker.Mock()
mocked_client.delete_policy_url.return_value = util_load_json('test_data/delete_url.json')
result = f5_delete_policy_url_command(mocked_client, '0000', 'id123', 'new_url').outputs
assert result.get('name') == '/new_url'
assert result.get('id') == 'url-md5'
def test_f5_list_policy_gwt_profiles_command(mocker):
from F5_ASM import f5_list_policy_gwt_profiles_command
mocked_client = mocker.Mock()
mocked_client.list_policy_gwt_profiles.return_value = MOCK_EMPTY_RESPONSE
result = f5_list_policy_gwt_profiles_command(mocked_client, 'unimportant').to_context()
result = result.get('HumanReadable')
assert 'No results' in result
    # adding fields to BASIC_FIELDS after the previous test emptied this list.
LIST_FIELDS = ['name', 'id', 'selfLink', 'lastUpdateMicros', 'type', 'protocol', 'method'] # noqa: F841
mocked_client.list_policy_gwt_profiles.return_value = util_load_json('test_data/list_GWT.json')
result = f5_list_policy_gwt_profiles_command(mocked_client, 'unimportant').outputs
assert result[0].get('name') == 'test-GWT'
assert result[0].get('id') == 'GWT-md5'
def test_f5_add_policy_gwt_profile_command(mocker):
from F5_ASM import f5_add_policy_gwt_profile_command
mocked_client = mocker.Mock()
mocked_client.add_policy_gwt_profile.return_value = util_load_json('test_data/CUD_GWT.json')
result = f5_add_policy_gwt_profile_command(mocked_client, '0000', 'GWT_test', '100', '100').outputs
assert result.get('name') == 'GWT_test'
assert result.get('id') == 'GWT-md5'
def test_f5_update_policy_gwt_profile_command(mocker):
from F5_ASM import f5_update_policy_gwt_profile_command
mocked_client = mocker.Mock()
mocked_client.update_policy_gwt_profile.return_value = util_load_json('test_data/CUD_GWT.json')
result = f5_update_policy_gwt_profile_command(mocked_client, '0000', 'id123', 'GWT_test', '100', '100').outputs
assert result.get('name') == 'GWT_test'
assert result.get('id') == 'GWT-md5'
def test_f5_delete_policy_gwt_profile_command(mocker):
from F5_ASM import f5_delete_policy_gwt_profile_command
mocked_client = mocker.Mock()
mocked_client.delete_policy_gwt_profile.return_value = util_load_json('test_data/CUD_GWT.json')
result = f5_delete_policy_gwt_profile_command(mocked_client, '0000', 'id123', 'GWT_test').outputs
assert result.get('name') == 'GWT_test'
assert result.get('id') == 'GWT-md5'
def test_f5_list_policy_parameters_command(mocker):
from F5_ASM import f5_list_policy_parameters_command
mocked_client = mocker.Mock()
mocked_client.list_policy_parameters.return_value = MOCK_EMPTY_RESPONSE
result = f5_list_policy_parameters_command(mocked_client, 'unimportant').to_context()
result = result.get('HumanReadable')
assert 'No results' in result
mocked_client.list_policy_parameters.return_value = util_load_json('test_data/list_parameters.json')
result = f5_list_policy_parameters_command(mocked_client, 'unimportant').outputs
assert result[0].get('name') == 'param-1'
assert result[0].get('id') == 'parameter-md5-1'
assert result[1].get('name') == 'param-2'
assert result[1].get('id') == 'parameter-md5-2'
def test_f5_add_policy_parameter_command(mocker):
from F5_ASM import f5_add_policy_parameter_command
mocked_client = mocker.Mock()
mocked_client.add_policy_parameter.return_value = util_load_json('test_data/CUD_parameters.json')
result = f5_add_policy_parameter_command(mocked_client, '0000', 'param-1').outputs
assert result.get('name') == 'param-1'
assert result.get('id') == 'parameter-md5'
def test_f5_update_policy_parameter_command(mocker):
from F5_ASM import f5_update_policy_parameter_command
mocked_client = mocker.Mock()
mocked_client.update_policy_parameter.return_value = util_load_json('test_data/CUD_parameters.json')
result = f5_update_policy_parameter_command(mocked_client, '0000', 'id123', 'param-1').outputs
assert result.get('name') == 'param-1'
assert result.get('id') == 'parameter-md5'
def test_f5_delete_policy_parameter_command(mocker):
from F5_ASM import f5_delete_policy_parameter_command
mocked_client = mocker.Mock()
mocked_client.delete_policy_parameter.return_value = util_load_json('test_data/CUD_parameters.json')
result = f5_delete_policy_parameter_command(mocked_client, '0000', 'id123', 'param-1').outputs
assert result.get('name') == 'param-1'
assert result.get('id') == 'parameter-md5'
def test_f5_list_policy_json_profiles_command(mocker):
from F5_ASM import f5_list_policy_json_profiles_command
mocked_client = mocker.Mock()
mocked_client.list_policy_json_profiles.return_value = MOCK_EMPTY_RESPONSE
result = f5_list_policy_json_profiles_command(mocked_client, 'unimportant').to_context()
result = result.get('HumanReadable')
assert 'No results' in result
mocked_client.list_policy_json_profiles.return_value = util_load_json('test_data/list_json_profiles.json')
result = f5_list_policy_json_profiles_command(mocked_client, '0000').outputs
assert result[0].get('name') == 'json-profile-1'
assert result[0].get('id') == 'json-profile-md5-1'
assert result[1].get('name') == 'Default'
assert result[1].get('id') == 'json-profile-md5-2'
def test_f5_add_policy_json_profile_command(mocker):
from F5_ASM import f5_add_policy_json_profile_command
mocked_client = mocker.Mock()
mocked_client.add_policy_json_profile.return_value = util_load_json('test_data/CUD_json_profile.json')
result = f5_add_policy_json_profile_command(mocked_client, '0000', 'param-1', '100', '100', '100',
'100').outputs
assert result.get('name') == 'json-profile'
assert result.get('id') == 'json-profile-md5'
def test_f5_update_policy_json_profile_command(mocker):
from F5_ASM import f5_update_policy_json_profile_command
mocked_client = mocker.Mock()
mocked_client.update_policy_json_profile.return_value = util_load_json('test_data/CUD_json_profile.json')
result = f5_update_policy_json_profile_command(mocked_client, '0000', 'id123', 'param-1', '100', '100',
'100', '100').outputs
assert result.get('name') == 'json-profile'
assert result.get('id') == 'json-profile-md5'
def test_f5_delete_policy_json_profile_command(mocker):
from F5_ASM import f5_delete_policy_json_profile_command
mocked_client = mocker.Mock()
mocked_client.delete_policy_json_profile.return_value = util_load_json('test_data/CUD_json_profile.json')
result = f5_delete_policy_json_profile_command(mocked_client, '0000', 'id123', 'param-1').outputs
assert result.get('name') == 'json-profile'
assert result.get('id') == 'json-profile-md5'
def test_f5_list_policy_xml_profiles_command(mocker):
from F5_ASM import f5_list_policy_xml_profiles_command
mocked_client = mocker.Mock()
mocked_client.list_policy_xml_profiles.return_value = MOCK_EMPTY_RESPONSE
result = f5_list_policy_xml_profiles_command(mocked_client, '0000').to_context()
result = result.get('HumanReadable')
assert 'No results' in result
mocked_client.list_policy_xml_profiles.return_value = util_load_json('test_data/list_xml_profile.json')
result = f5_list_policy_xml_profiles_command(mocked_client, '0000').outputs
assert result[0].get('name') == 'Default'
assert result[0].get('id') == 'xml-profile-md5'
def test_f5_add_policy_xml_profile_command(mocker):
from F5_ASM import f5_add_policy_xml_profile_command
mocked_client = mocker.Mock()
mocked_client.add_policy_xml_profile.return_value = util_load_json('test_data/CUD_xml_profile.json')
result = f5_add_policy_xml_profile_command(mocked_client, '0000', 'param-1', '100').outputs
assert result.get('name') == 'new_xml_profile'
assert result.get('id') == 'xml-profile-md5'
def test_f5_update_policy_xml_profile_command(mocker):
from F5_ASM import f5_update_policy_xml_profile_command
mocked_client = mocker.Mock()
mocked_client.update_policy_xml_profile.return_value = util_load_json('test_data/CUD_xml_profile.json')
result = f5_update_policy_xml_profile_command(mocked_client, '0000', 'param-1', '100').outputs
assert result.get('name') == 'new_xml_profile'
assert result.get('id') == 'xml-profile-md5'
def test_f5_delete_policy_xml_profile_command(mocker):
from F5_ASM import f5_delete_policy_xml_profile_command
mocked_client = mocker.Mock()
mocked_client.delete_policy_xml_profile.return_value = util_load_json('test_data/CUD_xml_profile.json')
result = f5_delete_policy_xml_profile_command(mocked_client, '0000', 'id123', '8.8.8.8').outputs
assert result.get('name') == 'new_xml_profile'
assert result.get('id') == 'xml-profile-md5'
def test_f5_list_policy_signatures_command(mocker):
from F5_ASM import f5_list_policy_signatures_command
mocked_client = mocker.Mock()
mocked_client.list_policy_signatures.return_value = MOCK_EMPTY_RESPONSE
result = f5_list_policy_signatures_command(mocked_client, 'unimportant').to_context()
result = result.get('HumanReadable')
assert 'No results' in result
def test_f5_list_policy_server_technologies_command(mocker):
from F5_ASM import f5_list_policy_server_technologies_command
    # adding fields to BASIC_FIELDS after the previous test emptied this list.
LIST_FIELDS = ['name', 'id', 'selfLink', 'lastUpdateMicros', 'type', 'protocol', 'method'] # noqa: F841
mocked_client = mocker.Mock()
mocked_client.list_policy_server_technologies.return_value =\
util_load_json('test_data/list_server_technologies.json')
result = f5_list_policy_server_technologies_command(mocked_client, '0000').outputs
assert result[0].get('id') == 'server-technology-md5-1'
assert result[1].get('id') == 'server-technology-md5-2'
def test_f5_add_policy_server_technologies_command(mocker):
from F5_ASM import f5_add_policy_server_technology_command
mocked_client = mocker.Mock()
mocked_client.add_policy_server_technology.return_value =\
util_load_json('test_data/add_delete_server_technology.json')
result = f5_add_policy_server_technology_command(mocked_client, 'id123', '0000', 'ASP').outputs
assert result.get('id') == 'server-technology-md5'
def test_f5_delete_policy_server_technologies_command(mocker):
from F5_ASM import f5_delete_policy_server_technology_command
mocked_client = mocker.Mock()
mocked_client.delete_policy_server_technology.return_value = util_load_json('test_data/add_delete_server_technology.json')
result = f5_delete_policy_server_technology_command(mocked_client, 'id123', '0000', 'ASP').outputs
assert result.get('id') == 'server-technology-md5'
def test_f5_list_policy_whitelist_ips_command(mocker):
from F5_ASM import f5_list_policy_whitelist_ips_command
    # adding fields to BASIC_FIELDS after the previous test emptied this list.
LIST_FIELDS = ['name', 'id', 'selfLink', 'lastUpdateMicros', 'type', 'protocol', 'method'] # noqa: F841
mocked_client = mocker.Mock()
mocked_client.list_policy_whitelist_ips.return_value = util_load_json('test_data/list_whitelist.json')
result = f5_list_policy_whitelist_ips_command(mocked_client, '0000').outputs
assert result[0].get('id') == 'whitelist-md5-1'
assert result[1].get('id') == 'whitelist-md5-2'
def test_f5_add_policy_whitelist_ip_command(mocker):
from F5_ASM import f5_add_policy_whitelist_ip_command
mocked_client = mocker.Mock()
mocked_client.add_policy_whitelist_ip.return_value = util_load_json('test_data/CUD_whitelist.json')
result = f5_add_policy_whitelist_ip_command(mocked_client, '0000', '8.8.8.8').outputs
assert result.get('id') == 'whitelist-md5'
def test_f5_update_policy_whitelist_ip_command(mocker):
from F5_ASM import f5_update_policy_whitelist_ip_command
mocked_client = mocker.Mock()
mocked_client.update_policy_whitelist_ip.return_value = util_load_json('test_data/CUD_whitelist.json')
result = f5_update_policy_whitelist_ip_command(mocked_client, '0000', 'id123', '8.8.8.8').outputs
assert result.get('id') == 'whitelist-md5'
def test_f5_delete_policy_whitelist_ip_command(mocker):
from F5_ASM import f5_delete_policy_whitelist_ip_command
mocked_client = mocker.Mock()
mocked_client.delete_policy_whitelist_ip.return_value = util_load_json('test_data/CUD_whitelist.json')
result = f5_delete_policy_whitelist_ip_command(mocked_client, '0000', 'id123', '8.8.8.8').outputs
assert result.get('id') == 'whitelist-md5'
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from boto3.session import Session
from parameterized import parameterized
from airflow.models.connection import Connection
from airflow.providers.amazon.aws.transfers.redshift_to_s3 import RedshiftToS3Operator
from airflow.providers.amazon.aws.utils.redshift import build_credentials_block
from tests.test_utils.asserts import assert_equal_ignore_multiple_spaces
class TestRedshiftToS3Transfer(unittest.TestCase):
@parameterized.expand(
[
[True, "key/table_"],
[False, "key"],
]
)
@mock.patch("airflow.providers.amazon.aws.hooks.s3.S3Hook.get_connection")
@mock.patch("airflow.models.connection.Connection")
@mock.patch("boto3.session.Session")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_sql.RedshiftSQLHook.run")
def test_table_unloading(
self,
table_as_file_name,
expected_s3_key,
mock_run,
mock_session,
mock_connection,
mock_hook,
):
access_key = "aws_access_key_id"
secret_key = "aws_secret_access_key"
mock_session.return_value = Session(access_key, secret_key)
mock_session.return_value.access_key = access_key
mock_session.return_value.secret_key = secret_key
mock_session.return_value.token = None
mock_connection.return_value = Connection()
mock_hook.return_value = Connection()
schema = "schema"
table = "table"
s3_bucket = "bucket"
s3_key = "key"
unload_options = [
'HEADER',
]
op = RedshiftToS3Operator(
schema=schema,
table=table,
s3_bucket=s3_bucket,
s3_key=s3_key,
unload_options=unload_options,
include_header=True,
redshift_conn_id="redshift_conn_id",
aws_conn_id="aws_conn_id",
task_id="task_id",
table_as_file_name=table_as_file_name,
dag=None,
)
op.execute(None)
unload_options = '\n\t\t\t'.join(unload_options)
select_query = f"SELECT * FROM {schema}.{table}"
credentials_block = build_credentials_block(mock_session.return_value)
unload_query = op._build_unload_query(
credentials_block, select_query, expected_s3_key, unload_options
)
assert mock_run.call_count == 1
assert access_key in unload_query
assert secret_key in unload_query
assert_equal_ignore_multiple_spaces(self, mock_run.call_args[0][0], unload_query)
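    # For orientation: with the arguments above the operator issues a single
    # RedshiftSQLHook.run() call whose UNLOAD statement is roughly of the form
    # sketched below (exact quoting and whitespace are the operator's concern,
    # which is why the test rebuilds the query via _build_unload_query() and
    # compares while ignoring repeated spaces):
    #
    #     UNLOAD ('SELECT * FROM schema.table')
    #     TO 's3://bucket/key/table_'
    #     credentials 'aws_access_key_id=...;aws_secret_access_key=...'
    #     HEADER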
@parameterized.expand(
[
[True, "key/table_"],
[False, "key"],
]
)
@mock.patch("airflow.providers.amazon.aws.hooks.s3.S3Hook.get_connection")
@mock.patch("airflow.models.connection.Connection")
@mock.patch("boto3.session.Session")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_sql.RedshiftSQLHook.run")
def test_execute_sts_token(
self,
table_as_file_name,
expected_s3_key,
mock_run,
mock_session,
mock_connection,
mock_hook,
):
access_key = "ASIA_aws_access_key_id"
secret_key = "aws_secret_access_key"
token = "token"
mock_session.return_value = Session(access_key, secret_key, token)
mock_session.return_value.access_key = access_key
mock_session.return_value.secret_key = secret_key
mock_session.return_value.token = token
mock_connection.return_value = Connection()
mock_hook.return_value = Connection()
schema = "schema"
table = "table"
s3_bucket = "bucket"
s3_key = "key"
unload_options = [
'HEADER',
]
op = RedshiftToS3Operator(
schema=schema,
table=table,
s3_bucket=s3_bucket,
s3_key=s3_key,
unload_options=unload_options,
include_header=True,
redshift_conn_id="redshift_conn_id",
aws_conn_id="aws_conn_id",
task_id="task_id",
table_as_file_name=table_as_file_name,
dag=None,
)
op.execute(None)
unload_options = '\n\t\t\t'.join(unload_options)
select_query = f"SELECT * FROM {schema}.{table}"
credentials_block = build_credentials_block(mock_session.return_value)
unload_query = op._build_unload_query(
credentials_block, select_query, expected_s3_key, unload_options
)
assert mock_run.call_count == 1
assert access_key in unload_query
assert secret_key in unload_query
assert token in unload_query
assert_equal_ignore_multiple_spaces(self, mock_run.call_args[0][0], unload_query)
@parameterized.expand(
[
["table", True, "key/table_"],
["table", False, "key"],
[None, False, "key"],
[None, True, "key"],
]
)
@mock.patch("airflow.providers.amazon.aws.hooks.s3.S3Hook.get_connection")
@mock.patch("airflow.models.connection.Connection")
@mock.patch("boto3.session.Session")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_sql.RedshiftSQLHook.run")
def test_custom_select_query_unloading(
self,
table,
table_as_file_name,
expected_s3_key,
mock_run,
mock_session,
mock_connection,
mock_hook,
):
access_key = "aws_access_key_id"
secret_key = "aws_secret_access_key"
mock_session.return_value = Session(access_key, secret_key)
mock_session.return_value.access_key = access_key
mock_session.return_value.secret_key = secret_key
mock_session.return_value.token = None
mock_connection.return_value = Connection()
mock_hook.return_value = Connection()
s3_bucket = "bucket"
s3_key = "key"
unload_options = [
'HEADER',
]
select_query = "select column from table"
op = RedshiftToS3Operator(
select_query=select_query,
table=table,
table_as_file_name=table_as_file_name,
s3_bucket=s3_bucket,
s3_key=s3_key,
unload_options=unload_options,
include_header=True,
redshift_conn_id="redshift_conn_id",
aws_conn_id="aws_conn_id",
task_id="task_id",
dag=None,
)
op.execute(None)
unload_options = '\n\t\t\t'.join(unload_options)
credentials_block = build_credentials_block(mock_session.return_value)
unload_query = op._build_unload_query(
credentials_block, select_query, expected_s3_key, unload_options
)
assert mock_run.call_count == 1
assert access_key in unload_query
assert secret_key in unload_query
assert_equal_ignore_multiple_spaces(self, mock_run.call_args[0][0], unload_query)
@parameterized.expand(
[
[True, "key/table_"],
[False, "key"],
]
)
@mock.patch("airflow.providers.amazon.aws.hooks.s3.S3Hook.get_connection")
@mock.patch("airflow.models.connection.Connection")
@mock.patch("boto3.session.Session")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_sql.RedshiftSQLHook.run")
def test_table_unloading_role_arn(
self,
table_as_file_name,
expected_s3_key,
mock_run,
mock_session,
mock_connection,
mock_hook,
):
access_key = "aws_access_key_id"
secret_key = "aws_secret_access_key"
extra = {"role_arn": "arn:aws:iam::112233445566:role/myRole"}
mock_session.return_value = Session(access_key, secret_key)
mock_session.return_value.access_key = access_key
mock_session.return_value.secret_key = secret_key
mock_session.return_value.token = None
mock_connection.return_value = Connection(extra=extra)
mock_hook.return_value = Connection(extra=extra)
schema = "schema"
table = "table"
s3_bucket = "bucket"
s3_key = "key"
unload_options = [
'HEADER',
]
op = RedshiftToS3Operator(
schema=schema,
table=table,
s3_bucket=s3_bucket,
s3_key=s3_key,
unload_options=unload_options,
include_header=True,
redshift_conn_id="redshift_conn_id",
aws_conn_id="aws_conn_id",
task_id="task_id",
table_as_file_name=table_as_file_name,
dag=None,
)
op.execute(None)
unload_options = '\n\t\t\t'.join(unload_options)
select_query = f"SELECT * FROM {schema}.{table}"
credentials_block = f"aws_iam_role={extra['role_arn']}"
unload_query = op._build_unload_query(
credentials_block, select_query, expected_s3_key, unload_options
)
assert mock_run.call_count == 1
assert extra['role_arn'] in unload_query
assert_equal_ignore_multiple_spaces(self, mock_run.call_args[0][0], unload_query)
def test_template_fields_overrides(self):
assert RedshiftToS3Operator.template_fields == (
's3_bucket',
's3_key',
'schema',
'table',
'unload_options',
'select_query',
)
|
|
import datetime
import functools
import multiprocessing as mp
import os
import pickle
import re
from tensorflow.keras.preprocessing import image
import PIL.Image
from . import exceptions
from . import calc as ic
pj = os.path.join
ic_base_dir = 'imagecluster'
def read_pk(filename):
"""Read pickled data from `filename`."""
with open(filename, 'rb') as fd:
ret = pickle.load(fd)
return ret
def write_pk(obj, filename):
"""Write object `obj` pickled to `filename`."""
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'wb') as fd:
pickle.dump(obj, fd)
def get_files(imagedir, ext='jpg|jpeg|bmp|png'):
"""Return all file names with extension matching the regex `ext` from dir
`imagedir`.
Parameters
----------
imagedir : str
ext : str
regex
Returns
-------
list
list of file names
"""
rex = re.compile(r'^.*\.({})$'.format(ext), re.I)
return [os.path.join(imagedir,base) for base in os.listdir(imagedir)
if rex.match(base)]
def exif_timestamp(filename):
"""Read timestamp from image in `filename` from EXIF tag.
This will probably work for most JPG files, but not for PNG, for instance.
Raises
------
exceptions.ICExifReadError
Returns
-------
float
timestamp, seconds since Epoch
"""
# PIL lazy-loads the image data, so this open and _getexif() is fast.
img = PIL.Image.open(filename)
if ('exif' not in img.info.keys()) or (not hasattr(img, '_getexif')):
raise exceptions.ICExifReadError(f"no EXIF data found in {filename}")
    # Avoid constructing the whole EXIF dict just to extract the DateTime field.
# DateTime -> key 306 is in the EXIF standard, so let's use that directly.
## date_time = {TAGS[k] : v for k,v in exif.items()}['DateTime']
exif = img._getexif()
key = 306
if key not in exif.keys():
raise exceptions.ICExifReadError(f"key 306 (DateTime) not found in "
f"EXIF data of file {filename}")
# '2019:03:10 22:42:42'
date_time = exif[key]
if date_time.count(':') != 4:
msg = f"unsupported EXIF DateTime format in '{date_time}' of {filename}"
raise exceptions.ICExifReadError(msg)
# '2019:03:10 22:42:42' -> ['2019', '03', '10', '22', '42', '42']
date_time_str = date_time.replace(':', ' ').split()
names = ('year', 'month', 'day', 'hour', 'minute', 'second')
stamp = datetime.datetime(**{nn:int(vv) for nn,vv in zip(names,date_time_str)},
tzinfo=datetime.timezone.utc).timestamp()
return stamp
def stat_timestamp(filename):
"""File timestamp from file stats (mtime)."""
return os.stat(filename).st_mtime
def timestamp(filename, source='auto'):
"""Read single timestamp for image in `filename`.
Parameters
----------
filename : str
source : {'auto', 'stat', 'exif'}
Read timestamps from file stats ('stat'), or EXIF tags ('exif'). If
'auto', then try 'exif' first.
Returns
-------
float
timestamp, seconds since Epoch
"""
if source == 'auto':
try:
return exif_timestamp(filename)
except exceptions.ICExifReadError:
return stat_timestamp(filename)
elif source == 'stat':
return stat_timestamp(filename)
elif source == 'exif':
return exif_timestamp(filename)
else:
raise ValueError("source not in ['stat', 'exif', 'auto']")
# TODO some code duplication below, fix later with fancy factory functions.
# keras.preprocessing.image.load_img() uses img.resize(shape) with the default
# interpolation of Image.resize(), which is pretty bad (see
# imagecluster/play/pil_resample_methods.py). Given that we are restricted to
# small inputs of 224x224 by the VGG network, we should do our best to keep as
# much information from the original image as possible. This is a gut feeling,
# untested. But given that model.predict() is 10x slower than PIL image loading
# and resizing ... who cares.
#
# (224, 224, 3)
##img = image.load_img(filename, target_size=size)
##... = image.img_to_array(img)
def _image_worker(filename, size):
    # Handle the PIL error "OSError: broken data stream when reading image
    # file". See https://github.com/python-pillow/Pillow/issues/1510 . We hit
    # this issue with smartphone panorama JPG files. But instead of bluntly
    # setting ImageFile.LOAD_TRUNCATED_IMAGES = True and hoping for the best
    # (was the image read, and read to the end?), we catch the OSError thrown
    # by PIL and skip the file completely. This is better than reading
    # potentially undefined data and processing it. A more specialized
    # exception from PIL's side would be good, but let's hope that an OSError
    # doesn't cover too much ground when reading data from disk :-)
try:
print(filename)
img = PIL.Image.open(filename).convert('RGB').resize(size, resample=3)
arr = image.img_to_array(img, dtype=int)
return filename, arr
except OSError as ex:
print(f"skipping {filename}: {ex}")
return filename, None
def _timestamp_worker(filename, source):
try:
return filename, timestamp(filename, source)
except OSError as ex:
print(f"skipping {filename}: {ex}")
return filename, None
def read_images(imagedir, size, ncores=mp.cpu_count()):
"""Load images from `imagedir` and resize to `size`.
Parameters
----------
imagedir : str
size : sequence length 2
(width, height), used in ``Image.open(filename).resize(size)``
ncores : int
run that many parallel processes
Returns
-------
dict
{filename: 3d array (height, width, 3), ...}
"""
_f = functools.partial(_image_worker, size=size)
with mp.Pool(ncores) as pool:
ret = pool.map(_f, get_files(imagedir))
return {k: v for k,v in ret if v is not None}
def read_timestamps(imagedir, source='auto', ncores=mp.cpu_count()):
"""Read timestamps of all images in `imagedir`.
Parameters
----------
imagedir : str
source : see :func:`~imagecluster.io.timestamp`
ncores : int
run that many parallel processes
Returns
-------
dict
{filename: timestamp (int, seconds since Epoch)}
"""
_f = functools.partial(_timestamp_worker, source=source)
with mp.Pool(ncores) as pool:
ret = pool.map(_f, get_files(imagedir))
return {k: v for k,v in ret if v is not None}
# TODO fingerprints and timestamps may have different images which have been
# skipped -> we need a data struct to hold all image data and mask out the
# skipped ones. For now we have a check in calc.cluster()
def get_image_data(imagedir, model_kwds=dict(layer='fc2'),
img_kwds=dict(size=(224,224)), timestamps_kwds=dict(source='auto'),
pca_kwds=None):
"""Convenience function to create `images`, `fingerprints`,
`timestamps`.
    It checks for existing `images` and `fingerprints` database files on
    disk and loads them if present. Running this again only loads data from
    disk, which is fast. Default locations::
fingerprints: <imagedir>/imagecluster/fingerprints.pk
images: <imagedir>/imagecluster/images.pk
Parameters
----------
imagedir : str
model_kwds : dict
passed to :func:`~imagecluster.calc.get_model`
img_kwds : dict
passed to :func:`~imagecluster.io.read_images`
timestamps_kwds : dict
passed to :func:`~imagecluster.io.read_timestamps`
pca_kwds : dict
passed to :func:`~imagecluster.calc.pca`, PCA is skipped if
``pca_kwds=None``
Returns
-------
images : see :func:`~imagecluster.io.read_images`
fingerprints : see :func:`~imagecluster.calc.fingerprints`
timestamps : see :func:`~imagecluster.io.read_timestamps`
"""
fingerprints_fn = pj(imagedir, ic_base_dir, 'fingerprints.pk')
images_fn = pj(imagedir, ic_base_dir, 'images.pk')
if os.path.exists(images_fn):
print(f"reading image arrays {images_fn} ...")
images = read_pk(images_fn)
else:
print(f"create image arrays {images_fn}")
images = read_images(imagedir, **img_kwds)
write_pk(images, images_fn)
if os.path.exists(fingerprints_fn):
print(f"reading fingerprints {fingerprints_fn} ...")
fingerprints = read_pk(fingerprints_fn)
else:
print(f"create fingerprints {fingerprints_fn}")
fingerprints = ic.fingerprints(images, ic.get_model(**model_kwds))
if pca_kwds is not None:
fingerprints = ic.pca(fingerprints, **pca_kwds)
write_pk(fingerprints, fingerprints_fn)
print(f"reading timestamps ...")
if timestamps_kwds is not None:
timestamps = read_timestamps(imagedir, **timestamps_kwds)
return images, fingerprints, timestamps
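# Example call (a sketch; the keyword values shown are simply the defaults
# spelled out, and the pickle files described in the docstring are created on
# the first run):
#
#   images, fingerprints, timestamps = get_image_data(
#       '/path/to/imagedir',
#       model_kwds=dict(layer='fc2'),
#       img_kwds=dict(size=(224, 224)),
#       timestamps_kwds=dict(source='auto'),
#   )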
|
|
import collections
import json
import random
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.core.cache import cache
from django.db.models import Count, Max, Min
from django.http import Http404, JsonResponse
from django.shortcuts import redirect, render, get_object_or_404
from django.urls import reverse
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from election.predictions import MODELS
from election.utils import predict_winner
from endorsements.forms import EndorsementForm, SourceForm, \
EndorsementFormWithoutPosition, \
PersonalTagForm, EndorserForm, \
OrganizationTagForm, \
TagFilterForm
from endorsements.models import Account, Endorser, Candidate, Source, Quote, \
Tag, Endorsement, Category, Position
from endorsements.templatetags.endorsement_extras import shorten
from wikipedia.models import BulkImport, ImportedEndorsement, NEWSPAPER_SLUG, \
ImportedNewspaper, ImportedResult, \
ImportedRepresentative, ElectoralVotes
def search_endorsers(request):
query = request.GET.get('q')
endorsers = []
endorser_pks = set()
if query:
# First find the endorsers whose names start with this query.
results = Endorser.objects.filter(name__istartswith=query)
for endorser in results[:5]:
endorser_pks.add(endorser.pk)
endorsers.append(endorser)
if results.count() < 5:
results = Endorser.objects.filter(name__icontains=query)
for endorser in results:
if endorser.pk in endorser_pks:
continue
endorsers.append(endorser)
if len(endorsers) == 5:
break
return JsonResponse({
'endorsers': [{'pk': e.pk, 'name': e.name} for e in endorsers],
})
def get_endorsers(filter_params, sort_params, skip=0):
filters = {}
mode = filter_params.get('mode')
if mode == 'personal':
filters['is_personal'] = True
elif mode == 'organization':
filters['is_personal'] = False
if filters:
endorser_query = Endorser.objects.filter(**filters)
else:
endorser_query = Endorser.objects.all()
# Tags can't be placed in the filters dictionary because we may have to do
# multiple filters.
tags = filter_params.get('tags')
    if isinstance(tags, list) and tags:
for tag_pk in tags:
endorser_query = endorser_query.filter(tags=tag_pk)
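    # Illustration of why the tag filters are chained (pks are made up):
    #
    #   Endorser.objects.filter(tags=1).filter(tags=2)
    #
    # keeps only endorsers carrying *both* tags, whereas a single
    # .filter(tags__in=[1, 2]) would match endorsers with *either* tag.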
# Before we apply any candidate-specific filters, we need a count of all
# the endorsers associated with each position.
if skip == 0:
position_totals = collections.Counter()
for endorser in endorser_query.prefetch_related('current_position'):
position_totals['all'] += 1
if endorser.current_position:
position = endorser.current_position
else:
endorsement = endorser.get_current_endorsement()
if endorsement:
position = endorsement.position
else:
position = None
if position:
position_totals[position.pk] += 1
candidate = filter_params.get('candidate')
show_extra_positions = False
if candidate:
try:
position = Position.objects.get(slug=candidate)
endorser_query = endorser_query.filter(current_position=position)
# If this position is one of the extra positions, make sure those
# are visible on page load.
show_extra_positions = not position.show_on_load
except Position.DoesNotExist:
pass
sort_value = sort_params.get('value')
sort_key = None
annotation_key = None
if sort_value == 'most':
sort_key = '-max_followers'
elif sort_value == 'least':
sort_key = 'max_followers'
elif sort_value == 'newest':
sort_key = '-sort_key'
annotation_key = Max('endorsement__quote__date')
elif sort_value == 'oldest':
sort_key = 'sort_key'
annotation_key = Min('endorsement__quote__date')
elif sort_value == 'az':
sort_key = 'name'
elif sort_value == 'za':
sort_key = '-name'
if sort_key:
if annotation_key:
endorser_query = endorser_query.annotate(
sort_key=annotation_key
)
endorser_query = endorser_query.order_by(sort_key)
# This is only needed when ordering by endorsement (otherwise,
# duplicates may show up based on their endorsement date).
category_names = {}
for tag in Tag.objects.all().prefetch_related('category'):
category_names[tag.pk] = tag.category.name
# Figure out which endorsers are also candidates.
candidate_endorser_pks = set()
for candidate in Candidate.objects.values('endorser_link'):
candidate_endorser_pks.add(candidate['endorser_link'])
endorser_query = endorser_query.prefetch_related(
'tags',
'endorsement_set__position',
'endorsement_set__quote',
'endorsement_set__quote__source',
'endorsement_set__quote__event',
'account_set'
)
endorsers = []
for endorser in endorser_query[skip:skip + 12]:
tags = []
for tag in endorser.tags.all():
tags.append((tag.name, tag.pk))
endorsements = []
previous_position = None
position_pks = set()
for i, endorsement in enumerate(endorser.endorsement_set.all()):
# Ignore a position if it's the same as the previous one.
position = endorsement.position
if i == 0:
display = position.get_present_display()
else:
display = position.get_past_display()
quote = endorsement.quote
source = quote.source
event = quote.event
if event:
if event.start_date == event.end_date:
event_dates = event.start_date.strftime('%b %d, %Y')
else:
event_dates = '{start} to {end}'.format(
start=event.start_date.strftime('%b %d, %Y'),
end=event.end_date.strftime('%b %d, %Y')
)
else:
event_dates = None
endorsements.append({
'c': endorsement.position.colour,
'di': display,
'q': quote.text,
'cx': quote.context,
'ecx': quote.get_event_context(),
'e': event.name if event else '',
'ed': event_dates,
'da': quote.get_date_display(),
'su': source.url,
'sd': source.get_date_display(),
'sn': source.name,
})
accounts = []
max_followers = 0
for account in endorser.account_set.all():
if account.followers_count > max_followers:
max_followers = account.followers_count
accounts.append({
'u': account.screen_name,
'n': shorten(account.followers_count),
})
# Don't bother checking if it's a candidate unless there are no
# endorsements.
is_candidate = False
if not endorsements and endorser.pk in candidate_endorser_pks:
is_candidate = True
description = endorser.description
if description:
if len(description) > 80:
description = description[:80] + '...'
else:
description = 'No description'
endorsers.append({
'p': endorser.pk,
'n': endorser.name,
'u': endorser.url,
'd': description,
't': tags,
'e': endorsements,
'a': accounts,
'c': is_candidate,
'i': 'missing' if endorser.missing_image else endorser.pk,
})
to_return = {
'endorsers': endorsers,
}
# Only return position-related data if it's the first page.
if skip == 0:
positions = [
{
'name': 'All',
'slug': 'all',
'colour': 'grey',
'count': position_totals['all'],
}
]
extra_positions = []
position_query = Position.objects.annotate(count=Count('endorser'))
for position in position_query.order_by('-count'):
if position.show_on_load:
to_append_to = positions
else:
to_append_to = extra_positions
if position.present_tense_prefix == 'Endorses':
name = position.suffix
else:
name = position.get_present_display()
position_count = position_totals[position.pk]
if position_count > 0:
to_append_to.append({
'name': name,
'slug': position.slug,
'colour': position.colour,
'count': position_count,
})
to_return['positions'] = positions
to_return['extra_positions'] = extra_positions
to_return['show_extra_positions'] = show_extra_positions
return to_return
@csrf_exempt
def get_tags(request):
category_tags = collections.defaultdict(list)
for tag in Tag.objects.all():
category_tags[tag.category.pk].append({
'name': tag.name,
'pk': tag.pk,
})
org_tags = []
personal_tags = []
for category_pk in category_tags:
category = Category.objects.get(pk=category_pk)
tag = {
'name': category.name,
'tags': category_tags[category_pk],
'exclusive': category.is_exclusive,
}
if category.allow_org:
org_tags.append(tag)
if category.allow_personal:
personal_tags.append(tag)
return JsonResponse({
'org': org_tags,
'personal': personal_tags,
})
@require_POST
@csrf_exempt
def get_endorsements(request):
params = None
if request.body:
try:
params = json.loads(request.body)
except ValueError:
pass
if params is not None:
filter_params = params.get('filter')
        if not filter_params or not isinstance(filter_params, dict):
return JsonResponse({
'error': True,
'message': 'Need "filter" key with a dict value',
})
sort_params = params.get('sort')
        if not sort_params or not isinstance(sort_params, dict):
return JsonResponse({
'error': True,
'message': 'Need "sort" key with a dict value',
})
else:
filter_params = {}
sort_params = {}
try:
skip = int(request.GET.get('skip', 0))
except ValueError:
skip = 0
params_string = (
'query_{sort_by}_{sort_value}_{mode}_{candidate}_{tags}_{skip}'
).format(
sort_by=sort_params.get('by', 'followers'),
sort_value=sort_params.get('value', 'most'),
mode=filter_params.get('mode', 'none'),
candidate=filter_params.get('candidate', 'all'),
tags=','.join(sorted(map(str, filter_params.get('tags', [])))),
skip=skip
)
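    # With an empty body and default sort (an illustrative case), the key
    # comes out as 'query_followers_most_none_all__0'.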
print params_string
results = cache.get(params_string)
if results is None:
results = get_endorsers(filter_params, sort_params, skip)
cache.set(params_string, results, 60 * 60)
return JsonResponse(results)
def browse(request):
positions = Position.objects.all().annotate(
num_endorsers=Count('endorsement__endorser')
)
counts = {}
for position in positions:
if position.slug:
counts[position.slug] = position.num_endorsers
counts['total'] = Endorser.objects.count()
context = {
'counts': counts,
}
return render(request, 'endorsers/browse.html', context)
@require_POST
def add_endorser(request):
username = request.POST.get('username')
account = Account.objects.get_from_username(username)
    if account is None:
        messages.add_message(
            request,
            messages.ERROR,
            u'Could not get user for {username}'.format(
                username=username
            )
        )
        # No account, so there is no endorser page to redirect to; fall back
        # to the browse page ('browse' is an assumed URL name here).
        return redirect('browse')
    # Redirect to the page for editing the endorser object.
    return redirect('view-endorser', pk=account.endorser.pk)
def view_endorser(request, pk):
endorser = get_object_or_404(Endorser, pk=pk)
endorsement_form = EndorsementForm()
imported_endorsements = ImportedEndorsement.objects.filter(
confirmed_endorser=endorser
)
imported_representatives = ImportedRepresentative.objects.filter(
confirmed_endorser=endorser
)
context = {
'endorser': endorser,
'endorsement_form': endorsement_form,
'imported_endorsements': imported_endorsements,
'imported_representatives': imported_representatives,
}
return render(request, 'endorsers/view.html', context)
@staff_member_required
@require_POST
def add_account(request, pk):
endorser = get_object_or_404(Endorser, pk=pk)
username = request.POST.get('username')
account = Account.objects.get_from_username(username, endorser=endorser)
if account:
messages.add_message(
request,
messages.SUCCESS,
u'Added the account @{username}'.format(
username=username
)
)
return redirect('view-endorser', pk=pk)
@require_POST
def add_endorsement(request, pk):
endorser = get_object_or_404(Endorser, pk=pk)
form = EndorsementForm(request.POST)
if not form.is_valid():
messages.add_message(
request,
messages.ERROR,
u'Not valid form'
)
return redirect('view-endorser', pk=pk)
# First, create the source, or get it if it already exists.
source_url = form.cleaned_data['source_url']
source_date = form.cleaned_data['date']
source_name = form.cleaned_data['source_name']
try:
source = Source.objects.get(url=source_url)
except Source.DoesNotExist:
source = Source.objects.create(
date=source_date,
url=source_url,
name=source_name
)
quote_text = form.cleaned_data['quote']
quote_context = form.cleaned_data['context']
quote_event = form.cleaned_data['event']
try:
quote = Quote.objects.get(
source=source,
text=quote_text,
)
except Quote.DoesNotExist:
quote = Quote.objects.create(
source=source,
text=quote_text,
context=quote_context,
date=source_date,
event=quote_event,
)
position = form.cleaned_data['position']
endorsement = Endorsement.objects.create(
position=position,
endorser=endorser,
quote=quote,
confirmed=request.user.is_staff,
)
messages.add_message(
request,
messages.SUCCESS,
u'Added endorsement',
)
return redirect('view-endorser', pk=pk)
def random_endorser(request):
endorser_count = Endorser.objects.count()
random_endorser_index = random.randint(0, endorser_count - 1)
random_endorser = Endorser.objects.all()[random_endorser_index]
context = {
'endorser': random_endorser,
}
return render(request, 'endorsers/random.html', context)
# category name: is personal
CATEGORY_NAMES = {
'Gender': True,
'Race/ethnicity': True,
'Organizations': False,
}
def stats_tags(request):
candidates = list(
Candidate.objects.filter(still_running=True).order_by('pk')
)
positions = [
candidate.position.pk for candidate in candidates
]
categories = []
for category_name, is_personal in CATEGORY_NAMES.iteritems():
category = Category.objects.get(name=category_name)
category_candidates = []
for candidate in candidates:
position = candidate.position
endorsers = Endorser.objects.filter(
is_personal=is_personal,
current_position=position
)
category_endorsers = endorsers.filter(
tags__category=category
).distinct()
percent_reporting = (
category_endorsers.count() / float(endorsers.count()) * 100
)
category_candidates.append({
'num_tagged': category_endorsers.count(),
'percent_reporting': percent_reporting
})
# The Other column.
endorsers = Endorser.objects.exclude(current_position__in=positions)
category_endorsers = endorsers.filter(
is_personal=is_personal,
tags__category=category
)
percent_reporting = (
category_endorsers.count() / float(endorsers.count()) * 100
)
category_candidates.append({
'num_tagged': category_endorsers.count(),
'percent_reporting': percent_reporting
})
# Now get the tag-specific stats
category_tags = []
for tag in category.tag_set.all():
tag_candidates = []
for candidate in candidates:
position = candidate.position
endorsers = Endorser.objects.filter(
current_position=position,
is_personal=is_personal,
)
tag_endorsers = endorsers.filter(tags=tag)
num_tagged = tag_endorsers.count()
tag_candidates.append({
'num_tagged': num_tagged,
})
# The Other column.
endorsers = Endorser.objects.exclude(current_position__in=positions)
            tag_endorsers = endorsers.filter(
                tags=tag,
                is_personal=is_personal,
            )
tag_candidates.append({
'num_tagged': tag_endorsers.count(),
})
category_tags.append({
'name': tag.name,
'candidates': tag_candidates,
})
num_endorsers = Endorser.objects.count()
category_endorsers = Endorser.objects.filter(tags__category=category)
num_tagged = category_endorsers.count()
percent_reporting = num_tagged / float(num_endorsers) * 100
categories.append({
'name': category.name,
'candidates': category_candidates,
'tags': category_tags,
'num_tagged': num_tagged,
'percent_reporting': percent_reporting,
})
context = {
'candidates': candidates,
'categories': categories,
}
return render(request, 'stats/tags.html', context)
def charts(request):
context = {}
return render(request, 'stats/charts.html', context)
SLUG_MAPPING = {
'List_of_Gary_Johnson_presidential_campaign_endorsements,_2016': 'johnson',
'List_of_Jill_Stein_presidential_campaign_endorsements,_2016': 'stein',
'List_of_Evan_McMullin_presidential_campaign_endorsements,_2016': 'mcmullin',
'List_of_Donald_Trump_presidential_campaign_endorsements,_2016': 'trump',
'List_of_Hillary_Clinton_presidential_campaign_endorsements,_2016': 'clinton',
}
def progress_wikipedia(request):
slug_counts = collections.defaultdict(collections.Counter)
for e in ImportedEndorsement.objects.all().prefetch_related('bulk_import'):
slug = e.bulk_import.slug
if e.confirmed_endorser_id:
slug_counts[slug]['confirmed'] += 1
else:
if e.notes:
slug_counts[slug]['unverifiable'] += 1
else:
slug_counts[slug]['unconfirmed'] += 1
slug_counts[slug]['imported'] += 1
positions = []
for slug in slug_counts:
position_slug = SLUG_MAPPING.get(slug)
if position_slug:
position = Position.objects.get(slug=position_slug)
name = position.get_present_display()
endorsers = Endorser.objects.filter(endorsement__position=position)
missing_endorsers = endorsers.filter(importedendorsement=None)
num_endorsers = endorsers.count()
num_missing = missing_endorsers.count()
else:
position = None
name = '--'
num_endorsers = 0
num_missing = 0
num_unconfirmed = slug_counts[slug]['unconfirmed']
num_imported = slug_counts[slug]['imported']
if num_unconfirmed > 0:
progress = int(
(num_imported - num_unconfirmed) / float(num_imported) * 100
)
else:
progress = 100
last_import = BulkImport.objects.filter(slug=slug).latest('created_at')
positions.append({
'name': name,
'last_checked': last_import.created_at,
'slug': slug,
'num_confirmed': slug_counts[slug]['confirmed'],
'num_unconfirmed': num_unconfirmed,
'num_unverifiable': slug_counts[slug]['unverifiable'],
'num_imported': num_imported,
'num_missing': num_missing,
'num_endorsements': num_endorsers,
'progress': progress,
})
num_newspapers = ImportedNewspaper.objects.count()
num_confirmed = ImportedNewspaper.objects.filter(
confirmed_endorser__isnull=False
).count()
last_newspaper_import = BulkImport.objects.filter(
importednewspaper__isnull=False
).latest('pk')
num_missing = Endorser.objects.filter(
importednewspaper=None,
tags=Tag.objects.get(name='Publication')
).count()
progress = num_confirmed / float(num_newspapers) * 100
positions.append({
'name': 'Newspaper endorsements',
'last_checked': last_newspaper_import.created_at,
'slug': last_newspaper_import.slug,
'num_imported': num_newspapers,
'num_confirmed': num_confirmed,
'num_unconfirmed': num_newspapers - num_confirmed,
'num_unverifiable': 0,
'num_missing': num_missing,
'progress': progress,
})
context = {
'positions': positions,
}
return render(request, 'progress/wikipedia.html', context)
def progress_wikipedia_missing(request, slug):
if slug == NEWSPAPER_SLUG:
endorsements = Endorsement.objects.filter(
endorser__importednewspaper=None,
endorser__tags=Tag.objects.get(name='Publication')
)
else:
position = Position.objects.get(slug=SLUG_MAPPING[slug])
endorsements = Endorsement.objects.filter(
position=position,
endorser__importedendorsement=None
)
context = {
'slug': slug,
'endorsements': endorsements,
}
return render(request, 'progress/wikipedia_missing.html', context)
def progress_wikipedia_list(request, slug, mode):
if slug == NEWSPAPER_SLUG:
query = ImportedNewspaper.objects.all()
is_newspapers = True
else:
is_newspapers = False
query = ImportedEndorsement.objects.filter(
bulk_import__slug=slug,
)
position = get_object_or_404(Position, slug=SLUG_MAPPING.get(slug))
imported = []
if mode == 'already':
title = 'Already imported'
query = query.filter(confirmed_endorser__isnull=False)
query = query.prefetch_related('confirmed_endorser__endorsement_set')
for obj in query.order_by('-confirmed_endorser_id'):
endorser = obj.confirmed_endorser
if is_newspapers:
endorsements = endorser.endorsement_set.all()
attributes = {
'endorser_name': obj.name,
'endorser_details': "{city}, {state}".format(
city=obj.city,
state=obj.state
),
'citation_url': obj.url,
'citation_date': obj.date,
'citation_name': obj.name,
}
raw_text = obj.endorsement_2016
sections = obj.get_section_display()
notes = None
url_name = 'admin:wikipedia_importednewspaper_change'
else:
endorsements = endorser.endorsement_set.filter(
position=position
)
attributes = obj.parse_text()
raw_text = obj.raw_text
sections = obj.sections
notes = obj.notes
url_name = 'admin:wikipedia_importedendorsement_change'
imported.append({
'endorsements': endorsements,
'confirmed': True,
'endorser': endorser,
'attributes': attributes,
'raw_text': raw_text,
'sections': sections,
'notes': notes,
'pk': obj.pk,
'url_name': url_name,
})
elif mode == 'notyet':
# TODO: Handle newspapers here.
title = 'Not yet imported'
query = query.filter(confirmed_endorser__isnull=True)
for obj in query:
endorser = obj.get_likely_endorser()
url_name = 'admin:wikipedia_importedendorsement_change'
imported.append({
'endorsements': [],
'confirmed': False,
'endorser': endorser,
'attributes': obj.parse_text(),
'raw_text': obj.raw_text,
'sections': obj.sections,
'notes': obj.notes,
'pk': obj.pk,
'url_name': url_name,
})
else:
raise Http404
context = {
'slug': slug,
'title': title,
'imported': imported,
}
return render(request, 'progress/wikipedia_list.html', context)
@staff_member_required
@require_POST
def confirm_endorsement(request, pk):
imported_endorsement = ImportedEndorsement.objects.get(pk=pk)
slug = imported_endorsement.bulk_import.slug
position = Position.objects.get(slug=SLUG_MAPPING[slug])
endorser_form = EndorserForm(request.POST)
endorsement_form = EndorsementFormWithoutPosition(request.POST)
likely_endorser = imported_endorsement.get_likely_endorser()
if likely_endorser:
endorser = likely_endorser
if endorser.endorsement_set.filter(position=position).exists():
num_remaining = ImportedEndorsement.objects.filter(
confirmed_endorser=None,
notes=None,
).count()
imported_endorsement.confirmed_endorser = endorser
imported_endorsement.save()
messages.add_message(
request,
messages.SUCCESS,
'Confirmed endorser with pk {pk}; {n} left'.format(
pk=endorser.pk,
n=num_remaining,
),
)
return redirect('confirm-endorsements')
else:
if not endorser_form.is_valid():
messages.add_message(
request,
messages.ERROR,
'Invalid endorser form: {}'.format(endorser_form.errors),
)
return redirect('confirm-endorsements')
endorser = Endorser.objects.create(
name=endorser_form.cleaned_data['name'],
description=endorser_form.cleaned_data['description'],
url=endorser_form.cleaned_data['url'],
is_personal=endorser_form.cleaned_data['is_personal'],
max_followers=0,
)
for tag in endorser_form.cleaned_data['tags']:
endorser.tags.add(tag)
username_1 = endorser_form.cleaned_data['twitter_username_1']
if username_1:
account = Account.objects.get_from_username(
username_1,
endorser=endorser
)
username_2 = endorser_form.cleaned_data['twitter_username_2']
if username_2:
account = Account.objects.get_from_username(
username_2,
endorser=endorser
)
if not endorsement_form.is_valid():
messages.add_message(
request,
messages.ERROR,
'Invalid endorseMENT form: {}'.format(endorsement_form.errors),
)
return redirect('confirm-endorsements')
try:
source = Source.objects.get(
url=endorsement_form.cleaned_data['source_url']
)
except Source.DoesNotExist:
source = Source.objects.create(
date=endorsement_form.cleaned_data['date'] or None,
url=endorsement_form.cleaned_data['source_url'],
name=endorsement_form.cleaned_data['source_name']
)
quote = Quote.objects.create(
context=endorsement_form.cleaned_data['context'],
text=endorsement_form.cleaned_data['quote'],
source=source,
date=endorsement_form.cleaned_data['date'] or None,
event=endorsement_form.cleaned_data['event']
)
endorsement = endorser.endorsement_set.create(
quote=quote,
position=position,
confirmed=True
)
imported_endorsement.confirmed_endorser = endorser
imported_endorsement.save()
num_remaining = ImportedEndorsement.objects.filter(
confirmed_endorser=None,
notes=None,
).count()
messages.add_message(
request,
messages.SUCCESS,
'Added endorser with pk {pk}; {n} left'.format(
pk=endorser.pk,
n=num_remaining,
),
)
return redirect('confirm-endorsements')
@never_cache
@staff_member_required
def confirm_endorsements(request):
# Find the next imported endorsement to confirm.
query = ImportedEndorsement.objects.filter(
confirmed_endorser__isnull=True,
notes=None
)
if not query.count():
messages.add_message(
request,
messages.ERROR,
'No more imported endorsements to confirm',
)
return redirect('progress-wikipedia')
endorsement = query.latest('pk')
attributes = endorsement.parse_text()
source_url = attributes['citation_url']
if source_url:
twitter_username = source_url.partition('twitter.com/')[2]
twitter_username = twitter_username.partition('/')[0]
else:
twitter_username = None
endorser_form = EndorserForm(initial={
'name': attributes['endorser_name'],
'description': attributes['endorser_details'],
'is_personal': True,
'twitter_username_1': twitter_username,
})
endorsement_form = EndorsementFormWithoutPosition(initial={
'date': attributes['citation_date'],
'source_url': source_url,
'source_name': attributes['citation_name'],
})
slug = endorsement.bulk_import.slug
position = Position.objects.get(slug=SLUG_MAPPING[slug])
name = attributes['endorser_name']
likely_endorser = endorsement.get_likely_endorser()
context = {
'endorsement': endorsement,
'endorser_form': endorser_form,
'endorsement_form': endorsement_form,
'name': name,
'source_url': attributes['citation_url'],
'position': position,
'likely_endorser': likely_endorser,
'has_endorsement': likely_endorser and likely_endorser.endorsement_set.filter(
position=position
).exists(),
}
return render(request, 'confirm/endorsement.html', context)
@never_cache
@staff_member_required
def confirm_newspapers(request):
# Find the next imported endorsement to confirm.
query = ImportedNewspaper.objects.filter(
confirmed_endorser=None
)
if not query.count():
messages.add_message(
request,
messages.ERROR,
'No more imported newspapers to confirm',
)
return redirect('progress-wikipedia')
newspaper = query.latest('pk')
source_url = newspaper.url
if source_url:
twitter_username = source_url.partition('twitter.com/')[2]
twitter_username = twitter_username.partition('/')[0]
            # Guess the newspaper's URL based on the source URL: keep everything
            # up to the first '/' after the scheme (the search starts 8
            # characters in, i.e. past 'https://', and the same offset is
            # added back when slicing).
            i = source_url[8:].index('/')
            endorser_url = source_url[:i + 8]
else:
twitter_username = None
endorser_url = None
description = "{type} for {city}, {state}".format(
type=newspaper.get_section_display()[:-1],
city=newspaper.city,
state=newspaper.state
)
endorser_form = EndorserForm(initial={
'name': newspaper.name,
'description': description,
'twitter_username_1': twitter_username,
'url': endorser_url,
})
slug = (newspaper.endorsement_2016 or '').lower()
try:
position = Position.objects.get(slug=slug)
except Position.DoesNotExist:
position = None
endorsement_form = EndorsementForm(initial={
'date': newspaper.date,
'source_url': source_url,
'source_name': newspaper.name,
'context': 'In an editorial endorsement',
'position': position,
})
context = {
'newspaper': newspaper,
'endorser_form': endorser_form,
'endorsement_form': endorsement_form,
'name': newspaper.name,
'source_url': source_url,
}
return render(request, 'confirm/newspaper.html', context)
@staff_member_required
@require_POST
def confirm_newspaper(request, pk):
newspaper = ImportedNewspaper.objects.get(pk=pk)
endorser_form = EndorserForm(request.POST)
endorsement_form = EndorsementForm(request.POST)
if not endorser_form.is_valid():
messages.add_message(
request,
messages.ERROR,
'Invalid endorser form: {}'.format(endorser_form.errors),
)
return redirect('newspaper-next')
if not endorsement_form.is_valid():
messages.add_message(
request,
messages.ERROR,
'Invalid endorseMENT form: {}'.format(endorsement_form.errors),
)
return redirect('newspaper-next')
endorser = Endorser.objects.create(
name=endorser_form.cleaned_data['name'],
description=endorser_form.cleaned_data['description'],
url=endorser_form.cleaned_data['url'],
is_personal=False,
max_followers=0,
)
endorser.tags.add(Tag.objects.get(name='Publication'))
username_1 = endorser_form.cleaned_data['twitter_username_1']
if username_1:
account = Account.objects.get_from_username(
username_1,
endorser=endorser
)
username_2 = endorser_form.cleaned_data['twitter_username_2']
if username_2:
account = Account.objects.get_from_username(
username_2,
endorser=endorser
)
try:
source = Source.objects.get(
url=endorsement_form.cleaned_data['source_url']
)
except Source.DoesNotExist:
source = Source.objects.create(
date=endorsement_form.cleaned_data['date'] or None,
url=endorsement_form.cleaned_data['source_url'],
name=endorsement_form.cleaned_data['source_name']
)
quote = Quote.objects.create(
context=endorsement_form.cleaned_data['context'],
text=endorsement_form.cleaned_data['quote'],
source=source,
date=endorsement_form.cleaned_data['date'] or None,
event=endorsement_form.cleaned_data['event']
)
endorsement = endorser.endorsement_set.create(
quote=quote,
position=endorsement_form.cleaned_data['position'],
confirmed=True
)
newspaper.confirmed_endorser = endorser
newspaper.save()
num_remaining = ImportedNewspaper.objects.filter(
confirmed_endorser=None,
).count()
messages.add_message(
request,
messages.SUCCESS,
'Added newspaper with pk {pk}; {n} left'.format(
pk=endorser.pk,
n=num_remaining,
),
)
return redirect('confirm-newspapers')
def stats_states(request):
candidates = list(
Candidate.objects.filter(still_running=True).order_by('pk')
)
positions = [
candidate.position.pk for candidate in candidates
]
cache_key = 'stats_states'
cached_values = cache.get(cache_key)
if cached_values is None:
candidate_counts = collections.defaultdict(collections.Counter)
states = []
tags = {
'newspapers': Tag.objects.get(name='Publication'),
'politicians': Tag.objects.get(name='Politician'),
'senators': Tag.objects.get(name='Current Senator'),
'representatives': Tag.objects.get(name='Current U.S. Representative'),
'Republicans': Tag.objects.get(name='Republican Party'),
}
max_counts = collections.defaultdict(dict)
for state_tag in Tag.objects.filter(category_id=8).order_by('name'):
results = ImportedResult.objects.filter(
tag=state_tag,
).prefetch_related('candidate')
if not results.count():
continue
votes = {}
for result in results:
votes[result.candidate.pk] = result.count
candidate_values = []
for candidate in candidates:
endorsements = Endorser.objects.filter(
current_position=candidate.position,
tags=state_tag,
).distinct()
num_endorsements = endorsements.count()
counts = collections.OrderedDict()
counts['endorsements'] = num_endorsements
counts['newspapers'] = endorsements.filter(
tags=tags['newspapers']
).count()
counts['politicians'] = endorsements.filter(
tags=tags['politicians']
).count()
counts['senators'] = endorsements.filter(
tags=tags['senators']
).count()
counts['representatives'] = endorsements.filter(
tags=tags['representatives']
).count()
counts['Republicans'] = endorsements.filter(
tags=tags['Republicans']
).count()
for key, value in counts.iteritems():
if key in max_counts[candidate.pk]:
max_counts[candidate.pk][key] = max(
value, max_counts[candidate.pk][key]
)
else:
max_counts[candidate.pk][key] = value
candidate_counts[candidate.pk].update(counts)
candidate_counts[candidate.pk]['votes'] += votes[candidate.pk]
if 'votes' in max_counts[candidate.pk]:
max_counts[candidate.pk]['votes'] = max(
max_counts[candidate.pk]['votes'], votes[candidate.pk]
)
else:
max_counts[candidate.pk]['votes'] = votes[candidate.pk]
candidate_values.append({
'votes': votes[candidate.pk],
'counts': [
(key, value, tags.get(key))
for key, value in counts.iteritems()
],
'rgb': candidate.rgb,
})
# Figure out the opacity level for each cell in this row.
total_votes = sum(votes.values())
max_votes = max(votes.values())
winning_color = None
for candidate_value in candidate_values:
ratio = candidate_value['votes'] / float(total_votes)
percent = ratio * 100
candidate_value['percent'] = percent
candidate_value['ratio'] = '{:2.2f}'.format(ratio)
candidate_won = candidate_value['votes'] == max_votes
candidate_value['won'] = candidate_won
if candidate_won:
winning_color = candidate_value['rgb']
other_endorsements = Endorser.objects.filter(
tags=state_tag,
).exclude(
current_position__pk__in=positions,
).prefetch_related('current_position')
position_counter = collections.Counter()
for endorser in other_endorsements:
position = endorser.current_position
if position:
position_counter[position.pk] += 1
other_positions = []
for position in Position.objects.exclude(pk__in=positions):
count = position_counter[position.pk]
if count > 0:
other_positions.append({
'name': position.get_present_display(),
'count': count,
'slug': position.slug,
})
state_counts = collections.OrderedDict()
endorsements = Endorser.objects.filter(
tags=state_tag,
).distinct()
state_counts['endorsements'] = endorsements.count()
state_counts['newspapers'] = endorsements.filter(
tags=tags['newspapers']
).count()
state_counts['politicians'] = endorsements.filter(
tags=tags['politicians']
).count()
state_counts['senators'] = endorsements.filter(
tags=tags['senators']
).count()
state_counts['representatives'] = endorsements.filter(
tags=tags['representatives']
).count()
state_counts['Republicans'] = endorsements.filter(
tags=tags['Republicans']
).count()
states.append({
'pk': state_tag.pk,
'name': state_tag.name,
'candidates': candidate_values,
'counts': [
(key, value, tags.get(key))
for key, value in state_counts.iteritems()
],
'votes': total_votes,
'winning_color': winning_color,
'other_positions': other_positions,
'num_other_positions': sum(position_counter.values())
})
cached_values = {
'states': states,
'candidate_counts': [
(c, dict(candidate_counts[c.pk]), max_counts[c.pk])
for c in candidates
],
}
cache.set(cache_key, cached_values)
context = {
'states': cached_values['states'],
'candidates': candidates,
'candidate_counts': cached_values['candidate_counts'],
}
return render(request, 'stats/states.html', context)
def progress_tagging(request):
org_tags = set(
tag.pk for tag in Tag.objects.filter(category__name='Organizations')
)
gender_tags = set(
tag.pk for tag in Tag.objects.filter(category__name='Gender')
)
race_tags = set(
tag.pk for tag in Tag.objects.filter(category__name='Race/ethnicity')
)
occupation_tags = set(
tag.pk for tag in Tag.objects.filter(category__name='Occupation')
)
politician_tag = Tag.objects.get(name='Politician').pk
location_tags = set(
tag.pk for tag in Tag.objects.filter(category__name='States and districts')
)
party_tags = set(
tag.pk for tag in Tag.objects.filter(category__name='Party affiliation')
)
needs_keys = ['tags', 'org_type', 'gender', 'race', 'occupation', 'location', 'party']
IGNORED_SECTIONS = 'Endorsements > International political figures'
sections_by_page = []
tag_names = {
tag['pk']: tag['name'] for tag in Tag.objects.values('name', 'pk')
}
admin_url = reverse('admin:wikipedia_importedendorsement_changelist')
# Keep track of the tags common to each section.
section_tags = {}
for slug in SLUG_MAPPING:
section_counter = collections.defaultdict(collections.Counter)
imports = ImportedEndorsement.objects.filter(
bulk_import__slug=slug
).exclude(
sections__startswith=IGNORED_SECTIONS
).prefetch_related('confirmed_endorser', 'confirmed_endorser__tags')
for imported_endorsement in imports:
section = imported_endorsement.sections
section_counter[section]['total'] += 1
endorser = imported_endorsement.confirmed_endorser
if endorser is None:
continue
section_counter[section]['imported'] += 1
tag_pks = set(tag.pk for tag in endorser.tags.all())
if section in section_tags:
section_tags[section] &= tag_pks
else:
section_tags[section] = tag_pks
if not tag_pks:
section_counter[section]['needs_tags'] += 1
if endorser.is_personal:
if not gender_tags & tag_pks:
section_counter[section]['needs_gender'] += 1
if not race_tags & tag_pks:
section_counter[section]['needs_race'] += 1
if not occupation_tags & tag_pks:
section_counter[section]['needs_occupation'] += 1
if politician_tag in tag_pks:
if not location_tags & tag_pks:
section_counter[section]['needs_location'] += 1
if not party_tags & tag_pks:
section_counter[section]['needs_party'] += 1
else:
if not org_tags & tag_pks:
section_counter[section]['needs_org_type'] += 1
sections = []
for section, counter in section_counter.iteritems():
needs = []
show_section = False
for needs_key in needs_keys:
count = counter['needs_' + needs_key]
if count > 0:
show_section = True
url = (
'{admin_url}?bulk_import__slug={slug}'
'&needs={key}'
                        '&sections={section}'
'&is_confirmed=yes'.format(
admin_url=admin_url,
key=needs_key,
section=section,
slug=slug,
)
)
else:
url = None
needs.append({
'count': count,
'url': url,
})
if not show_section:
continue
common_tags = [
tag_names[tag_pk] for tag_pk in section_tags[section]
]
sections.append({
'name': section,
'common_tags': common_tags,
'total': counter['total'],
'imported': counter['imported'],
'needs': needs,
})
sections_by_page.append({
'slug': slug,
'sections': sections,
})
context = {
'sections_by_page': sections_by_page,
}
return render(request, 'progress/tagging.html', context)
def progress_twitter(request):
pass
def stats_predictions(request):
ENDORSER_TYPES = {
'clinton': Position.objects.get(slug='clinton'),
'trump': Position.objects.get(slug='trump'),
'pence': Position.objects.get(slug='pence'),
'another-republican': Position.objects.get(slug='another-republican'),
'trump-support': Position.objects.get(slug='trump-support'),
'senate': Tag.objects.get(name='Current Senator'),
'house': Tag.objects.get(name='Current U.S. Representative'),
'republican': Tag.objects.get(name='Republican Party'),
'democrat': Tag.objects.get(name='Democratic Party'),
'newspaper': Tag.objects.get(name='Publication')
}
clinton_pk = ENDORSER_TYPES['clinton'].pk
trump_pk = ENDORSER_TYPES['trump'].pk
endorser_pks = {}
for key, value in ENDORSER_TYPES.iteritems():
endorser_pks[key] = set(
value.endorser_set.values_list('id', flat=True)
)
state_tag_pks = set()
results = collections.defaultdict(dict)
results_query = ImportedResult.objects.filter(
candidate__in=[clinton_pk, trump_pk]
).prefetch_related('tag', 'candidate')
for result in results_query:
results[result.tag.name][result.candidate.pk] = result.percent
state_tag_pks.add(result.tag.pk)
states = []
for state_tag in Tag.objects.filter(category_id=8).order_by('name'):
if state_tag.name not in results:
continue
# Find the actual vote spread.
clinton_percent = results[state_tag.name][clinton_pk]
trump_percent = results[state_tag.name][trump_pk]
votes = predict_winner(
clinton_percent,
trump_percent,
5,
is_percent=True
)
state_endorser_pks = set(
state_tag.endorser_set.values_list('id', flat=True)
)
states.append({
'name': state_tag.name,
'votes': votes,
'endorser_pks': state_endorser_pks,
})
electoral_votes = {
e.state.name: e.count for e in ElectoralVotes.objects.all()
}
# Apply all the different models.
categories = []
for category, category_models in MODELS.iteritems():
category_states = []
for state in states:
state_models = []
for model in category_models:
model_counts = model.apply_model(
state['endorser_pks'],
endorser_pks
)
model_data = predict_winner(
model_counts['clinton'],
model_counts['trump'],
model.threshold,
)
state_models.append(model_data)
# Figure out which models were correct
for model_data in state_models:
model_data['correct_candidate'] = (
model_data['color'] == state['votes']['color']
)
model_data['correct_size'] = (
model_data['correct_candidate'] and
model_data['basic'] == state['votes']['basic']
)
category_states.append({
'name': state['name'],
'votes': state['votes'],
'models': state_models,
'electoral_votes': electoral_votes[state['name']],
})
model_summaries = []
for i, model in enumerate(category_models):
clinton_electoral_votes = sum(
state['electoral_votes']
for state in category_states
if state['models'][i]['winner'] == 'clinton'
)
trump_electoral_votes = sum(
state['electoral_votes']
for state in category_states
if state['models'][i]['winner'] == 'trump'
)
            if clinton_electoral_votes >= 270:
                electoral_vote_winner = 'blue'
            elif trump_electoral_votes >= 270:
electoral_vote_winner = 'red'
else:
electoral_vote_winner = 'grey'
model_summaries.append({
'name': model.name,
'num_correct_candidate': sum(
state['models'][i]['correct_candidate']
for state in category_states
),
'num_correct_size': sum(
state['models'][i]['correct_size']
for state in category_states
),
'clinton_electoral_votes': clinton_electoral_votes,
'trump_electoral_votes': trump_electoral_votes,
'electoral_vote_winner': electoral_vote_winner,
})
categories.append({
'name': category,
'states': category_states,
'models': model_summaries,
})
context = {
'categories': categories,
}
return render(request, 'stats/predictions.html', context)
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import uuid
from oslo_config import cfg
from oslo_log import log
import six
from keystone.common import authorization
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import utils
from keystone.common import wsgi
from keystone import exception
from keystone.i18n import _, _LW
from keystone.models import token_model
LOG = log.getLogger(__name__)
CONF = cfg.CONF
def v2_deprecated(f):
"""No-op decorator in preparation for deprecating Identity API v2.
This is a placeholder for the pending deprecation of v2. The implementation
of this decorator can be replaced with::
from keystone.openstack.common import versionutils
v2_deprecated = versionutils.deprecated(
what='v2 API',
as_of=versionutils.deprecated.JUNO,
in_favor_of='v3 API')
"""
return f
def _build_policy_check_credentials(self, action, context, kwargs):
LOG.debug('RBAC: Authorizing %(action)s(%(kwargs)s)', {
'action': action,
'kwargs': ', '.join(['%s=%s' % (k, kwargs[k]) for k in kwargs])})
# see if auth context has already been created. If so use it.
if ('environment' in context and
authorization.AUTH_CONTEXT_ENV in context['environment']):
LOG.debug('RBAC: using auth context from the request environment')
return context['environment'].get(authorization.AUTH_CONTEXT_ENV)
# There is no current auth context, build it from the incoming token.
# TODO(morganfainberg): Collapse this logic with AuthContextMiddleware
# in a sane manner as this just mirrors the logic in AuthContextMiddleware
try:
LOG.debug('RBAC: building auth context from the incoming auth token')
token_ref = token_model.KeystoneToken(
token_id=context['token_id'],
token_data=self.token_provider_api.validate_token(
context['token_id']))
# NOTE(jamielennox): whilst this maybe shouldn't be within this
# function it would otherwise need to reload the token_ref from
# backing store.
wsgi.validate_token_bind(context, token_ref)
except exception.TokenNotFound:
LOG.warning(_LW('RBAC: Invalid token'))
raise exception.Unauthorized()
auth_context = authorization.token_to_auth_context(token_ref)
return auth_context
def protected(callback=None):
"""Wraps API calls with role based access controls (RBAC).
This handles both the protection of the API parameters as well as any
target entities for single-entity API calls.
    More complex API calls (for example, those that deal with several different
    entities) should pass in a callback function, which will be subsequently
called to check protection for these multiple entities. This callback
function should gather the appropriate entities needed and then call
check_protection() in the V3Controller class.
"""
def wrapper(f):
@functools.wraps(f)
def inner(self, context, *args, **kwargs):
if 'is_admin' in context and context['is_admin']:
LOG.warning(_LW('RBAC: Bypassing authorization'))
elif callback is not None:
prep_info = {'f_name': f.__name__,
'input_attr': kwargs}
callback(self, context, prep_info, *args, **kwargs)
else:
action = 'identity:%s' % f.__name__
creds = _build_policy_check_credentials(self, action,
context, kwargs)
policy_dict = {}
# Check to see if we need to include the target entity in our
# policy checks. We deduce this by seeing if the class has
# specified a get_member() method and that kwargs contains the
# appropriate entity id.
if (hasattr(self, 'get_member_from_driver') and
self.get_member_from_driver is not None):
key = '%s_id' % self.member_name
if key in kwargs:
ref = self.get_member_from_driver(kwargs[key])
policy_dict['target'] = {self.member_name: ref}
# TODO(henry-nash): Move this entire code to a member
# method inside v3 Auth
if context.get('subject_token_id') is not None:
token_ref = token_model.KeystoneToken(
token_id=context['subject_token_id'],
token_data=self.token_provider_api.validate_token(
context['subject_token_id']))
policy_dict.setdefault('target', {})
policy_dict['target'].setdefault(self.member_name, {})
policy_dict['target'][self.member_name]['user_id'] = (
token_ref.user_id)
try:
user_domain_id = token_ref.user_domain_id
except exception.UnexpectedError:
user_domain_id = None
if user_domain_id:
policy_dict['target'][self.member_name].setdefault(
'user', {})
policy_dict['target'][self.member_name][
'user'].setdefault('domain', {})
policy_dict['target'][self.member_name]['user'][
'domain']['id'] = (
user_domain_id)
# Add in the kwargs, which means that any entity provided as a
# parameter for calls like create and update will be included.
policy_dict.update(kwargs)
self.policy_api.enforce(creds,
action,
utils.flatten_dict(policy_dict))
LOG.debug('RBAC: Authorization granted')
return f(self, context, *args, **kwargs)
return inner
return wrapper
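# A minimal usage sketch (the controller and method names below are purely
# illustrative, not part of keystone):
#
#   class WidgetV3(V3Controller):
#       collection_name = 'widgets'
#       member_name = 'widget'
#
#       @protected()
#       def get_widget(self, context, widget_id):
#           ...
#
# With no callback, the decorator enforces the 'identity:get_widget' policy
# rule against credentials built from the request's token before calling the
# wrapped method.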
def filterprotected(*filters):
"""Wraps filtered API calls with role based access controls (RBAC)."""
def _filterprotected(f):
@functools.wraps(f)
def wrapper(self, context, **kwargs):
if not context['is_admin']:
action = 'identity:%s' % f.__name__
creds = _build_policy_check_credentials(self, action,
context, kwargs)
# Now, build the target dict for policy check. We include:
#
# - Any query filter parameters
# - Data from the main url (which will be in the kwargs
# parameter) and would typically include the prime key
# of a get/update/delete call
#
# First any query filter parameters
target = dict()
if filters:
for item in filters:
if item in context['query_string']:
target[item] = context['query_string'][item]
LOG.debug('RBAC: Adding query filter params (%s)', (
', '.join(['%s=%s' % (item, target[item])
for item in target])))
# Now any formal url parameters
for key in kwargs:
target[key] = kwargs[key]
self.policy_api.enforce(creds,
action,
utils.flatten_dict(target))
LOG.debug('RBAC: Authorization granted')
else:
LOG.warning(_LW('RBAC: Bypassing authorization'))
return f(self, context, filters, **kwargs)
return wrapper
return _filterprotected
class V2Controller(wsgi.Application):
"""Base controller class for Identity API v2."""
def _normalize_domain_id(self, context, ref):
"""Fill in domain_id since v2 calls are not domain-aware.
This will overwrite any domain_id that was inadvertently
specified in the v2 call.
"""
ref['domain_id'] = CONF.identity.default_domain_id
return ref
@staticmethod
def filter_domain_id(ref):
"""Remove domain_id since v2 calls are not domain-aware."""
ref.pop('domain_id', None)
return ref
@staticmethod
def filter_domain(ref):
"""Remove domain since v2 calls are not domain-aware.
V3 Fernet tokens builds the users with a domain in the token data.
This method will ensure that users create in v3 belong to the default
domain.
"""
if 'domain' in ref:
if ref['domain'].get('id') != CONF.identity.default_domain_id:
raise exception.Unauthorized(
_('Non-default domain is not supported'))
del ref['domain']
return ref
@staticmethod
def normalize_username_in_response(ref):
"""Adds username to outgoing user refs to match the v2 spec.
Internally we use `name` to represent a user's name. The v2 spec
requires the use of `username` instead.
"""
if 'username' not in ref and 'name' in ref:
ref['username'] = ref['name']
return ref
@staticmethod
def normalize_username_in_request(ref):
"""Adds name in incoming user refs to match the v2 spec.
Internally we use `name` to represent a user's name. The v2 spec
requires the use of `username` instead.
"""
if 'name' not in ref and 'username' in ref:
ref['name'] = ref.pop('username')
return ref
@staticmethod
def v3_to_v2_user(ref):
"""Convert a user_ref from v3 to v2 compatible.
* v2.0 users are not domain aware, and should have domain_id removed
* v2.0 users expect the use of tenantId instead of default_project_id
* v2.0 users have a username attribute
This method should only be applied to user_refs being returned from the
v2.0 controller(s).
If ref is a list type, we will iterate through each element and do the
conversion.
"""
def _format_default_project_id(ref):
"""Convert default_project_id to tenantId for v2 calls."""
default_project_id = ref.pop('default_project_id', None)
if default_project_id is not None:
ref['tenantId'] = default_project_id
elif 'tenantId' in ref:
# NOTE(morganfainberg): To avoid v2.0 confusion if somehow a
# tenantId property sneaks its way into the extra blob on the
# user, we remove it here. If default_project_id is set, we
# would override it in either case.
del ref['tenantId']
def _normalize_and_filter_user_properties(ref):
"""Run through the various filter/normalization methods."""
_format_default_project_id(ref)
V2Controller.filter_domain(ref)
V2Controller.filter_domain_id(ref)
V2Controller.normalize_username_in_response(ref)
return ref
if isinstance(ref, dict):
return _normalize_and_filter_user_properties(ref)
elif isinstance(ref, list):
return [_normalize_and_filter_user_properties(x) for x in ref]
else:
raise ValueError(_('Expected dict or list: %s') % type(ref))
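    # For illustration (field values are made up), a v3 user ref such as
    #   {'id': 'u1', 'name': 'alice', 'domain_id': 'default',
    #    'default_project_id': 'p1'}
    # comes back from v3_to_v2_user() as
    #   {'id': 'u1', 'name': 'alice', 'username': 'alice', 'tenantId': 'p1'}.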
def format_project_list(self, tenant_refs, **kwargs):
"""Format a v2 style project list, including marker/limits."""
marker = kwargs.get('marker')
first_index = 0
if marker is not None:
for (marker_index, tenant) in enumerate(tenant_refs):
if tenant['id'] == marker:
# we start pagination after the marker
first_index = marker_index + 1
break
else:
msg = _('Marker could not be found')
raise exception.ValidationError(message=msg)
limit = kwargs.get('limit')
last_index = None
if limit is not None:
try:
limit = int(limit)
if limit < 0:
raise AssertionError()
except (ValueError, AssertionError):
msg = _('Invalid limit value')
raise exception.ValidationError(message=msg)
last_index = first_index + limit
tenant_refs = tenant_refs[first_index:last_index]
for x in tenant_refs:
if 'enabled' not in x:
x['enabled'] = True
o = {'tenants': tenant_refs,
'tenants_links': []}
return o
@dependency.requires('policy_api', 'token_provider_api')
class V3Controller(wsgi.Application):
"""Base controller class for Identity API v3.
Child classes should set the ``collection_name`` and ``member_name`` class
attributes, representing the collection of entities they are exposing to
the API. This is required for supporting self-referential links,
pagination, etc.
Class parameters:
* `_mutable_parameters` - set of parameters that can be changed by users.
Usually used by cls.check_immutable_params()
* `_public_parameters` - set of parameters that are exposed to the user.
Usually used by cls.filter_params()
"""
collection_name = 'entities'
member_name = 'entity'
get_member_from_driver = None
@classmethod
def base_url(cls, context, path=None):
endpoint = super(V3Controller, cls).base_url(context, 'public')
if not path:
path = cls.collection_name
return '%s/%s/%s' % (endpoint, 'v3', path.lstrip('/'))
def get_auth_context(self, context):
# TODO(dolphm): this method of accessing the auth context is terrible,
# but context needs to be refactored to always have reasonable values.
env_context = context.get('environment', {})
return env_context.get(authorization.AUTH_CONTEXT_ENV, {})
@classmethod
def full_url(cls, context, path=None):
url = cls.base_url(context, path)
if context['environment'].get('QUERY_STRING'):
url = '%s?%s' % (url, context['environment']['QUERY_STRING'])
return url
@classmethod
def query_filter_is_true(cls, filter_value):
"""Determine if bool query param is 'True'.
We treat this the same way as we do for policy
enforcement:
{bool_param}=0 is treated as False
Any other value is considered to be equivalent to
True, including the absence of a value
"""
if (isinstance(filter_value, six.string_types) and
filter_value == '0'):
val = False
else:
val = True
return val
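    # Concrete cases under the rule above: '0' -> False, while '1', 'false',
    # '' (i.e. the parameter present with no value) and any other value are
    # all treated as True.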
@classmethod
def _add_self_referential_link(cls, context, ref):
ref.setdefault('links', {})
ref['links']['self'] = cls.base_url(context) + '/' + ref['id']
@classmethod
def wrap_member(cls, context, ref):
cls._add_self_referential_link(context, ref)
return {cls.member_name: ref}
@classmethod
def wrap_collection(cls, context, refs, hints=None):
"""Wrap a collection, checking for filtering and pagination.
        Returns the wrapped collection, which includes:
        - Executing any filtering not already carried out
        - Truncating to a set limit if necessary
        - Adding 'self' links to every member
        - Adding 'next', 'self' and 'previous' links for the whole collection.
:param context: the current context, containing the original url path
and query string
:param refs: the list of members of the collection
:param hints: list hints, containing any relevant filters and limit.
Any filters already satisfied by managers will have been
removed
"""
# Check if there are any filters in hints that were not
# handled by the drivers. The driver will not have paginated or
# limited the output if it found there were filters it was unable to
# handle.
if hints is not None:
refs = cls.filter_by_attributes(refs, hints)
list_limited, refs = cls.limit(refs, hints)
for ref in refs:
cls.wrap_member(context, ref)
container = {cls.collection_name: refs}
container['links'] = {
'next': None,
'self': cls.full_url(context, path=context['path']),
'previous': None}
if list_limited:
container['truncated'] = True
return container
@classmethod
def limit(cls, refs, hints):
"""Limits a list of entities.
The underlying driver layer may have already truncated the collection
for us, but in case it was unable to handle truncation we check here.
:param refs: the list of members of the collection
:param hints: hints, containing, among other things, the limit
requested
:returns: boolean indicating whether the list was truncated, as well
as the list of (truncated if necessary) entities.
"""
NOT_LIMITED = False
LIMITED = True
if hints is None or hints.limit is None:
# No truncation was requested
return NOT_LIMITED, refs
if hints.limit.get('truncated', False):
# The driver did truncate the list
return LIMITED, refs
if len(refs) > hints.limit['limit']:
# The driver layer wasn't able to truncate it for us, so we must
# do it here
return LIMITED, refs[:hints.limit['limit']]
return NOT_LIMITED, refs
@classmethod
def filter_by_attributes(cls, refs, hints):
"""Filters a list of references by filter values."""
def _attr_match(ref_attr, val_attr):
"""Matches attributes allowing for booleans as strings.
We test explicitly for a value that defines it as 'False',
which also means that the existence of the attribute with
no value implies 'True'
"""
if type(ref_attr) is bool:
return ref_attr == utils.attr_as_boolean(val_attr)
else:
return ref_attr == val_attr
def _inexact_attr_match(filter, ref):
"""Applies an inexact filter to a result dict.
:param filter: the filter in question
:param ref: the dict to check
            :returns: True if there is a match
"""
comparator = filter['comparator']
key = filter['name']
if key in ref:
filter_value = filter['value']
target_value = ref[key]
if not filter['case_sensitive']:
# We only support inexact filters on strings so
# it's OK to use lower()
filter_value = filter_value.lower()
target_value = target_value.lower()
if comparator == 'contains':
return (filter_value in target_value)
elif comparator == 'startswith':
return target_value.startswith(filter_value)
elif comparator == 'endswith':
return target_value.endswith(filter_value)
else:
# We silently ignore unsupported filters
return True
return False
for filter in hints.filters:
if filter['comparator'] == 'equals':
attr = filter['name']
value = filter['value']
refs = [r for r in refs if _attr_match(
utils.flatten_dict(r).get(attr), value)]
else:
# It might be an inexact filter
refs = [r for r in refs if _inexact_attr_match(
filter, r)]
return refs
@classmethod
def build_driver_hints(cls, context, supported_filters):
"""Build list hints based on the context query string.
:param context: contains the query_string from which any list hints can
be extracted
:param supported_filters: list of filters supported, so ignore any
keys in query_dict that are not in this list.
"""
query_dict = context['query_string']
hints = driver_hints.Hints()
if query_dict is None:
return hints
for key in query_dict:
# Check if this is an exact filter
if supported_filters is None or key in supported_filters:
hints.add_filter(key, query_dict[key])
continue
# Check if it is an inexact filter
for valid_key in supported_filters:
# See if this entry in query_dict matches a known key with an
# inexact suffix added. If it doesn't match, then that just
# means that there is no inexact filter for that key in this
# query.
if not key.startswith(valid_key + '__'):
continue
base_key, comparator = key.split('__', 1)
                # We map a query-style inexact filter of, for example:
                #
                # {'email__contains': 'myISP'}
                #
                # into an add_filter() call with parameters of:
#
# name = 'email'
# value = 'myISP'
# comparator = 'contains'
# case_sensitive = True
case_sensitive = True
if comparator.startswith('i'):
case_sensitive = False
comparator = comparator[1:]
hints.add_filter(base_key, query_dict[key],
comparator=comparator,
case_sensitive=case_sensitive)
# NOTE(henry-nash): If we were to support pagination, we would pull any
# pagination directives out of the query_dict here, and add them into
# the hints list.
return hints
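    # Illustrative sketch (hypothetical request): for a query string of
    # name=admin&email__icontains=MyISP with
    # supported_filters = ['name', 'email'], the returned hints carry an
    # exact filter (name == 'admin') plus an inexact one with name='email',
    # value='MyISP', comparator='contains' and case_sensitive=False.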
def _require_matching_id(self, value, ref):
"""Ensures the value matches the reference's ID, if any."""
if 'id' in ref and ref['id'] != value:
raise exception.ValidationError('Cannot change ID')
def _require_matching_domain_id(self, ref_id, ref, get_member):
"""Ensure the current domain ID matches the reference one, if any.
Provided we want domain IDs to be immutable, check whether any
domain_id specified in the ref dictionary matches the existing
domain_id for this entity.
:param ref_id: the ID of the entity
:param ref: the dictionary of new values proposed for this entity
:param get_member: The member function to call to get the current
entity
:raises: :class:`keystone.exception.ValidationError`
"""
# TODO(henry-nash): It might be safer and more efficient to do this
# check in the managers affected, so look to migrate this check to
# there in the future.
if CONF.domain_id_immutable and 'domain_id' in ref:
existing_ref = get_member(ref_id)
if ref['domain_id'] != existing_ref['domain_id']:
raise exception.ValidationError(_('Cannot change Domain ID'))
def _assign_unique_id(self, ref):
"""Generates and assigns a unique identifier to a reference."""
ref = ref.copy()
ref['id'] = uuid.uuid4().hex
return ref
def _get_domain_id_for_list_request(self, context):
"""Get the domain_id for a v3 list call.
        If we are running with multiple domain drivers, then the caller must
specify a domain_id either as a filter or as part of the token scope.
"""
if not CONF.identity.domain_specific_drivers_enabled:
# We don't need to specify a domain ID in this case
return
if context['query_string'].get('domain_id') is not None:
return context['query_string'].get('domain_id')
try:
token_ref = token_model.KeystoneToken(
token_id=context['token_id'],
token_data=self.token_provider_api.validate_token(
context['token_id']))
except KeyError:
raise exception.ValidationError(
_('domain_id is required as part of entity'))
except (exception.TokenNotFound,
exception.UnsupportedTokenVersionException):
LOG.warning(_LW('Invalid token found while getting domain ID '
'for list request'))
raise exception.Unauthorized()
if token_ref.domain_scoped:
return token_ref.domain_id
else:
LOG.warning(
_LW('No domain information specified as part of list request'))
raise exception.Unauthorized()
def _get_domain_id_from_token(self, context):
"""Get the domain_id for a v3 create call.
In the case of a v3 create entity call that does not specify a domain
ID, the spec says that we should use the domain scoping from the token
being used.
"""
# We could make this more efficient by loading the domain_id
# into the context in the wrapper function above (since
# this version of normalize_domain will only be called inside
# a v3 protected call). However, this optimization is probably not
# worth the duplication of state
try:
token_ref = token_model.KeystoneToken(
token_id=context['token_id'],
token_data=self.token_provider_api.validate_token(
context['token_id']))
except KeyError:
# This might happen if we use the Admin token, for instance
raise exception.ValidationError(
_('A domain-scoped token must be used'))
except (exception.TokenNotFound,
exception.UnsupportedTokenVersionException):
LOG.warning(_LW('Invalid token found while getting domain ID '
'for list request'))
raise exception.Unauthorized()
if token_ref.domain_scoped:
return token_ref.domain_id
else:
# TODO(henry-nash): We should issue an exception here since if
# a v3 call does not explicitly specify the domain_id in the
# entity, it should be using a domain scoped token. However,
# the current tempest heat tests issue a v3 call without this.
# This is raised as bug #1283539. Once this is fixed, we
# should remove the line below and replace it with an error.
return CONF.identity.default_domain_id
def _normalize_domain_id(self, context, ref):
"""Fill in domain_id if not specified in a v3 call."""
if 'domain_id' not in ref:
ref['domain_id'] = self._get_domain_id_from_token(context)
return ref
@staticmethod
def filter_domain_id(ref):
"""Override v2 filter to let domain_id out for v3 calls."""
return ref
def check_protection(self, context, prep_info, target_attr=None):
"""Provide call protection for complex target attributes.
As well as including the standard parameters from the original API
call (which is passed in prep_info), this call will add in any
additional entities or attributes (passed in target_attr), so that
they can be referenced by policy rules.
"""
if 'is_admin' in context and context['is_admin']:
LOG.warning(_LW('RBAC: Bypassing authorization'))
else:
action = 'identity:%s' % prep_info['f_name']
# TODO(henry-nash) need to log the target attributes as well
creds = _build_policy_check_credentials(self, action,
context,
prep_info['input_attr'])
# Build the dict the policy engine will check against from both the
# parameters passed into the call we are protecting (which was
# stored in the prep_info by protected()), plus the target
# attributes provided.
policy_dict = {}
if target_attr:
policy_dict = {'target': target_attr}
policy_dict.update(prep_info['input_attr'])
self.policy_api.enforce(creds,
action,
utils.flatten_dict(policy_dict))
LOG.debug('RBAC: Authorization granted')
@classmethod
def check_immutable_params(cls, ref):
"""Raise exception when disallowed parameter is in ref.
Check whether the ref dictionary representing a request has only
mutable parameters included. If not, raise an exception. This method
checks only root-level keys from a ref dictionary.
:param ref: a dictionary representing deserialized request to be
stored
:raises: :class:`keystone.exception.ImmutableAttributeError`
"""
ref_keys = set(ref.keys())
blocked_keys = ref_keys.difference(cls._mutable_parameters)
if not blocked_keys:
# No immutable parameters changed
return
exception_args = {'target': cls.__name__,
'attributes': ', '.join(blocked_keys)}
raise exception.ImmutableAttributeError(**exception_args)
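    # Illustrative sketch (hypothetical subclass): with
    # _mutable_parameters = frozenset(['name', 'description']),
    # check_immutable_params({'name': 'x', 'id': 'abc'}) raises
    # ImmutableAttributeError because 'id' is not mutable, while
    # check_immutable_params({'name': 'x'}) returns silently.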
@classmethod
def filter_params(cls, ref):
"""Remove unspecified parameters from the dictionary.
This function removes unspecified parameters from the dictionary. See
        check_immutable_params for the corresponding function that raises
exceptions. This method checks only root-level keys from a ref
dictionary.
:param ref: a dictionary representing deserialized response to be
serialized
"""
ref_keys = set(ref.keys())
blocked_keys = ref_keys - cls._public_parameters
for blocked_param in blocked_keys:
del ref[blocked_param]
return ref
|
|
# Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet, notice that glmnet divides it
    # by nobs (the number of observations).
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
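# For reference, the expected coefficients above follow from the Lasso
# objective (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1: with
# X = [-1, 0, 1] and y = [-1, 0, 1], X'y / n = X'X / n = 2/3, so the
# one-dimensional solution is the soft-thresholded value
# w = max(2/3 - alpha, 0) / (2/3) = 1 - 1.5 * alpha (for alpha < 2/3),
# i.e. 0.85 at alpha=0.1, 0.25 at alpha=0.5 and 0 at alpha=1.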
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
    # Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
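# Likewise for ElasticNet, whose penalty adds 0.5 * alpha * (1 - l1_ratio)
# * ||w||^2_2 to the L1 term: the one-dimensional solution becomes
# w = max(2/3 - alpha * l1_ratio, 0) / (2/3 + alpha * (1 - l1_ratio)),
# i.e. roughly 0.508 for alpha=0.5, l1_ratio=0.3 and 0.455 for alpha=0.5,
# l1_ratio=0.5, matching the assertions above.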
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
    Build an ill-posed linear regression problem with many noisy features and
    comparatively few samples.
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
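# With the defaults, build_dataset() returns X of shape (50, 200) and y of
# shape (50,), with only the first 10 weights non-zero, so n_features far
# exceeds n_samples and the problem is genuinely ill-posed.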
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_) -
np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_with_some_model_selection():
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn import datasets
from sklearn.linear_model import LassoCV
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
pipe = make_pipeline(
StandardScaler(),
LassoCV(cv=StratifiedKFold(n_splits=5))
)
pipe.fit(X, y)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1.0, tol=1e-8, max_iter=1)
assert_warns_message(ConvergenceWarning, 'did not converge', clf.fit, X, Y)
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
y = rng.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=50, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=10, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 10, 3), clf.mse_path_.shape)
assert_equal((2, 10), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((10, 3), clf.mse_path_.shape)
assert_equal(10, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises_regex(ValueError, ".*should be.*True.*False.*auto.*"
"array-like.*Got 'invalid'", clf.fit, X, y)
# Precompute = 'auto' is not supported for ElasticNet
assert_raises_regex(ValueError, ".*should be.*True.*False.*array-like.*"
"Got 'auto'", ElasticNet(precompute='auto').fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge
    # faster in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_enet_path_positive():
# Test positive parameter
X, Y, _, _ = build_dataset(n_samples=50, n_features=50, n_targets=2)
# For mono output
# Test that the coefs returned by positive=True in enet_path are positive
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, Y[:, 0], positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
# For multi output, positive parameter is not allowed
# Test that an error is raised
for path in [enet_path, lasso_path]:
assert_raises(ValueError, path, X, Y, positive=True)
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong dtype,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
    # With no input checking, providing X in C order should result in
    # incorrect computation
X = check_array(X, order='C', dtype='float64')
assert_raises(ValueError, clf.fit, X, y, check_input=False)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
def test_lasso_non_float_y():
X = [[0, 0], [1, 1], [-1, -1]]
y = [0, 1, 2]
y_float = [0.0, 1.0, 2.0]
for model in [ElasticNet, Lasso]:
clf = model(fit_intercept=False)
clf.fit(X, y)
clf_float = model(fit_intercept=False)
clf_float.fit(X, y_float)
assert_array_equal(clf.coef_, clf_float.coef_)
def test_enet_float_precision():
# Generate dataset
X, y, X_test, y_test = build_dataset(n_samples=20, n_features=10)
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
for normalize in [True, False]:
for fit_intercept in [True, False]:
coef = {}
intercept = {}
for dtype in [np.float64, np.float32]:
clf = ElasticNet(alpha=0.5, max_iter=100, precompute=False,
fit_intercept=fit_intercept,
normalize=normalize)
X = dtype(X)
y = dtype(y)
ignore_warnings(clf.fit)(X, y)
coef[('simple', dtype)] = clf.coef_
intercept[('simple', dtype)] = clf.intercept_
assert_equal(clf.coef_.dtype, dtype)
# test precompute Gram array
Gram = X.T.dot(X)
clf_precompute = ElasticNet(alpha=0.5, max_iter=100,
precompute=Gram,
fit_intercept=fit_intercept,
normalize=normalize)
ignore_warnings(clf_precompute.fit)(X, y)
assert_array_almost_equal(clf.coef_, clf_precompute.coef_)
assert_array_almost_equal(clf.intercept_,
clf_precompute.intercept_)
# test multi task enet
multi_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_multioutput = MultiTaskElasticNet(
alpha=0.5, max_iter=100, fit_intercept=fit_intercept,
normalize=normalize)
clf_multioutput.fit(X, multi_y)
coef[('multi', dtype)] = clf_multioutput.coef_
intercept[('multi', dtype)] = clf_multioutput.intercept_
assert_equal(clf.coef_.dtype, dtype)
for v in ['simple', 'multi']:
assert_array_almost_equal(coef[(v, np.float32)],
coef[(v, np.float64)],
decimal=4)
assert_array_almost_equal(intercept[(v, np.float32)],
intercept[(v, np.float64)],
decimal=4)
def test_enet_l1_ratio():
# Test that an error message is raised if an estimator that
# uses _alpha_grid is called with l1_ratio=0
msg = ("Automatic alpha grid generation is not supported for l1_ratio=0. "
"Please supply a grid by providing your estimator with the "
"appropriate `alphas=` argument.")
X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]]).T
y = np.array([12, 10, 11, 21, 5])
assert_raise_message(ValueError, msg, ElasticNetCV(
l1_ratio=0, random_state=42).fit, X, y)
assert_raise_message(ValueError, msg, MultiTaskElasticNetCV(
l1_ratio=0, random_state=42).fit, X, y[:, None])
# Test that l1_ratio=0 is allowed if we supply a grid manually
alphas = [0.1, 10]
estkwds = {'alphas': alphas, 'random_state': 42}
est_desired = ElasticNetCV(l1_ratio=0.00001, **estkwds)
est = ElasticNetCV(l1_ratio=0, **estkwds)
with ignore_warnings():
est_desired.fit(X, y)
est.fit(X, y)
assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)
est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, **estkwds)
est = MultiTaskElasticNetCV(l1_ratio=0, **estkwds)
with ignore_warnings():
est.fit(X, y[:, None])
est_desired.fit(X, y[:, None])
assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)
|
|
"""
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from collections import OrderedDict
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency
import pandas.compat as compat
from pandas.compat import (
get_range_parameters, lmap, lrange, raise_with_traceback, range)
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar, construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike, infer_dtype_from_scalar,
maybe_cast_to_datetime, maybe_cast_to_integer_array, maybe_castable,
maybe_convert_platform, maybe_infer_to_datetimelike, maybe_upcast)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_datetime64tz_dtype, is_dtype_equal,
is_extension_array_dtype, is_extension_type, is_float_dtype,
is_integer_dtype, is_iterator, is_list_like, is_object_dtype, pandas_dtype)
from pandas.core.dtypes.generic import (
ABCDataFrame, ABCDatetimeIndex, ABCIndexClass, ABCPandasArray,
ABCPeriodIndex, ABCSeries, ABCTimedeltaIndex)
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, common as com
from pandas.core.arrays import Categorical, ExtensionArray, period_array
from pandas.core.index import (
Index, _get_objs_combined_axis, _union_indexes, ensure_index)
from pandas.core.indexes import base as ibase
from pandas.core.internals import (
create_block_manager_from_arrays, create_block_manager_from_blocks)
from pandas.core.internals.arrays import extract_array
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = ensure_index(index)
    # don't force a copy because the data gets jammed into an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [ensure_index(columns), index]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
"""
Extract from a masked rec array and create the manager.
"""
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = get_names_from_index(fdata)
if index is None:
index = ibase.default_index(len(data))
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
if copy:
mgr = mgr.copy()
return mgr
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def init_ndarray(values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, ABCSeries):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# we could have a categorical type passed or coerced to 'category'
# recast this to an arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1, index, columns)
return arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif (is_datetime64tz_dtype(values) or
is_extension_array_dtype(values)):
# GH#19157
if columns is None:
columns = [0]
return arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '{dtype}' (Exception "
"was: {orig})".format(dtype=dtype,
orig=orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape, index=index, columns=columns)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
def init_dict(data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
from pandas.core.series import Series
arrays = Series(data, index=columns, dtype=object)
data_names = arrays.index
missing = arrays.isnull()
if index is None:
# GH10856
# raise ValueError if only scalars in dict
index = extract_index(arrays[~missing])
else:
index = ensure_index(index)
# no obvious "empty" int column
if missing.any() and not is_integer_dtype(dtype):
if dtype is None or np.issubdtype(dtype, np.flexible):
# GH#1783
nan_dtype = object
else:
nan_dtype = dtype
val = construct_1d_arraylike_from_scalar(np.nan, len(index),
nan_dtype)
arrays.loc[missing] = [val] * missing.sum()
else:
for key in data:
if (isinstance(data[key], ABCDatetimeIndex) and
data[key].tz is not None):
# GH#24096 need copy to be deep for datetime64tz case
# TODO: See if we can avoid these copies
data[key] = data[key].copy(deep=True)
keys = com.dict_keys_to_ordered_list(data)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
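# Illustrative example: init_dict({'a': [1, 2]}, index=None,
# columns=Index(['a', 'b'])) fills the missing column 'b' with NaN (using
# object dtype unless an explicit non-flexible dtype was requested) before
# handing the arrays off to arrays_to_mgr.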
# ---------------------------------------------------------------------
def prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, ABCSeries, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
# GH#21861
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except (ValueError, TypeError):
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
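# Illustrative example: prep_ndarray([1, 2, 3]) returns a (3, 1) ndarray
# because 1-d input is reshaped into a single column, prep_ndarray of a
# (2, 3) ndarray passes through with that shape, and any other ndim raises
# ValueError('Must pass 2-d input').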
def _homogenize(data, index, dtype=None):
oindex = None
homogenized = []
for val in data:
if isinstance(val, ABCSeries):
if dtype is not None:
val = val.astype(dtype)
if val.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
val = val.reindex(index, copy=False)
else:
if isinstance(val, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)):
val = com.dict_compat(val)
else:
val = dict(val)
val = lib.fast_multiget(val, oindex.values, default=np.nan)
val = sanitize_array(val, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(val)
return homogenized
def extract_index(data):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for val in data:
if isinstance(val, ABCSeries):
have_series = True
indexes.append(val.index)
elif isinstance(val, dict):
have_dicts = True
indexes.append(list(val.keys()))
elif is_list_like(val) and getattr(val, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(val))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length {length} does not match index '
'length {idx_len}'
.format(length=lengths[0], idx_len=len(index)))
raise ValueError(msg)
else:
index = ibase.default_index(lengths[0])
return ensure_index(index)
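# Illustrative example: extract_index([0, 1]) raises ValueError because
# scalar-only input requires an explicit index; a mix of Series and dicts
# yields the union of their indexes/keys, and raw arrays of differing
# lengths raise 'arrays must all be same length'.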
def reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = ensure_index(arr_columns).get_indexer(columns)
arr_columns = ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def get_names_from_index(data):
has_some_name = any(getattr(s, 'name', None) is not None for s in data)
if not has_some_name:
return ibase.default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed {count}'.format(count=count)
count += 1
return index
def _get_axes(N, K, index, columns):
# helper to create the axes as indexes
# return axes or defaults
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
return index, columns
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns.
"""
if isinstance(data, ABCDataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], compat.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], ABCSeries):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = ibase.default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, ABCSeries, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _get_objs_combined_axis(data, sort=False)
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = ibase.default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = com.values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = ibase.default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('{col:d} columns passed, passed data had '
'{con} columns'.format(col=len(columns),
con=len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
# ---------------------------------------------------------------------
# Series-Based
def sanitize_index(data, index, copy=False):
"""
    Sanitize an index type to return an ndarray of the underlying data;
    pass a non-Index through unchanged.
"""
if index is None:
return data
if len(data) != len(index):
raise ValueError('Length of values does not match length of index')
if isinstance(data, ABCIndexClass) and not copy:
pass
elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)):
data = data._values
if copy:
data = data.copy()
elif isinstance(data, np.ndarray):
# coerce datetimelike types
if data.dtype.kind in ['M', 'm']:
data = sanitize_array(data, index, copy=copy)
return data
def sanitize_array(data, index, dtype=None, copy=False,
raise_cast_failure=False):
"""
Sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified.
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
data = extract_array(data, extract_numpy=True)
# GH#846
if isinstance(data, np.ndarray):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
try:
subarr = _try_cast(data, True, dtype, copy,
True)
except ValueError:
if copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True, dtype, copy, raise_cast_failure)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH#6140
subarr = sanitize_index(data, index, copy=copy)
else:
        # we will try to copy by definition here
subarr = _try_cast(data, True, dtype, copy, raise_cast_failure)
elif isinstance(data, ExtensionArray):
if isinstance(data, ABCPandasArray):
# We don't want to let people put our PandasArray wrapper
# (the output of Series/Index.array), into a Series. So
# we explicitly unwrap it here.
subarr = data.to_numpy()
else:
subarr = data
        # everything else in this block must also handle ndarrays,
        # because we've unwrapped PandasArray into an ndarray.
if dtype is not None:
subarr = data.astype(dtype)
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, (list, tuple)) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False, dtype, copy,
raise_cast_failure)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH#16804
start, stop, step = get_range_parameters(data)
arr = np.arange(start, stop, step, dtype='int64')
subarr = _try_cast(arr, False, dtype, copy, raise_cast_failure)
else:
subarr = _try_cast(data, False, dtype, copy, raise_cast_failure)
# scalar like, GH
if getattr(subarr, 'ndim', 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(
value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception('Data must be 1-dimensional')
else:
subarr = com.asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
# GH#16605
# If not empty convert the data to dtype
# GH#19853: If data is a scalar, subarr has already the result
if not lib.is_scalar(data):
if not np.all(isna(data)):
data = np.array(data, dtype=dtype, copy=False)
subarr = np.array(data, dtype=object, copy=copy)
if is_object_dtype(subarr.dtype) and dtype != 'object':
inferred = lib.infer_dtype(subarr, skipna=False)
if inferred == 'period':
try:
subarr = period_array(subarr)
except IncompatibleFrequency:
pass
return subarr
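# Illustrative example: sanitize_array([1, 2, 3], index=None) returns a
# platform-integer ndarray, while sanitize_array(5, index=some_index) with a
# length-3 index broadcasts the scalar via construct_1d_arraylike_from_scalar
# into a length-3 array whose dtype is inferred from the value.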
def _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure):
# perf shortcut as this is the most common case
if take_fast_path:
if maybe_castable(arr) and not copy and dtype is None:
return arr
try:
        # GH#15832: Check if we are requesting a numeric dtype and
# that we can convert the data to the requested dtype.
if is_integer_dtype(dtype):
subarr = maybe_cast_to_integer_array(arr, dtype)
subarr = maybe_cast_to_datetime(arr, dtype)
# Take care in creating object arrays (but iterators are not
# supported):
if is_object_dtype(dtype) and (is_list_like(subarr) and
not (is_iterator(subarr) or
isinstance(subarr, np.ndarray))):
subarr = construct_1d_object_array_from_listlike(subarr)
elif not is_extension_type(subarr):
subarr = construct_1d_ndarray_preserving_na(subarr, dtype,
copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
# We *do* allow casting to categorical, since we know
# that Categorical is the only array type for 'category'.
subarr = Categorical(arr, dtype.categories,
ordered=dtype.ordered)
elif is_extension_array_dtype(dtype):
# create an extension array from its dtype
array_type = dtype.construct_array_type()._from_sequence
subarr = array_type(arr, dtype=dtype, copy=copy)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
|
|
#!/usr/bin/env python
import os
import pandas as pd
from StringIO import StringIO
from unittest import TestCase, main
from numpy import array, nan, arange
from numpy.testing import assert_almost_equal
from biom import Table
from pandas.util.testing import assert_frame_equal
from americangut.util import (
slice_mapping_file, parse_mapping_file,
verify_subset, concatenate_files, trim_fasta, count_samples,
count_seqs, count_unique_participants, clean_and_reformat_mapping,
add_alpha_diversity, get_single_id_lists, collapse_taxonomy, collapse_full
)
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2013, The American Gut Project"
__credits__ = ["Daniel McDonald", "Adam Robbins-Pianka"]
__license__ = "BSD"
__version__ = "unversioned"
__maintainer__ = "Daniel McDonald"
__email__ = "[email protected]"
class UtilTests(TestCase):
def test_count_samples(self):
test_mapping = ["#SampleID\tfoo\tbar",
"A\t1\t2",
"B\t1\t3",
"C\t2\t4",
"D\t3\t5",
"E\t2\t6"]
obs = count_samples(iter(test_mapping))
exp = 5
self.assertEqual(obs, exp)
        obs = count_samples(iter(test_mapping), criteria={'foo': '2'})
        exp = 2
        self.assertEqual(obs, exp)
def test_count_seqs(self):
test_seqs = [">a b",
"aattggcc",
">b.xyz stuff",
"asdasd",
">c",
"qweasd",
">d.foo",
"qweasdasd"]
obs = count_seqs(iter(test_seqs))
exp = 4
self.assertEqual(obs, exp)
obs = count_seqs(iter(test_seqs), subset=['b', 'c', 'foo'])
exp = 2
self.assertEqual(obs, exp)
def test_count_unique_participants(self):
test_mapping = ["#SampleID\tfoo\tbar\tHOST_SUBJECT_ID",
"A\t1\t2\tx",
"B\t1\t3\tx",
"C\t2\t4\ty",
"D\t3\t5\tz",
"E\t2\t6\tw"]
obs = count_unique_participants(iter(test_mapping))
exp = 4
self.assertEqual(obs, exp)
obs = count_unique_participants(iter(test_mapping),
criteria={'foo': '1'})
exp = 1
self.assertEqual(obs, exp)
obs = count_unique_participants(iter(test_mapping),
criteria={'foo': '2'})
exp = 2
self.assertEqual(obs, exp)
def test_verify_subset(self):
metadata = [('a','other stuff\tfoo'), ('b', 'asdasdasd'),
('c','123123123')]
table = Table(array([[1,2,3],[4,5,6]]),
['x', 'y'],
['a', 'b', 'c'])
self.assertTrue(verify_subset(table, metadata))
table = Table(array([[1,2],[3,4]]),
['x','y'],
['a','b'])
self.assertTrue(verify_subset(table, metadata))
table = Table(array([[1,2,3],[4,5,6]]),
['x','y'],
['a','b','x'])
self.assertFalse(verify_subset(table, metadata))
def test_slice_mapping_file(self):
header, metadata = parse_mapping_file(StringIO(test_mapping))
table = Table(array([[1,2],[4,5]]),
['x','y'],
['a','c'])
exp = ["a\t1\t123123", "c\tpoop\tdoesn't matter"]
obs = slice_mapping_file(table, metadata)
self.assertEqual(obs,exp)
def test_parse_mapping_file(self):
exp = ("#SampleIDs\tfoo\tbar", [['a','1\t123123'],
['b','yy\txxx'],
['c',"poop\tdoesn't matter"]])
obs = parse_mapping_file(StringIO(test_mapping))
self.assertEqual(obs, exp)
def test_concatenate_files(self):
expected_output = concat_test_input + concat_test_input
input_files = [StringIO(concat_test_input),
StringIO(concat_test_input)]
output_file = StringIO()
concatenate_files(input_files, output_file)
output_file.seek(0)
self.assertEqual(expected_output, output_file.read())
# try again with a tiny chunk size
input_files = [StringIO(concat_test_input),
StringIO(concat_test_input)]
output_file = StringIO()
concatenate_files(input_files, output_file, 2)
output_file.seek(0)
self.assertEqual(expected_output, output_file.read())
def test_trim_fasta(self):
infasta = StringIO(test_fasta)
# Trim length 10
expected = (">seq1\n"
"0123456789\n"
">seq2\n"
"0123456789\n"
">seq3\n"
"012345\n")
outfasta = StringIO()
trim_fasta(infasta, outfasta, 10)
outfasta.seek(0)
self.assertEqual(expected, outfasta.read())
def test_clean_and_reformat_mapping(self):
"""Exercise the reformat mapping code, verify expected results"""
out = StringIO()
reformat_mapping_testdata.seek(0)
clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
'test')
out.seek(0)
# verify the resulting header structure
test_mapping = [l.strip().split('\t') for l in out]
test_header = test_mapping[0]
self.assertEqual(test_header[-4:], ['SIMPLE_BODY_SITE',
'TITLE_ACRONYM', 'TITLE_BODY_SITE',
'HMP_SITE'])
self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
'UBERON_mucosa_of_tongue', '5',
'ORAL', 'test', 'test-ORAL',
'ORAL'])
self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
'UBERON:FECES', '10',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
'UBERON_FECES', '15',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
'UBERON:SKIN', '37',
'SKIN', 'test', 'test-SKIN',
'SKIN'])
def test_clean_and_reformat_mapping_nopgp(self):
"""Exercise the reformat mapping code, verify expected results"""
out = StringIO()
reformat_mapping_testdata.seek(0)
clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
'test')
out.seek(0)
# verify the resulting header structure
test_mapping = [l.strip().split('\t') for l in out]
test_header = test_mapping[0]
self.assertEqual(test_header[-4:], ['SIMPLE_BODY_SITE',
'TITLE_ACRONYM', 'TITLE_BODY_SITE',
'HMP_SITE'])
self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
'UBERON_mucosa_of_tongue', '5',
'ORAL', 'test', 'test-ORAL',
'ORAL'])
self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
'UBERON:FECES', '10',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
'UBERON_FECES', '15',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
'UBERON:SKIN', '37',
'SKIN', 'test', 'test-SKIN',
'SKIN'])
def test_clean_and_reformat_mapping_allpgp(self):
"""Exercise the reformat mapping code, verify expected results"""
out = StringIO()
reformat_mapping_testdata.seek(0)
clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
'test')
out.seek(0)
# verify the resulting header structure
test_mapping = [l.strip().split('\t') for l in out]
test_header = test_mapping[0]
self.assertEqual(test_header[-4:], ['SIMPLE_BODY_SITE',
'TITLE_ACRONYM', 'TITLE_BODY_SITE',
'HMP_SITE'])
self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
'UBERON_mucosa_of_tongue', '5',
'ORAL', 'test', 'test-ORAL',
'ORAL'])
self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
'UBERON:FECES', '10',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
'UBERON_FECES', '15',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
'UBERON:SKIN', '37',
'SKIN', 'test', 'test-SKIN',
'SKIN'])
def test_add_alpha_diversity(self):
map_ = pd.DataFrame(
array([
['GAZ:w00t', '43.0', 'UBERON_mucosa_of_tongue', '5'],
['GAZ:left', '51.0', 'UBERON:FECES', '10'],
['GAZ:right', '12.0', 'UBERON_FECES', '15'],
['GAZ:stuff', '32.0', 'unknown', '26'],
['GAZ:stuff', '56.0', 'UBERON:SKIN', '37'],
]),
columns=['COUNTRY', 'AGE', 'BODY_SITE', 'BMI'],
index=['A', 'B', 'C', 'D', 'E']
)
alpha = {
'alpha_1': pd.DataFrame(
array([
['0', '1', '2', '3', '4'],
['100', '100', '100', '100', '100'],
[nan, nan, nan, nan, nan],
['14.5', '14.0', '15.1', '14.7', '14.4'],
['12.1', '15.2', '13.1', '14.1', '12.8'],
['16.2', '16.5', '16.9', '15.9', '16.2'],
['10.1', '9.8', '10.5', '10.0', '10.2'],
]),
columns=[
'alpha_rarefaction_100_0.txt',
'alpha_rarefaction_100_1.txt',
'alpha_rarefaction_100_2.txt',
'alpha_rarefaction_100_3.txt',
'alpha_rarefaction_100_4.txt',
],
index=['sequences per sample', 'iteration',
'A', 'B', 'C', 'D', 'E']
)
}
expected = pd.DataFrame(
array([
['GAZ:left', '51.0', 'UBERON:FECES', '10', 14.54],
['GAZ:right', '12.0', 'UBERON_FECES', '15', 13.46],
['GAZ:stuff', '32.0', 'unknown', '26', 16.34],
['GAZ:stuff', '56.0', 'UBERON:SKIN', '37', 10.12]
]),
index=['B', 'C', 'D', 'E'],
columns=['COUNTRY', 'AGE', 'BODY_SITE', 'BMI', 'alpha_1']
)
expected['alpha_1'] = expected['alpha_1'].astype(float)
test = add_alpha_diversity(map_, alpha)
assert_frame_equal(expected, test)
def test_get_single_id_list(self):
map_ = pd.DataFrame(
array([
['GAZ:w00t', '43.0', 'UBERON_mucosa_of_tongue', '5', 'A',
'12'],
['GAZ:left', '51.0', 'UBERON:FECES', '10', 'B', '1500'],
['GAZ:right', '12.0', 'UBERON_FECES', '15', 'C', '121'],
['GAZ:stuff', '32.0', 'unknown', '26', 'D', '150'],
['GAZ:stuff', '56.0', 'UBERON:SKIN', '37', 'E', '201'],
]),
columns=['COUNTRY', 'AGE', 'BODY_SITE', 'BMI', 'HOST_SUBJECT_ID',
'depth'],
index=['A', 'B', 'C', 'D', 'E']
)
depths = [100]
test = get_single_id_lists(map_, depths)
known = {100: ['B', 'C', 'D', 'E'],
'unrare': ['A', 'B', 'C', 'D', 'E']}
self.assertEqual(test, known)
def test_collapse_taxonomy(self):
obs = collapse_taxonomy(table)
exp = Table(array([[100.0, 105.0, 110.0, 115.0],
[44.0, 46.0, 48.0, 50.0],
[36.0, 39.0, 42.0, 45.0]]),
['Bacteria; Firmicutes', 'Bacteria; Bacteroidetes',
'Bacteria; Proteobacteria'], sample_ids,
sample_metadata=sample_metadata, observation_metadata=[
{'collapsed_ids': ['O0', 'O1', 'O7', 'O8', 'O9']},
{'collapsed_ids': ['O5', 'O6']},
{'collapsed_ids': ['O2', 'O3', 'O4']}])
self.assertEqual(obs, exp)
def test_collapse_full(self):
obs = collapse_full(table)
exp = Table(array([[0.00769230769231], [0.0282051282051],
[0.0487179487179], [0.0692307692308],
[0.0897435897436], [0.110256410256],
[0.130769230769], [0.151282051282],
[0.171794871795], [0.192307692308]]),
observ_ids, ['average'],
observation_metadata=observ_metadata)
for r in range(10):
assert_almost_equal(obs[r, 0], exp[r, 0])
self.assertEqual(obs.ids(), exp.ids())
self.assertItemsEqual(obs.ids('observation'), exp.ids('observation'))
obs_meta = []
for _, _, m in obs.iter(axis='observation'):
obs_meta.append(m)
self.assertItemsEqual(obs_meta, observ_metadata)
test_mapping = """#SampleIDs\tfoo\tbar
a\t1\t123123
b\tyy\txxx
c\tpoop\tdoesn't matter
"""
concat_test_input = """This is
a
test file that is used
in the concatenation test. The file will be concatenated to itself."""
test_fasta = """>seq1
0123456789
>seq2
0123456789AB
>seq3
012345"""
reformat_mapping_testdata = StringIO(
"""#SampleID COUNTRY AGE BODY_SITE BMI
A GAZ:w00t 43.0 UBERON_mucosa_of_tongue 5
B GAZ:left 51.0 UBERON:FECES 10
C GAZ:right 12.0 UBERON_FECES 15
D GAZ:stuff 32.0 unknown 26
E GAZ:stuff 56.0 UBERON:SKIN 37
""")
data = arange(40).reshape(10, 4)
sample_ids = ['S%d' % i for i in range(4)]
observ_ids = ['O%d' % i for i in range(10)]
sample_metadata = [{'environment': 'A'}, {'environment': 'B'},
{'environment': 'A'}, {'environment': 'B'}]
observ_metadata = [{'taxonomy': ['Bacteria', 'Firmicutes']},
{'taxonomy': ['Bacteria', 'Firmicutes']},
{'taxonomy': ['Bacteria', 'Proteobacteria']},
{'taxonomy': ['Bacteria', 'Proteobacteria']},
{'taxonomy': ['Bacteria', 'Proteobacteria']},
{'taxonomy': ['Bacteria', 'Bacteroidetes']},
{'taxonomy': ['Bacteria', 'Bacteroidetes']},
{'taxonomy': ['Bacteria', 'Firmicutes']},
{'taxonomy': ['Bacteria', 'Firmicutes']},
{'taxonomy': ['Bacteria', 'Firmicutes']}]
table = Table(data, observ_ids, sample_ids, observ_metadata,
sample_metadata, table_id='Example Table')
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
import operator
import pytest
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import (DatetimeIndex, TimedeltaIndex, Float64Index, Int64Index,
to_timedelta, timedelta_range, date_range,
Series,
Timestamp, Timedelta)
from pandas.errors import PerformanceWarning, NullFrequencyError
from pandas.core import ops
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
class TestTimedeltaIndexComparisons(object):
def test_tdi_cmp_str_invalid(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for left, right in [(tdi, 'a'), ('a', tdi)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left == right
with pytest.raises(TypeError):
left != right
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
exp = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, exp)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for lhs, rhs in [(left, right),
(left.astype(object), right.astype(object))]:
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
class TestTimedeltaIndexMultiplicationDivision(object):
# __mul__, __rmul__,
# __div__, __rdiv__, __floordiv__, __rfloordiv__,
# __mod__, __rmod__, __divmod__, __rdivmod__
# -------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_tdi_mul_int(self):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
result = idx * 1
tm.assert_index_equal(result, idx)
def test_tdi_rmul_int(self):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
result = 1 * idx
tm.assert_index_equal(result, idx)
def test_tdi_mul_tdlike_scalar_raises(self, delta):
rng = timedelta_range('1 days', '10 days', name='foo')
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
result = idx * np.array(5, dtype='int64')
tm.assert_index_equal(result, expected)
def test_tdi_mul_int_array(self):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
didx = TimedeltaIndex(rng5 ** 2)
result = idx * rng5
tm.assert_index_equal(result, didx)
def test_tdi_mul_dti_raises(self):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
with pytest.raises(TypeError):
idx * idx
def test_tdi_mul_too_short_raises(self):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
with pytest.raises(TypeError):
idx * TimedeltaIndex(np.arange(3))
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_tdi_mul_int_series(self):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
didx = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
result = idx * Series(np.arange(5, dtype='int64'))
tm.assert_series_equal(result, Series(didx))
def test_tdi_mul_float_series(self):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
rng5f = np.arange(5, dtype='float64')
result = idx * Series(rng5f + 0.1)
expected = Series(TimedeltaIndex(rng5f * (rng5f + 0.1)))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('other', [np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)])
def test_tdi_rmul_arraylike(self, other):
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
result = other * tdi
tm.assert_index_equal(result, expected)
commute = tdi * other
tm.assert_index_equal(commute, expected)
# -------------------------------------------------------------
# TimedeltaIndex.__div__
def test_tdi_div_int(self):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
result = idx / 1
tm.assert_index_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta):
rng = timedelta_range('1 days', '10 days', name='foo')
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
result = rng / delta
tm.assert_index_equal(result, expected, exact=False)
def test_tdi_div_tdlike_scalar_with_nat(self, delta):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
result = rng / delta
tm.assert_index_equal(result, expected)
def test_tdi_div_nat_raises(self):
        # don't allow division by NaT (maybe we could in the future)
rng = timedelta_range('1 days', '10 days', name='foo')
with pytest.raises(TypeError):
rng / pd.NaT
# -------------------------------------------------------------
# TimedeltaIndex.__floordiv__
def test_tdi_floordiv_int(self):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
result = idx // 1
tm.assert_index_equal(result, idx)
def test_tdi_floordiv_tdlike_scalar(self, delta):
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
result = tdi // delta
tm.assert_index_equal(result, expected, exact=False)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()])
def test_tdi_floordiv_timedelta_scalar(self, scalar_td):
# GH#19125
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
res = tdi.__rfloordiv__(scalar_td)
tm.assert_index_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
res = tdi // (scalar_td)
tm.assert_index_equal(res, expected)
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub])
def test_tdi_add_sub_float(self, op, other):
dti = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
tdi = dti - dti.shift(1)
with pytest.raises(TypeError):
op(tdi, other)
def test_tdi_add_str_invalid(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('freq', [None, 'H'])
def test_tdi_sub_period(self, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub])
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_dti_sub_pi(self, tdi_freq, pi_freq, op):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = pd.Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
with pytest.raises(TypeError):
op(dti, pi)
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)
with pytest.raises(NullFrequencyError):
tdi.shift(2)
# -------------------------------------------------------------
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_tdi_add_offset_index(self, names):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_index_equal(res2, expected)
def test_tdi_add_offset_array(self):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_tdi_sub_offset_index(self, names):
# GH#18824, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_index_equal(res, expected)
def test_tdi_sub_offset_array(self):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_tdi_with_offset_series(self, names):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_series_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_series_equal(res2, expected_add)
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_series_equal(res3, expected_sub)
@pytest.mark.parametrize('box', [np.array, pd.Index, pd.Series])
def test_tdi_add_sub_anchored_offset_arraylike(self, box):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and integer
def test_tdi_add_int(self, one):
# Variants of `one` for #19012
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + one
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_iadd_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
rng += one
tm.assert_index_equal(rng, expected)
def test_tdi_sub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - one
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_isub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_add_integer_array(self, box):
# GH#19959
rng = timedelta_range('1 days 09:00:00', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['1 day 13:00:00'] * 3)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_sub_integer_array(self, box):
# GH#19959
rng = timedelta_range('9H', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['5H', '7H', '9H'])
result = rng - other
tm.assert_index_equal(result, expected)
result = other - rng
tm.assert_index_equal(result, -expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_addsub_integer_array_no_freq(self, box):
# GH#19959
tdi = TimedeltaIndex(['1 Day', 'NaT', '3 Hours'])
other = box([14, -1, 16])
with pytest.raises(NullFrequencyError):
tdi + other
with pytest.raises(NullFrequencyError):
other + tdi
with pytest.raises(NullFrequencyError):
tdi - other
with pytest.raises(NullFrequencyError):
other - tdi
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
def test_tdi_add_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_iadd_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng += delta
tm.assert_index_equal(rng, expected)
def test_tdi_sub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
def test_tdi_isub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and datetime-like
def test_tdi_sub_timestamp_raises(self):
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
def test_tdi_add_timestamp(self):
idx = TimedeltaIndex(['1 day', '2 day'])
result = idx + Timestamp('2011-01-01')
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_tdi_radd_timestamp(self):
idx = TimedeltaIndex(['1 day', '2 day'])
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_tdi_sub_dt64_array(self):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
expected = pd.DatetimeIndex(dtarr) - tdi
result = dtarr - tdi
tm.assert_index_equal(result, expected)
def test_tdi_add_dt64_array(self):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
result = tdi + dtarr
tm.assert_index_equal(result, expected)
result = dtarr + tdi
tm.assert_index_equal(result, expected)
def test_tdi_add_td64_array(self):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
result = tdi + tdarr
tm.assert_index_equal(result, expected)
result = tdarr + tdi
tm.assert_index_equal(result, expected)
def test_tdi_sub_td64_array(self):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
result = tdi - tdarr
tm.assert_index_equal(result, expected)
result = tdarr - tdi
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
pytest.raises(TypeError, lambda: tdi - dt)
pytest.raises(TypeError, lambda: tdi - dti)
pytest.raises(TypeError, lambda: td - dt)
pytest.raises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
pytest.raises(TypeError, lambda: dt_tz - ts)
pytest.raises(TypeError, lambda: dt_tz - dt)
pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
pytest.raises(TypeError, lambda: dt - dt_tz)
pytest.raises(TypeError, lambda: ts - dt_tz)
pytest.raises(TypeError, lambda: ts_tz2 - ts)
pytest.raises(TypeError, lambda: ts_tz2 - dt)
pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
pytest.raises(TypeError, lambda: dti - ts_tz)
pytest.raises(TypeError, lambda: dti_tz - ts)
pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
pytest.raises(ValueError, lambda: tdi + dti[0:1])
pytest.raises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
pytest.raises(NullFrequencyError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
pytest.raises(TypeError, lambda: td + np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
tm.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(-other + td, expected)
pytest.raises(TypeError, lambda: td - np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
pytest.raises(TypeError, lambda: td * other)
pytest.raises(TypeError, lambda: other * td)
tm.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
tm.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
assert s.dtype == object
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
assert s2.dtype == object
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
assert s.dtype == object
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 + NA
tm.assert_frame_equal(actual, dfn)
actual = df1 - NA
tm.assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
def test_add_overflow(self):
# see gh-14068
msg = "too (big|large) to convert"
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assert_raises_regex(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assert_raises_regex(OverflowError, msg):
Timestamp('2000') + to_timedelta([106580], 'D')
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta([_NaT]) - Timedelta('1 days')
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with tm.assert_raises_regex(OverflowError, msg):
(to_timedelta([_NaT, '5 days', '1 hours']) -
to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (to_timedelta([pd.NaT, '5 days', '1 hours']) +
to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
def test_timedeltaindex_add_timestamp_nat_masking(self):
# GH17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4D'
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2D'
rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
result = abs(rng)
exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
'2 days'], name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# TODO: Needs more informative name, probably split up into
# more targeted tests
def test_timedelta(self, freq):
index = date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH4134, buggy with timedeltas
rng = date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
|
|
import os
from AppKit import NSMenuItem, NSImage, NSBezierPath, NSColor, NSCompositeSourceOver, NSGradient, NSNull, \
NSMutableDictionary, NSMutableParagraphStyle, NSParagraphStyleAttributeName, NSFormatter, \
NSLineBreakByTruncatingHead, NSActionCell, NSAttributedString
import vanilla
from defconAppKit.controls.placardScrollView import DefconAppKitPlacardNSScrollView, PlacardPopUpButton
# -------
# Sorting
# -------
def fontFileNameSort(fonts):
sortable = []
noPathCounter = 0
for font in fonts:
if font.path is not None:
s = os.path.basename(font.path)
else:
noPathCounter += 1
s = []
if font.info.familyName is not None:
s = font.info.familyName
else:
s = "Untitled Family"
if font.info.styleName is not None:
s += "-" + font.info.styleName
else:
s += "-Untitled Style"
sortable.append((s, font))
fonts = [item[-1] for item in sorted(sortable, key=lambda x: x[:-1])]
return fonts
def _isItalic(font):
isItalic = False
if font.info.styleMapStyleName is not None and "italic" in font.info.styleMapStyleName:
isItalic = True
elif font.info.italicAngle != 0:
isItalic = True
return isItalic
def fontWidthWeightSort(fonts):
sortable = []
for font in fonts:
isItalic = _isItalic(font)
fileName = None
if font.path is not None:
fileName = os.path.basename(font.path)
s = (
font.info.familyName,
font.info.openTypeOS2WidthClass,
font.info.openTypeOS2WeightClass,
isItalic,
font.info.styleName,
fileName,
font
)
sortable.append(s)
fonts = [item[-1] for item in sorted(sortable, key=lambda x: x[:-1])]
return fonts
# -----------
# Main Object
# -----------
class FontList(vanilla.List):
"""
This object presents the user with a standard list showing fonts.
It follows the same API as vanilla.List. When you set objects into
the view, you always pass font objects. The object will then extract
the relevant data to display.
Constructor Arguments:
All of the vanilla.List contstructor arguments apply, with the
following modifications.
columnDescriptions
This sets up the columns in the list. These follow the same format
of the column descriptions in vanilla.List. The only exception is that
you need to provide an "attribute" key/value pair. This is the font
attribute that the list will extract display values from. For example:
dict(title="Font Path", key="fontPath", attribute="path")
If no columnDescriptions is provided, the font will be shown in a single
single column represented with its file name or a combination of its
family and style names.
The list may show an "Options..." placard if either of the following is given:
placardSortItems
A list of dictionaries describing font sorting options. The dictionaries
must follow this form:
dict(title=string, callback=callback)
The title must begin with "Sort by" for this to work properly. The callback
must accept one argument: fonts. This will be a list of all fonts in the list.
The callback should return a list of sorted fonts.
placardItems
A list of dictionaries describing arbitrary items to show in the placard.
The dictionaries must follow this form:
dict(title=string, callback=callback)
The callback must accept one argument, sender, which will be the font list.
"""
nsScrollViewClass = DefconAppKitPlacardNSScrollView
def __init__(self, posSize, items,
placardSortItems=[
dict(title="Sort by File Name", callback=fontFileNameSort),
dict(title="Sort by Weight and Width", callback=fontWidthWeightSort),
],
placardItems=[],
**kwargs):
# make default column descriptions if needed
if not kwargs.get("columnDescriptions"):
kwargs["columnDescriptions"] = [fontListFontNameColumnDescription]
kwargs["showColumnTitles"] = False
# set some defaults
kwargs["autohidesScrollers"] = False
# build the internal column reference
self._keyToAttribute = {}
self._orderedListKeys = []
self._wrappedListItems = {}
for columnDescription in kwargs["columnDescriptions"]:
title = columnDescription["title"]
key = columnDescription.get("key", title)
attribute = columnDescription["attribute"]
self._keyToAttribute[key] = attribute
self._orderedListKeys.append(key)
# wrap the items
items = [self._wrapFontForList(font) for font in items]
# start the list
super(FontList, self).__init__(posSize, items, **kwargs)
# set the initial sort mode
self._sortMode = None
self._placardSortOptions = {}
        self._placardOptions = {}
        # edit-loop guards checked in _listEditCallback
        self._listEditChangingAttribute = None
        self._listEditChangingFont = None
# placard
if len(placardSortItems) + len(placardItems):
# build the sort options
if placardSortItems:
self._sortMode = placardSortItems[0]["title"]
for d in placardSortItems:
title = d["title"]
assert title.startswith("Sort by")
self._placardSortOptions[title] = d["callback"]
# build the other options
if placardItems:
for d in placardItems:
self._placardOptions[d["title"]] = d["callback"]
# build
placardW = 65
placardH = 16
self._placard = vanilla.Group((0, 0, placardW, placardH))
# make a default item
item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_("Options...", None, "")
item.setHidden_(True)
items = [item]
# add the items
items += [d["title"] for d in placardSortItems]
items += [d["title"] for d in placardItems]
self._placard.optionsButton = PlacardPopUpButton((0, 0, placardW, placardH), items,
callback=self._placardCallback, sizeStyle="mini")
button = self._placard.optionsButton.getNSPopUpButton()
button.setTitle_("Options...")
self._nsObject.setPlacard_(self._placard.getNSView())
# update the sort
self._updateSort()
def _breakCycles(self):
for font in self._wrappedListItems.keys():
self._unsubscribeFromFont(font)
self._wrappedListItems.clear()
self._placard = None
self._placardSortOptions = {}
super(FontList, self)._breakCycles()
def setSortMode(self, mode):
"""
Set the sort mode in the popup.
"""
self._sortMode = mode
self._updateSort()
# -------------------
# Placard and Sorting
# -------------------
def _placardCallback(self, sender):
index = sender.get()
title = sender.getItems()[index]
# title item
if title == "Options...":
return
# sorting
elif title.startswith("Sort by"):
self._sortMode = title
self._updateSort()
# other
else:
self._placardOptions[title](self)
sender.set(0)
def _updateSort(self):
if self._sortMode is None:
return
# gather the wrappers and the selection states
oldSelection = self.getSelection()
fontToWrapper = {}
for index, wrapper in enumerate(self._arrayController.content()):
fontToWrapper[wrapper["_font"]] = (wrapper, index in oldSelection)
# sort the fonts
fonts = fontToWrapper.keys()
sortFunction = self._placardSortOptions[self._sortMode]
fonts = sortFunction(fonts)
# clear the list
count = len(self)
for index in range(count):
count -= 1
super(FontList, self).__delitem__(count)
# reset the items
sortedWrappers = []
newSelection = []
for index, font in enumerate(fonts):
wrapper, selected = fontToWrapper[font]
sortedWrappers.append(wrapper)
if selected:
newSelection.append(index)
super(FontList, self).set(sortedWrappers)
# reset the selection
self.setSelection(newSelection)
# -------------
# list behavior
# -------------
def _subscribeToFont(self, font):
font.addObserver(self, "_fontChanged", "Font.Changed")
def _unsubscribeFromFont(self, font):
font.removeObserver(self, "Font.Changed")
def _fontChanged(self, notification):
font = notification.object
if font not in self._wrappedListItems:
return
d = self._wrappedListItems[font]
for key, attr in self._keyToAttribute.items():
if attr == defaultFontIDAttribute:
value = makeDefaultIDString(font)
else:
value = getattr(font, attr)
d[key] = value
# editing
def _listEditCallback(self, sender):
# skip if in an edit loop
if self._listEditChangingFont is not None:
return
if not self.getSelection():
return
columnIndex, rowIndex = sender.getEditedColumnAndRow()
if columnIndex == -1 or rowIndex == -1:
rowIndex = self.getSelection()[0]
editedKey = None
editedAttribute = None
else:
editedKey = self._orderedListKeys[columnIndex]
editedAttribute = self._keyToAttribute[editedKey]
item = super(FontList, self).__getitem__(rowIndex)
        font = item["_font"]
self._listEditChangingAttribute = editedAttribute
self._listEditChangingFont = font
        # known attribute. process it individually.
if editedAttribute is not None:
# set the attribute
value = item[editedKey]
fontValue = getattr(font, editedAttribute)
if value != fontValue:
setattr(font, editedAttribute, value)
# unknown attribute. process all.
else:
for key, attribute in self._keyToAttribute.items():
value = getattr(font, attribute)
if value != item[key]:
setattr(font, attribute, item[key])
# update the dict contents
for key, attribute in self._keyToAttribute.items():
if key == editedKey and attribute == editedAttribute:
continue
value = getattr(font, attribute)
if value != item[key]:
item[key] = value
self._listEditChangingAttribute = None
self._listEditChangingFont = None
# wrapping
def _wrapFontForList(self, font):
changed = False
if font in self._wrappedListItems:
d = self._wrappedListItems[font]
else:
d = NSMutableDictionary.dictionary()
self._subscribeToFont(font)
for key, attribute in self._keyToAttribute.items():
if attribute == defaultFontIDAttribute:
value = makeDefaultIDString(font)
else:
value = getattr(font, attribute)
            if key not in d or d.get(key) != value:
d[key] = value
changed = True
d["_font"] = font
if changed:
self._wrappedListItems[font] = d
return d
def _unwrapListItems(self, items=None):
if items is None:
items = super(FontList, self).get()
fonts = [d["_font"] for d in items]
return fonts
# standard API
def __contains__(self, font):
return font in self._wrappedListItems
def __getitem__(self, index):
item = super(FontList, self).__getitem__(index)
font = self._unwrapListItems([item])[0]
return font
    def __setitem__(self, index, font):
        # grab the existing wrapped item so it can be unsubscribed below
        existing = super(FontList, self).__getitem__(index)
        item = self._wrapFontForList(font)
        super(FontList, self).__setitem__(index, item)
if not super(FontList, self).__contains__(existing):
otherFont = existing["_font"]
del self._wrappedListItems[otherFont]
self._unsubscribeFromFont(otherFont)
def __delitem__(self, index):
item = super(FontList, self).__getitem__(index)
super(FontList, self).__delitem__(index)
if not super(FontList, self).__contains__(item):
font = item["_font"]
del self._wrappedListItems[font]
self._unsubscribeFromFont(font)
def append(self, font):
item = self._wrapFontForList(font)
super(FontList, self).append(item)
def remove(self, font):
item = self._wrappedListItems[font]
super(FontList, self).remove(item)
if not super(FontList, self).__contains__(item):
font = item["_font"]
del self._wrappedListItems[font]
self._unsubscribeFromFont(font)
def index(self, font):
item = self._wrappedListItems[font]
return super(FontList, self).index(item)
def insert(self, index, font):
item = self._wrapFontForList(font)
super(FontList, self).insert(index, item)
def extend(self, fonts):
items = [self._wrapFontForList(font) for font in fonts]
super(FontList, self).extend(items)
def set(self, fonts):
"""
Set the fonts in the list.
"""
# remove removed wrapped items
removedFonts = set(self._wrappedListItems) - set(fonts)
for font in removedFonts:
del self._wrappedListItems[font]
self._unsubscribeFromFont(font)
# wrap the fonts for the list
wrappedFonts = [self._wrapFontForList(font) for font in fonts]
# set the list
super(FontList, self).set(wrappedFonts)
def get(self):
"""
Get the fonts in the list.
"""
return self._unwrapListItems()
# --------------------------
# Formatters, Cells and Such
# --------------------------
class DirtyStatusIndicatorCell(NSActionCell):
def drawWithFrame_inView_(self, frame, view):
value = self.objectValue()
        if not value:
            # nothing to draw for a clean font
            return
        image = _drawDirtyStateImage(value)
        image.drawAtPoint_fromRect_operation_fraction_(frame.origin, ((0, 0), (13, 17)), NSCompositeSourceOver, 1.0)
def _drawDirtyStateImage(value):
if value:
imageName = "defconAppKitFontListDirtyStateTrue"
else:
imageName = "defconAppKitFontListDirtyStateFalse"
image = NSImage.imageNamed_(imageName)
if image is None:
# make the image
width = 13
height = 17
image = NSImage.alloc().initWithSize_((width, height))
image.lockFocus()
# draw if dirty
if value:
rect = ((2, 4), (9, 9))
path = NSBezierPath.bezierPathWithOvalInRect_(rect)
path.addClip()
# colors
color1 = NSColor.colorWithCalibratedRed_green_blue_alpha_(1.0, 0.1, 0.1, 1)
color2 = NSColor.colorWithCalibratedRed_green_blue_alpha_(0.5, 0.0, 0.0, 1)
# fill
color1.set()
path.fill()
# shadow
try:
gradient = NSGradient.alloc().initWithColors_([color1, color2])
gradient.drawInBezierPath_angle_(path, -90)
except NameError:
pass
# stroke
color2.set()
path.setLineWidth_(2)
path.stroke()
image.unlockFocus()
image.setName_(imageName)
image = NSImage.imageNamed_(imageName)
return image
class FilePathFormatter(NSFormatter):
def stringForObjectValue_(self, obj):
if obj is None or isinstance(obj, NSNull):
return ""
return obj
def attributedStringForObjectValue_withDefaultAttributes_(self, obj, attrs):
if obj is None or isinstance(obj, NSNull):
obj = ""
paragraph = NSMutableParagraphStyle.alloc().init()
paragraph.setLineBreakMode_(NSLineBreakByTruncatingHead)
attrs = dict(attrs)
attrs[NSParagraphStyleAttributeName] = paragraph
return NSAttributedString.alloc().initWithString_attributes_(obj, attrs)
def objectValueForString_(self, string):
return string
def makeDefaultIDString(font):
if font.path is None:
if font.info.familyName is not None:
s = font.info.familyName
else:
s = "Untitled Family"
if font.info.styleName is not None:
s += "-" + font.info.styleName
else:
s += "-Untitled Style"
return s
else:
return os.path.basename(font.path)
# --------------------------
# Common Column Descriptions
# --------------------------
defaultFontIDAttribute = "defconAppKitFontIDString"
fontListFontNameColumnDescription = dict(title="Font", attribute=defaultFontIDAttribute, editable=False)
fontListFontPathColumnDescription = dict(title="Path", attribute="path", editable=False, formatter=FilePathFormatter.alloc().init())
fontListDirtyStateColoumnDescription = dict(title="Dirty", attribute="dirty", cell=DirtyStatusIndicatorCell.alloc().init(), width=13, editable=False)
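# --------------------------
# Usage Sketch
# --------------------------
# A minimal, hypothetical usage sketch (not part of the original module). It
# shows how the columnDescriptions convention described in the FontList
# docstring ties each column to a font attribute, reusing the common column
# descriptions defined above. The function name, the window geometry and the
# assumption that `fonts` is a sequence of defcon-style Font objects are all
# illustrative only.
def _exampleFontListUsage(fonts):
    columnDescriptions = [
        fontListFontNameColumnDescription,
        fontListFontPathColumnDescription,
    ]
    window = vanilla.Window((500, 300), "Fonts")
    # the list extracts display values from each font via the "attribute" keys
    window.fontList = FontList((10, 10, -10, -10), fonts,
                               columnDescriptions=columnDescriptions)
    window.open()
    return window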
|
|
"""The tests for the Demo Media player platform."""
import datetime
import socket
import unittest
import pysonos.snapshot
from unittest import mock
import pysonos
from pysonos import alarms
from homeassistant.setup import setup_component
from homeassistant.components.sonos import media_player as sonos
from homeassistant.components.media_player.const import DOMAIN
from homeassistant.components.sonos.media_player import CONF_INTERFACE_ADDR
from homeassistant.const import CONF_HOSTS, CONF_PLATFORM
from tests.common import get_test_home_assistant
ENTITY_ID = 'media_player.kitchen'
class pysonosDiscoverMock():
"""Mock class for the pysonos.discover method."""
def discover(interface_addr):
"""Return tuple of pysonos.SoCo objects representing found speakers."""
return {SoCoMock('192.0.2.1')}
class AvTransportMock():
"""Mock class for the avTransport property on pysonos.SoCo object."""
def __init__(self):
"""Initialize ethe Transport mock."""
pass
def GetMediaInfo(self, _):
"""Get the media details."""
return {
'CurrentURI': '',
'CurrentURIMetaData': ''
}
class MusicLibraryMock():
"""Mock class for the music_library property on pysonos.SoCo object."""
def get_sonos_favorites(self):
"""Return favorites."""
return []
class SoCoMock():
"""Mock class for the pysonos.SoCo object."""
def __init__(self, ip):
"""Initialize SoCo object."""
self.ip_address = ip
self.is_visible = True
self.volume = 50
self.mute = False
self.shuffle = False
self.night_mode = False
self.dialog_mode = False
self.music_library = MusicLibraryMock()
self.avTransport = AvTransportMock()
def get_sonos_favorites(self):
"""Get favorites list from sonos."""
return {'favorites': []}
def get_speaker_info(self, force):
"""Return a dict with various data points about the speaker."""
return {'serial_number': 'B8-E9-37-BO-OC-BA:2',
'software_version': '32.11-30071',
'uid': 'RINCON_B8E937BOOCBA02500',
'zone_icon': 'x-rincon-roomicon:kitchen',
'mac_address': 'B8:E9:37:BO:OC:BA',
'zone_name': 'Kitchen',
'model_name': 'Sonos PLAY:1',
'hardware_version': '1.8.1.2-1'}
def get_current_transport_info(self):
"""Return a dict with the current state of the speaker."""
return {'current_transport_speed': '1',
'current_transport_state': 'STOPPED',
'current_transport_status': 'OK'}
def get_current_track_info(self):
"""Return a dict with the current track information."""
return {'album': '',
'uri': '',
'title': '',
'artist': '',
'duration': '0:00:00',
'album_art': '',
'position': '0:00:00',
'playlist_position': '0',
'metadata': ''}
def is_coordinator(self):
"""Return true if coordinator."""
return True
def join(self, master):
"""Join speaker to a group."""
return
def set_sleep_timer(self, sleep_time_seconds):
"""Set the sleep timer."""
return
def unjoin(self):
"""Cause the speaker to separate itself from other speakers."""
return
def uid(self):
"""Return a player uid."""
return "RINCON_XXXXXXXXXXXXXXXXX"
def group(self):
"""Return all group data of this player."""
return
def add_entities_factory(hass):
"""Add devices factory."""
    def add_entities(devices, update_before_add=False):
"""Fake add device."""
hass.data[sonos.DATA_SONOS].devices = devices
return add_entities
class TestSonosMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def monkey_available(self):
"""Make a monkey available."""
return True
# Monkey patches
self.real_available = sonos.SonosDevice.available
sonos.SonosDevice.available = monkey_available
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
# Monkey patches
sonos.SonosDevice.available = self.real_available
self.hass.stop()
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_ensure_setup_discovery(self, *args):
"""Test a single device using the autodiscovery provided by HASS."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
devices = list(self.hass.data[sonos.DATA_SONOS].devices)
assert len(devices) == 1
assert devices[0].name == 'Kitchen'
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
@mock.patch('pysonos.discover')
def test_ensure_setup_config_interface_addr(self, discover_mock, *args):
"""Test an interface address config'd by the HASS config file."""
discover_mock.return_value = {SoCoMock('192.0.2.1')}
config = {
DOMAIN: {
CONF_PLATFORM: 'sonos',
CONF_INTERFACE_ADDR: '192.0.1.1',
}
}
assert setup_component(self.hass, DOMAIN, config)
assert len(self.hass.data[sonos.DATA_SONOS].devices) == 1
assert discover_mock.call_count == 1
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_ensure_setup_config_hosts_string_single(self, *args):
"""Test a single address config'd by the HASS config file."""
config = {
DOMAIN: {
CONF_PLATFORM: 'sonos',
CONF_HOSTS: ['192.0.2.1'],
}
}
assert setup_component(self.hass, DOMAIN, config)
devices = self.hass.data[sonos.DATA_SONOS].devices
assert len(devices) == 1
assert devices[0].name == 'Kitchen'
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_ensure_setup_config_hosts_string_multiple(self, *args):
"""Test multiple address string config'd by the HASS config file."""
config = {
DOMAIN: {
CONF_PLATFORM: 'sonos',
CONF_HOSTS: ['192.0.2.1,192.168.2.2'],
}
}
assert setup_component(self.hass, DOMAIN, config)
devices = self.hass.data[sonos.DATA_SONOS].devices
assert len(devices) == 2
assert devices[0].name == 'Kitchen'
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_ensure_setup_config_hosts_list(self, *args):
"""Test a multiple address list config'd by the HASS config file."""
config = {
DOMAIN: {
CONF_PLATFORM: 'sonos',
CONF_HOSTS: ['192.0.2.1', '192.168.2.2'],
}
}
assert setup_component(self.hass, DOMAIN, config)
devices = self.hass.data[sonos.DATA_SONOS].devices
assert len(devices) == 2
assert devices[0].name == 'Kitchen'
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch.object(pysonos, 'discover', new=pysonosDiscoverMock.discover)
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_ensure_setup_sonos_discovery(self, *args):
"""Test a single device using the autodiscovery provided by Sonos."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass))
devices = list(self.hass.data[sonos.DATA_SONOS].devices)
assert len(devices) == 1
assert devices[0].name == 'Kitchen'
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
@mock.patch.object(SoCoMock, 'set_sleep_timer')
def test_sonos_set_sleep_timer(self, set_sleep_timerMock, *args):
"""Ensure pysonos methods called for sonos_set_sleep_timer service."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
device = list(self.hass.data[sonos.DATA_SONOS].devices)[-1]
device.hass = self.hass
device.set_sleep_timer(30)
set_sleep_timerMock.assert_called_once_with(30)
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
@mock.patch.object(SoCoMock, 'set_sleep_timer')
def test_sonos_clear_sleep_timer(self, set_sleep_timerMock, *args):
"""Ensure pysonos method called for sonos_clear_sleep_timer service."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
device = list(self.hass.data[sonos.DATA_SONOS].devices)[-1]
device.hass = self.hass
device.set_sleep_timer(None)
set_sleep_timerMock.assert_called_once_with(None)
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('pysonos.alarms.Alarm')
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_set_alarm(self, pysonos_mock, alarm_mock, *args):
"""Ensure pysonos methods called for sonos_set_sleep_timer service."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
device = list(self.hass.data[sonos.DATA_SONOS].devices)[-1]
device.hass = self.hass
alarm1 = alarms.Alarm(pysonos_mock)
alarm1.configure_mock(_alarm_id="1", start_time=None, enabled=False,
include_linked_zones=False, volume=100)
with mock.patch('pysonos.alarms.get_alarms', return_value=[alarm1]):
attrs = {
'time': datetime.time(12, 00),
'enabled': True,
'include_linked_zones': True,
'volume': 0.30,
}
device.set_alarm(alarm_id=2)
alarm1.save.assert_not_called()
device.set_alarm(alarm_id=1, **attrs)
assert alarm1.enabled == attrs['enabled']
assert alarm1.start_time == attrs['time']
assert alarm1.include_linked_zones == \
attrs['include_linked_zones']
assert alarm1.volume == 30
alarm1.save.assert_called_once_with()
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
@mock.patch.object(pysonos.snapshot.Snapshot, 'snapshot')
def test_sonos_snapshot(self, snapshotMock, *args):
"""Ensure pysonos methods called for sonos_snapshot service."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
device = list(self.hass.data[sonos.DATA_SONOS].devices)[-1]
device.hass = self.hass
snapshotMock.return_value = True
device.snapshot()
assert snapshotMock.call_count == 1
assert snapshotMock.call_args == mock.call()
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
@mock.patch.object(pysonos.snapshot.Snapshot, 'restore')
def test_sonos_restore(self, restoreMock, *args):
"""Ensure pysonos methods called for sonos_restore service."""
from pysonos.snapshot import Snapshot
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
device = list(self.hass.data[sonos.DATA_SONOS].devices)[-1]
device.hass = self.hass
restoreMock.return_value = True
device._snapshot_coordinator = mock.MagicMock()
device._snapshot_coordinator.soco_device = SoCoMock('192.0.2.17')
device._soco_snapshot = Snapshot(device._player)
device.restore()
assert restoreMock.call_count == 1
assert restoreMock.call_args == mock.call(False)
|
|
#!/usr/bin/env python -tt
# -*- coding: UTF-8 -*-
"""
Redis Copy
Redis Copy the keys in a source redis server into another target redis server.
The script probably needs to be added to a cron job if there are many keys, because it only copies a fixed number of keys at a time
and continues from there on the next run. It does this until there are no more keys left to copy.
Usage: python redis-copy.py [options]
Options:
-l ..., --limit=... optional number of keys to copy per run; if not defined, 10000 is the default, e.g. 1000
-s ..., --source=... source redis server "ip:port" to copy keys from. e.g. 192.168.0.99:6379
-t ..., --target=... target redis server "ip:port" to copy keys to. e.g. 192.168.0.101:6379
-d ..., --databases=... comma separated list of redis databases to select when copying. e.g. 2,5
-h, --help show this help
--clean clean all variables, temp lists created previously by the script
Dependencies: redis (redis-py: sudo pip install redis)
Examples:
python redis-copy.py --help show this doc
python redis-copy.py \
--source=192.168.0.99:6379 \
--target=192.168.0.101:6379 \
--databases=2,5 --clean clean all variables, temp lists created previously by the script
python redis-copy.py \
--source=192.168.0.99:6379 \
--target=192.168.0.101:6379 \
--databases=2,5 copy all keys in db 2 and 5 from server 192.168.0.99:6379 to server 192.168.0.101:6379
with the default limit of 10000 per script run
python redis-copy.py --limit=1000 \
--source=192.168.0.99:6379 \
--target=192.168.0.101:6379 \
--databases=2,5 copy all keys in db 2 and 5 from server 192.168.0.99:6379 to server 192.168.0.101:6379
with a limit of 1000 per script run
"""
__author__ = "Salimane Adjao Moustapha ([email protected])"
__version__ = "$Revision: 1.0 $"
__date__ = "$Date: 2011/06/09 12:57:19 $"
__copyleft__ = "Copyleft (c) 2011 Salimane Adjao Moustapha"
__license__ = "MIT"
import redis
import time
import sys
import getopt
class RedisCopy:
"""A class for copying keys from one server to another.
"""
#some key prefix for this script
mprefix = 'mig:'
keylistprefix = 'keylist:'
hkeylistprefix = 'havekeylist:'
# numbers of keys to copy on each iteration
limit = 10000
def __init__(self, source, target, dbs):
self.source = source
self.target = target
self.dbs = dbs
def save_keylists(self):
"""Function to save the keys' names of the source redis server into a list for later usage.
"""
for db in self.dbs:
servername = self.source['host'] + ":" + str(
self.source['port']) + ":" + str(db)
#get redis handle for server-db
r = redis.StrictRedis(
host=self.source['host'], port=self.source['port'], db=db)
dbsize = r.dbsize()
#check whether we already have the list, if not get it
hkl = r.get(self.mprefix + self.hkeylistprefix + servername)
if hkl is None or int(hkl) != 1:
print "Saving the keys in %s to temp keylist...\n" % servername
moved = 0
r.delete(self.mprefix + self.keylistprefix + servername)
for key in r.keys('*'):
moved += 1
r.rpush(
self.mprefix + self.keylistprefix + servername, key)
if moved % self.limit == 0:
print "%d keys of %s inserted in temp keylist at %s...\n" % (moved, servername, time.strftime("%Y-%m-%d %I:%M:%S"))
r.set(self.mprefix + self.hkeylistprefix + servername, 1)
print "ALL %d keys of %s already inserted to temp keylist ...\n\n" % (dbsize - 1, servername)
def copy_db(self, limit=None):
"""Function to copy all the keys from the source into the new target.
- limit : optional numbers of keys to copy per run
"""
#set the limit per run
try:
limit = int(limit)
except (ValueError, TypeError):
limit = None
if limit is not None:
self.limit = limit
for db in self.dbs:
servername = self.source['host'] + ":" + str(
self.source['port']) + ":" + str(db)
print "Processing keys copying of server %s at %s...\n" % (
servername, time.strftime("%Y-%m-%d %I:%M:%S"))
#get redis handle for current source server-db
r = redis.StrictRedis(
host=self.source['host'], port=self.source['port'], db=db)
moved = 0
dbsize = r.dbsize() - 1
#get keys already moved
keymoved = r.get(self.mprefix + "keymoved:" + servername)
keymoved = 0 if keymoved is None else int(keymoved)
#check if we already have all keys copied for current source server-db
if dbsize < keymoved:
print "ALL %d keys from %s have already been copied.\n" % (
dbsize, servername)
continue
print "Started copy of %s keys from %d to %d at %s...\n" % (servername, keymoved, dbsize, time.strftime("%Y-%m-%d %I:%M:%S"))
#get redis handle for corresponding target server-db
rr = redis.StrictRedis(
host=self.target['host'], port=self.target['port'], db=db)
#max index for lrange
newkeymoved = keymoved + \
self.limit if dbsize > keymoved + self.limit else dbsize
for key in r.lrange(self.mprefix + self.keylistprefix + servername, keymoved, newkeymoved):
#get key type
ktype = r.type(key)
#if undefined type go to next key
if ktype == 'none':
continue
#save key to target server-db
if ktype == 'string':
rr.set(key, r.get(key))
elif ktype == 'hash':
rr.hmset(key, r.hgetall(key))
elif ktype == 'list':
if key == self.mprefix + "keylist:" + servername:
continue
#value = r.lrange(key, 0, -1)
#rr.rpush(key, *value)
for k in r.lrange(key, 0, -1):
rr.rpush(key, k)
elif ktype == 'set':
#value = r.smembers(key)
#rr.sadd(key, *value)
for k in r.smembers(key):
rr.sadd(key, k)
elif ktype == 'zset':
#value = r.zrange(key, 0, -1, withscores=True)
#rr.zadd(key, **dict(value))
for k, v in r.zrange(key, 0, -1, withscores=True):
rr.zadd(key, v, k)
# Handle keys with an expire time set
kttl = r.ttl(key)
kttl = -1 if kttl is None else int(kttl)
if kttl != -1:
rr.expire(key, kttl)
moved += 1
if moved % 10000 == 0:
print "%d keys have been copied on %s at %s...\n" % (
moved, servername, time.strftime("%Y-%m-%d %I:%M:%S"))
r.set(self.mprefix + "keymoved:" + servername, newkeymoved)
print "%d keys have been copied on %s at %s\n" % (
newkeymoved, servername, time.strftime("%Y-%m-%d %I:%M:%S"))
def flush_target(self):
"""Function to flush the target server.
"""
for db in self.dbs:
servername = self.target['host'] + ":" + str(
self.target['port']) + ":" + str(db)
print "Flushing server %s at %s...\n" % (
servername, time.strftime("%Y-%m-%d %I:%M:%S"))
r = redis.StrictRedis(
host=self.target['host'], port=self.target['port'], db=db)
r.flushdb()
print "Flushed server %s at %s...\n" % (
servername, time.strftime("%Y-%m-%d %I:%M:%S"))
def clean(self):
"""Function to clean all variables, temp lists created previously by the script.
"""
print "Cleaning all temp variables...\n"
for db in self.dbs:
servername = self.source['host'] + ":" + str(
self.source['port']) + ":" + str(db)
r = redis.StrictRedis(
host=self.source['host'], port=self.source['port'], db=db)
r.delete(self.mprefix + "keymoved:" + servername)
r.delete(self.mprefix + self.keylistprefix + servername)
r.delete(self.mprefix + self.hkeylistprefix + servername)
r.delete(self.mprefix + "firstrun")
r.delete(self.mprefix + 'run')
print "Done.\n"
def main(source, target, databases, limit=None, clean=False):
#getting source and target
if (source == target):
        exit('The two server addresses are the same. e.g. python redis-copy.py 127.0.0.1:6379 127.0.0.1:63791 0,1')
so = source.split(':')
if len(so) == 2:
source_server = {'host': so[0], 'port': int(so[1])}
else:
exit('Supplied source address is wrong. e.g. python redis-copy.py 127.0.0.1:6379 127.0.0.1:63791 0,1')
sn = target.split(':')
if len(sn) == 2:
target_server = {'host': sn[0], 'port': int(sn[1])}
else:
exit('Supplied target address is wrong. e.g. python redis-copy.py 127.0.0.1:6379 127.0.0.1:63791 0,1')
#getting the dbs
dbs = [int(k) for k in databases.split(',')]
if len(dbs) < 1:
exit('Supplied list of db is wrong. e.g. python redis-copy.py 127.0.0.1:6379 127.0.0.1:63791 0,1')
try:
r = redis.StrictRedis(
host=source_server['host'], port=source_server['port'], db=dbs[0])
    except AttributeError:
        exit('This script requires redis-py >= 2.4.10; your current version is: ' + redis.__version__)
mig = RedisCopy(source_server, target_server, dbs)
    if not clean:
#check if script already running
run = r.get(mig.mprefix + "run")
if run is not None and int(run) == 1:
exit('another process already running the script')
r.set(mig.mprefix + 'run', 1)
mig.save_keylists()
firstrun = r.get(mig.mprefix + "firstrun")
firstrun = 0 if firstrun is None else int(firstrun)
if firstrun == 0:
mig.flush_target()
r.set(mig.mprefix + "firstrun", 1)
mig.copy_db(limit)
else:
mig.clean()
r.set(mig.mprefix + 'run', 0)
def usage():
print __doc__
if __name__ == "__main__":
clean = False
try:
opts, args = getopt.getopt(sys.argv[1:], "hl:s:t:d:", ["help", "limit=", "source=", "target=", "databases=", "clean"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt == "--clean":
clean = True
elif opt in ("-l", "--limit"):
limit = arg
elif opt in ("-s", "--source"):
source = arg
elif opt in ("-t", "--target"):
target = arg
elif opt in ("-d", "--databases"):
databases = arg
try:
limit = int(limit)
except (NameError, TypeError, ValueError):
limit = None
try:
main(source, target, databases, limit, clean)
except NameError as e:
usage()
|
|
# This file is part of ZS
# Copyright (C) 2013-2014 Nathaniel Smith <[email protected]>
# See file LICENSE.txt for license information.
import os
import os.path
import sys
import hashlib
from six import int2byte, byte2int, BytesIO, integer_types
from nose.tools import assert_raises
from .util import test_data_path
from .http_harness import web_server
from zs import ZS, ZSError, ZSCorrupt
from zs._zs import pack_data_records
from zs.common import read_length_prefixed, codec_shorthands
# letters.zs contains records:
# [b, bb, d, dd, f, ff, ..., z, zz]
letters_records = []
for i in range(1, 26, 2):
letter = int2byte(byte2int(b"a") + i)
letters_records += [letter, 2 * letter]
letters_sha256 = hashlib.sha256(pack_data_records(letters_records)).digest()
def identity(x):
return x
def _check_map_helper(records, arg1, arg2):
assert arg1 == 1
assert arg2 == 2
return records
def _check_raise_helper(records, exc):
raise exc
def check_letters_zs(z, codec_shorthand):
assert isinstance(z.root_index_offset, integer_types)
assert isinstance(z.root_index_length, integer_types)
assert isinstance(z.total_file_length, integer_types)
assert z.codec == codec_shorthands[codec_shorthand]
assert z.data_sha256 == letters_sha256
assert z.metadata == {
u"test-data": u"letters",
u"build-info": {
u"user": u"test-user",
u"host": u"test-host",
u"time": u"2000-01-01T00:00:00.000000Z",
u"version": u"zs test",
},
}
assert isinstance(z.root_index_level, integer_types)
assert list(z) == letters_records
assert list(z.search()) == letters_records
if "ZS_QUICK_TEST" in os.environ:
chars = "m"
else:
chars = "abcdefghijklmnopqrstuvwxyz"
for char in chars:
byte = char.encode("ascii")
for (start, stop, prefix) in [
(None, None, None),
(byte, None, None),
(None, byte, None),
(None, None, byte),
(byte, byte, None),
(byte, int2byte(byte2int(byte) + 1), None),
(byte, int2byte(byte2int(byte) + 2), None),
(byte, int2byte(byte2int(byte) + 3), None),
(byte, b"q", None),
(None, 2 * byte, byte),
(b"m", b"s", byte),
]:
print("start=%r, stop=%r, prefix=%r" % (start, stop, prefix))
expected = letters_records
if start is not None:
expected = [r for r in expected if r >= start]
if stop is not None:
expected = [r for r in expected if not r >= stop]
if prefix is not None:
expected = [r for r in expected if r.startswith(prefix)]
assert list(z.search(start=start, stop=stop, prefix=prefix)
) == expected
map_blocks = list(z.block_map(
_check_map_helper,
# test args and kwargs argument passing
args=(1,), kwargs={"arg2": 2},
start=start, stop=stop, prefix=prefix))
assert sum(map_blocks, []) == expected
for term in [b"\n", b"\x00"]:
expected_dump = term.join(expected + [b""])
out = BytesIO()
z.dump(out, start=start, stop=stop, prefix=prefix,
terminator=term)
assert out.getvalue() == expected_dump
out = BytesIO()
z.dump(out, start=start, stop=stop, prefix=prefix,
length_prefixed="uleb128")
assert (list(read_length_prefixed(BytesIO(out.getvalue()), "uleb128"))
== expected)
out = BytesIO()
z.dump(out, start=start, stop=stop, prefix=prefix,
length_prefixed="u64le")
assert (list(read_length_prefixed(BytesIO(out.getvalue()), "u64le"))
== expected)
assert list(z.search(stop=b"bb", prefix=b"b")) == [b"b"]
assert_raises(ValueError, list,
z.block_map(_check_raise_helper, args=(ValueError,)))
assert_raises(ValueError, z.block_exec,
_check_raise_helper, args=(ValueError,))
z.validate()
def test_zs():
for codec in codec_shorthands:
p = test_data_path("letters-%s.zs" % (codec,))
for parallelism in [0, 2, "guess"]:
with ZS(path=p, parallelism=parallelism) as z:
check_letters_zs(z, codec)
# This is much slower, and the above test will have already exercised most of
# the tricky code, so we make this test less exhaustive.
def test_http_zs():
with web_server(test_data_path()) as root_url:
codec = "deflate"
url = "%s/letters-%s.zs" % (root_url, codec)
for parallelism in [0, 2]:
with ZS(url=url, parallelism=parallelism) as z:
check_letters_zs(z, codec)
def test_http_notices_lack_of_range_support():
with web_server(test_data_path(), range_support=False) as root_url:
codec = "deflate"
url = "%s/letters-%s.zs" % (root_url, codec)
assert_raises(ZSError, lambda: list(ZS(url=url)))
def test_zs_args():
p = test_data_path("letters-none.zs")
# can't pass both path and url
assert_raises(ValueError, ZS, path=p, url="x")
# parallelism must be >= 0
assert_raises(ValueError, ZS, path=p, parallelism=-1)
def test_zs_close():
z = ZS(test_data_path("letters-none.zs"))
z.close()
for call in [[list, z.search()],
[list,
z.block_map(_check_raise_helper, AssertionError)],
[list, z],
[z.dump, BytesIO()],
[z.validate],
]:
print(repr(call))
assert_raises(ZSError, *call)
# But calling .close() twice is fine.
z.close()
# smoke test for __del__ method
ZS(test_data_path("letters-none.zs"))
def test_context_manager_closes():
with ZS(test_data_path("letters-none.zs")) as z:
assert list(z.search()) == letters_records
assert_raises(ZSError, list, z.search())
def test_block_exec():
    # This function is tricky to test in a multiprocessing world, because we need
# some way to communicate back from the subprocesses that the execution
# actually happened... instead we just test it in serial
# mode. (Fortunately it is a super-trivial function.)
z = ZS(test_data_path("letters-none.zs"), parallelism=0)
# b/c we're in serial mode, the fn doesn't need to be pickleable
class CountBlocks(object):
def __init__(self):
self.count = 0
def __call__(self, records):
self.count += 1
count_blocks = CountBlocks()
z.block_exec(count_blocks)
assert count_blocks.count > 1
assert count_blocks.count == len(list(z.block_map(identity)))
def test_big_headers():
from zs.reader import _lower_header_size_guess
with _lower_header_size_guess():
z = ZS(test_data_path("letters-none.zs"))
assert z.codec == "none"
assert z.data_sha256 == letters_sha256
assert z.metadata == {
u"test-data": u"letters",
u"build-info": {
u"user": u"test-user",
u"host": u"test-host",
u"time": u"2000-01-01T00:00:00.000000Z",
u"version": u"zs test",
},
}
assert list(z) == letters_records
def test_broken_files():
import glob
unchecked_paths = set(glob.glob(test_data_path("broken-files/*.zs")))
# Files that should fail even on casual use (no validate)
for basename, msg_fragment in [
("short-root", ["partial read", "root index length"]),
("truncated-root", "unexpected EOF"),
("bad-magic", "bad magic"),
("incomplete-magic", "partially written"),
("header-checksum", "header checksum"),
("root-checksum", "checksum mismatch"),
("bad-codec", "unrecognized compression"),
("non-dict-metadata", "bad metadata"),
("truncated-data-1", "unexpectedly ran out of data"),
("truncated-data-2", "unexpected EOF"),
("truncated-data-3", "unexpected EOF"),
("wrong-root-offset", ["checksum mismatch", "root block missing"]),
("root-is-data", ["expecting index block", "bad level"]),
("wrong-root-level-1", ["expecting index block", "bad index ref"]),
("partial-data-1", "past end of block"),
("partial-data-2", "end of buffer"),
("empty-data", "empty block"),
("partial-index-1", "end of buffer"),
("partial-index-2", "end of buffer"),
("partial-index-3", "past end of block"),
("partial-index-4", "past end of block"),
("empty-index", "empty block"),
("bad-total-length", "header says it should"),
("bad-level-root", ["extension block", "root block missing"]),
("bad-level-index-2", ["extension block", "dangling or multiple refs"]),
("post-header-junk", "checksum mismatch"),
]:
print(basename)
def any_match(mfs, haystack):
if isinstance(mfs, str):
mfs = [mfs]
for mf in mfs:
if mf in haystack:
return True
return False
# to prevent accidental false success:
assert not any_match(msg_fragment, basename)
p = test_data_path("broken-files/%s.zs" % (basename,))
with assert_raises(ZSCorrupt) as cm:
with ZS(p) as z:
list(z)
# use start= to ensure that we do an index traversal
list(z.search(start=b"\x00"))
assert any_match(msg_fragment, str(cm.exception))
with assert_raises(ZSCorrupt) as cm:
with ZS(p) as z:
z.validate()
assert any_match(msg_fragment, str(cm.exception))
unchecked_paths.discard(p)
# Files that might look okay locally, but validate should detect problems
for basename, msg_fragment in [
("unref-data", "unreferenced"),
("unref-index", "unreferenced"),
("wrong-root-length", "root index length"),
("wrong-root-level-2", "level 3 to level 1"),
("repeated-index", "multiple ref"),
("bad-ref-length", "!= actual length"),
("bad-index-order", "unsorted offsets"),
("bad-index-order", "unsorted records"),
("bad-data-order", "unsorted records"),
("bad-index-key-1", "too large for block"),
("bad-index-key-2", "too small for block"),
("bad-index-key-3", "too small for block"),
("bad-sha256", "data hash mismatch"),
# not really an accurate message -- this file has a level 1 index
# pointing to an extension block. the reader doesn't blow up at
# this because it knows that below a level 1 index is data and
# switches to streaming read, and then streaming read ignores
# extension blocks, so only fsck() will catch it. And fsck() uses
# a streaming read so extension blocks are invisible to it, and
# all it sees is that there's this reference pointing into an
# invisible hole in space, which looks like a dangling reference.
("bad-level-index-1", "dangling"),
]:
print(basename)
# to prevent accidental false success:
assert msg_fragment not in basename
p = test_data_path("broken-files/%s.zs" % (basename,))
with ZS(p) as z:
with assert_raises(ZSCorrupt) as cm:
z.validate()
assert msg_fragment in str(cm.exception)
unchecked_paths.discard(p)
# Files that are a bit tricky, but should in fact be okay
for basename in [
"good-index-key-1",
"good-index-key-2",
"good-index-key-3",
"good-extension-blocks",
"good-extension-header-fields",
]:
print(basename)
p = test_data_path("broken-files/%s.zs" % (basename,))
with ZS(p) as z:
list(z)
z.validate()
unchecked_paths.discard(p)
assert not unchecked_paths
def test_extension_blocks():
# Check that the reader happily skips over the extension blocks in the
# middle of the file.
with ZS(test_data_path("broken-files/good-extension-blocks.zs")) as z:
assert list(z) == [b"a", b"b", b"c", b"d"]
def test_ref_loops():
# Had a bunch of trouble eliminating reference loops in the ZS object.
# Don't use 'with' statement here b/c that keeps another ref which just
# confuses things.
z = ZS(test_data_path("letters-none.zs"))
try:
# 1 for 'z', one for the temporary passed to sys.getrefcount
print(sys.getrefcount(z))
assert sys.getrefcount(z) == 2
list(z)
assert sys.getrefcount(z) == 2
finally:
z.close()
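# Illustrative sketch (not part of the original test module): the reader API
# exercised by the tests above, shown on the bundled "letters" fixture.
def _example_zs_usage():
    with ZS(test_data_path("letters-none.zs")) as z:
        # a full scan yields the records in sorted order
        assert list(z) == letters_records
        # range queries: start is inclusive, stop is exclusive
        assert list(z.search(start=b"b", stop=b"d")) == [b"b", b"bb"]
        # prefix restricts results to records starting with the given bytes
        assert list(z.search(prefix=b"z")) == [b"z", b"zz"]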
|
|
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from ci import models, event
from ci.tests import DBTester, utils
class Tests(DBTester.DBTester):
def setUp(self):
super(Tests, self).setUp()
self.create_default_recipes()
def job_compare(self, j0_ready=False, j1_ready=False, j2_ready=False, j3_ready=False):
self.job0.refresh_from_db()
self.job1.refresh_from_db()
self.job2.refresh_from_db()
self.job3.refresh_from_db()
self.assertEqual(self.job0.ready, j0_ready)
self.assertEqual(self.job1.ready, j1_ready)
self.assertEqual(self.job2.ready, j2_ready)
self.assertEqual(self.job3.ready, j3_ready)
def create_jobs(self):
(self.job0, self.job1, self.job2, self.job3) = utils.create_test_jobs()
def test_make_jobs_ready_simple(self):
# a new set of jobs, only the first one that doesn't have dependencies is ready
self.create_jobs()
self.set_counts()
self.job0.event.make_jobs_ready()
self.compare_counts(ready=1)
self.job_compare(j0_ready=True)
def test_make_jobs_ready_done(self):
# all the jobs are complete
self.create_jobs()
for j in models.Job.objects.all():
j.complete = True
j.active = True
j.save()
self.set_counts()
self.job0.event.make_jobs_ready()
self.compare_counts(num_events_completed=1)
def test_make_jobs_ready_first_failed(self):
# first one failed so jobs that depend on it
# shouldn't be marked as ready
self.create_jobs()
self.job0.status = models.JobStatus.FAILED
self.job0.complete = True
self.job0.save()
self.set_counts()
self.job0.event.make_jobs_ready()
        self.compare_counts(num_events_completed=1) # None of the other jobs can run so event is complete
self.job_compare()
def test_make_jobs_ready_first_passed(self):
# first one passed so jobs that depend on it
# should be marked as ready
self.create_jobs()
self.job0.status = models.JobStatus.FAILED_OK
self.job0.complete = True
self.job0.save()
self.set_counts()
self.job0.event.make_jobs_ready()
self.compare_counts(ready=2)
self.job_compare(j1_ready=True, j2_ready=True)
def test_make_jobs_ready_running(self):
# a failed job, but running jobs keep going
self.create_jobs()
self.job0.status = models.JobStatus.FAILED_OK
self.job0.complete = True
self.job0.save()
self.job1.status = models.JobStatus.FAILED
self.job1.complete = True
self.job1.save()
self.set_counts()
self.job0.event.make_jobs_ready()
self.compare_counts(ready=1)
self.job_compare(j2_ready=True)
# make sure calling it again doesn't change things
self.set_counts()
self.job0.event.make_jobs_ready()
self.compare_counts()
self.job_compare(j2_ready=True)
def test_make_jobs_ready_last_dep(self):
# make sure multiple dependencies work
self.create_jobs()
self.job0.status = models.JobStatus.FAILED_OK
self.job0.complete = True
self.job0.ready = True
self.job0.save()
self.job1.status = models.JobStatus.SUCCESS
self.job1.complete = True
self.job1.ready = True
self.job1.save()
self.set_counts()
self.job0.event.make_jobs_ready()
self.compare_counts(ready=1)
self.job_compare(j0_ready=True, j1_ready=True, j2_ready=True)
self.job2.status = models.JobStatus.SUCCESS
self.job2.complete = True
self.job2.save()
self.set_counts()
self.job0.event.make_jobs_ready()
self.compare_counts(ready=1)
self.job_compare(j0_ready=True, j1_ready=True, j2_ready=True, j3_ready=True)
def test_event_odd_deps(self):
"""
Had the scenario where we have:
Precheck -> Test:linux, Test:clang -> Merge
where Test had 2 build configs.
        But the merge recipe had a depends_on with an outdated
        recipe, so make_jobs_ready started the merge without waiting
        for the two Test jobs to finish.
"""
e = utils.create_event()
e.cause = models.Event.PUSH
e.save()
r0 = utils.create_recipe(name='precheck')
r1 = utils.create_recipe(name='test')
r2 = utils.create_recipe(name='merge')
r3 = utils.create_recipe(name='test')
# These two need to have the same filename
r1.filename = "my filename"
r1.save()
r3.filename = r1.filename
r3.save()
r1.build_configs.add(utils.create_build_config("Otherconfig"))
        utils.create_recipe_dependency(recipe=r1, depends_on=r0)
utils.create_recipe_dependency(recipe=r2, depends_on=r3)
j0 = utils.create_job(recipe=r0, event=e)
j1a = utils.create_job(recipe=r1, event=e, config=r1.build_configs.first())
j1b = utils.create_job(recipe=r1, event=e, config=r1.build_configs.last())
j2 = utils.create_job(recipe=r2, event=e)
self.set_counts()
e.make_jobs_ready()
self.compare_counts(ready=1)
j0.refresh_from_db()
j1a.refresh_from_db()
j1b.refresh_from_db()
j2.refresh_from_db()
self.assertEqual(j0.ready, True)
self.assertEqual(j1a.ready, False)
self.assertEqual(j1b.ready, False)
self.assertEqual(j2.ready, False)
j0.complete = True
j0.status = models.JobStatus.SUCCESS
j0.save()
self.set_counts()
e.make_jobs_ready()
self.compare_counts(ready=2)
j0.refresh_from_db()
j1a.refresh_from_db()
j1b.refresh_from_db()
j2.refresh_from_db()
self.assertEqual(j0.ready, True)
self.assertEqual(j1a.ready, True)
self.assertEqual(j1b.ready, True)
self.assertEqual(j2.ready, False)
j1a.complete = True
j1a.status = models.JobStatus.SUCCESS
j1a.save()
self.set_counts()
e.make_jobs_ready()
self.compare_counts()
j1b.complete = True
j1b.status = models.JobStatus.SUCCESS
j1b.save()
self.set_counts()
e.make_jobs_ready()
self.compare_counts(ready=1)
j2.refresh_from_db()
self.assertEqual(j2.ready, True)
def test_event_status_incomplete(self):
self.create_jobs()
# All jobs are NOT_STARTED
ev = self.job0.event
self.assertEqual(ev.jobs.count(), 4)
ev.set_status()
self.assertEqual(ev.status, models.JobStatus.NOT_STARTED)
self.assertEqual(ev.base.branch.status, models.JobStatus.NOT_STARTED)
# 1 SUCCESS but not all of them
self.job0.status = models.JobStatus.SUCCESS
self.job0.save()
ev.set_status()
self.assertEqual(ev.status, models.JobStatus.RUNNING)
self.assertEqual(ev.base.branch.status, models.JobStatus.RUNNING)
self.job1.status = models.JobStatus.FAILED
self.job1.save()
ev.set_status()
self.assertEqual(ev.status, models.JobStatus.RUNNING)
self.job2.status = models.JobStatus.ACTIVATION_REQUIRED
self.job2.save()
self.job3.status = models.JobStatus.ACTIVATION_REQUIRED
self.job3.save()
ev.set_status()
self.assertEqual(ev.status, models.JobStatus.ACTIVATION_REQUIRED)
self.assertEqual(ev.base.branch.status, models.JobStatus.ACTIVATION_REQUIRED)
self.job2.status = models.JobStatus.RUNNING
self.job2.save()
ev.set_status()
self.assertEqual(ev.status, models.JobStatus.RUNNING)
self.assertEqual(ev.base.branch.status, models.JobStatus.RUNNING)
# try again with on a pull request event
ev.pull_request = utils.create_pr()
ev.save()
self.assertEqual(ev.pull_request.status, models.JobStatus.NOT_STARTED)
ev.set_status()
self.assertEqual(ev.status, models.JobStatus.RUNNING)
self.assertEqual(ev.pull_request.status, models.JobStatus.RUNNING)
def test_event_status_complete(self):
self.create_jobs()
# All jobs are NOT_STARTED
ev = self.job0.event
self.assertEqual(ev.jobs.count(), 4)
ev.set_complete()
self.assertEqual(ev.status, models.JobStatus.NOT_STARTED)
# 1 SUCCESS but none of them are ready
self.job0.status = models.JobStatus.SUCCESS
self.job0.complete = True
self.job0.save()
self.job1.complete = True
self.job1.save()
self.job2.complete = True
self.job2.save()
self.job3.complete = True
self.job3.save()
ev.set_complete()
ev.refresh_from_db()
self.assertEqual(ev.status, models.JobStatus.SUCCESS)
self.assertEqual(ev.base.branch.status, models.JobStatus.SUCCESS)
# 1 SUCCESS, 1 CANCELED
self.job1.status = models.JobStatus.CANCELED
self.job1.save()
ev.set_complete()
ev.refresh_from_db()
self.assertEqual(ev.status, models.JobStatus.CANCELED)
self.assertEqual(ev.base.branch.status, models.JobStatus.CANCELED)
# 1 SUCCESS, 1 CANCELED, 1 FAILED_OK
self.job2.status = models.JobStatus.FAILED_OK
self.job2.save()
ev.set_complete()
ev.refresh_from_db()
self.assertEqual(ev.status, models.JobStatus.CANCELED)
self.assertEqual(ev.base.branch.status, models.JobStatus.CANCELED)
# 1 SUCCESS, 1 CANCELED, 1 FAILED_OK, 1 FAILED
self.job3.status = models.JobStatus.FAILED
self.job3.save()
ev.set_complete()
ev.refresh_from_db()
        # Since the job graph is j0 -> j1,j2 -> j3, j3 is unrunnable
        # and not counted
self.assertEqual(ev.status, models.JobStatus.CANCELED)
self.assertEqual(ev.base.branch.status, models.JobStatus.CANCELED)
# 2 SUCCESS, 1 FAILED_OK, 1 FAILED
self.job1.status = models.JobStatus.SUCCESS
self.job1.save()
ev.set_complete()
ev.refresh_from_db()
self.assertEqual(ev.status, models.JobStatus.FAILED)
self.assertEqual(ev.base.branch.status, models.JobStatus.FAILED)
# 2 SUCCESS, 1 FAILED_OK, 1 RUNNING
self.job3.status = models.JobStatus.RUNNING
self.job3.save()
ev.set_complete()
ev.refresh_from_db()
self.assertEqual(ev.status, models.JobStatus.RUNNING)
self.assertEqual(ev.base.branch.status, models.JobStatus.RUNNING)
# 3 SUCCESS, 1 FAILED_OK
self.job3.status = models.JobStatus.SUCCESS
self.job3.save()
ev.set_complete()
ev.refresh_from_db()
self.assertEqual(ev.status, models.JobStatus.FAILED_OK)
self.assertEqual(ev.base.branch.status, models.JobStatus.FAILED_OK)
# try again with on a pull request event
ev.pull_request = utils.create_pr()
ev.save()
self.assertEqual(ev.pull_request.status, models.JobStatus.NOT_STARTED)
ev.set_complete()
self.assertEqual(ev.status, models.JobStatus.FAILED_OK)
self.assertEqual(ev.pull_request.status, models.JobStatus.FAILED_OK)
def test_cancel_event(self):
ev = utils.create_event()
jobs = []
for i in range(3):
r = utils.create_recipe(name="recipe %s" % i, user=ev.build_user)
j = utils.create_job(recipe=r, event=ev, user=ev.build_user)
jobs.append(j)
msg = "Test cancel"
self.set_counts()
event.cancel_event(ev, msg)
# The status on the branch should get updated
self.compare_counts(canceled=3,
events_canceled=1,
num_changelog=3,
num_jobs_completed=3,
num_events_completed=1,
active_branches=1)
ev.refresh_from_db()
self.assertEqual(ev.status, models.JobStatus.CANCELED)
self.assertEqual(ev.complete, True)
for j in jobs:
j.refresh_from_db()
self.assertEqual(j.status, models.JobStatus.CANCELED)
self.assertTrue(j.complete)
def test_get_active_labels(self):
with self.settings(INSTALLED_GITSERVERS=[utils.github_config(recipe_label_activation=utils.default_labels())]):
all_docs = ["docs/foo", "docs/bar", "docs/foobar"]
some_docs = all_docs[:] + ["tutorials/foo", "tutorials/bar"]
matched, match_all = event.get_active_labels(self.repo, all_docs)
self.assertEqual(matched, ["DOCUMENTATION"])
self.assertEqual(match_all, True)
matched, match_all = event.get_active_labels(self.repo, some_docs)
self.assertEqual(matched, ["DOCUMENTATION", "TUTORIAL"])
self.assertEqual(match_all, False)
# No labels are configured
other_docs = ["common/foo", "common/bar"]
matched, match_all = event.get_active_labels(self.repo, other_docs)
self.assertEqual(matched, [])
self.assertEqual(match_all, True)
# One of the labels matches all the files
labels = {"LABEL0": "^common", "LABEL1": "^common/no_exist"}
with self.settings(INSTALLED_GITSERVERS=[utils.github_config(recipe_label_activation=labels)]):
matched, match_all = event.get_active_labels(self.repo, other_docs)
self.assertEqual(matched, ["LABEL0"])
self.assertEqual(match_all, True)
# One of the labels matches but not all the files
labels = {"LABEL0": "^common/foo", "LABEL1": "^common/no_exist"}
with self.settings(INSTALLED_GITSERVERS=[utils.github_config(recipe_label_activation=labels)]):
matched, match_all = event.get_active_labels(self.repo, other_docs)
self.assertEqual(matched, ["LABEL0"])
self.assertEqual(match_all, False)
# Old syntax is no longer supported
with self.settings(INSTALLED_GITSERVERS=[utils.github_config(recipe_label_activation_additive=["ADDITIVE"])]):
matched, match_all = event.get_active_labels(self.repo, other_docs)
self.assertEqual(matched, [])
self.assertEqual(match_all, True)
# Anything that matches an additive label automatically sets matched_all to false
labels = {"ADDITIVE": "^common/foo"}
with self.settings(INSTALLED_GITSERVERS=[utils.github_config(recipe_label_activation_additive=labels)]):
matched, match_all = event.get_active_labels(self.repo, other_docs)
self.assertEqual(matched, ["ADDITIVE"])
self.assertEqual(match_all, False)
# A normal label matches everything but the additive label also matches
labels = {"LABEL": "^common/"}
add_labels = {"ADDITIVE": "^common/foo"}
git_config = utils.github_config(recipe_label_activation_additive=add_labels, recipe_label_activation=labels)
with self.settings(INSTALLED_GITSERVERS=[git_config]):
matched, match_all = event.get_active_labels(self.repo, other_docs)
self.assertEqual(matched, ["ADDITIVE", "LABEL"])
self.assertEqual(match_all, False)
|
|
# coding: utf-8
"""
Classes and helpers to describe and work with systematic shifts.
"""
__all__ = ["Shift"]
import scinum as sn
from order.unique import UniqueObject
from order.mixins import CopyMixin, AuxDataMixin, TagMixin, LabelMixin
from order.util import typed
class Shift(UniqueObject, CopyMixin, AuxDataMixin, TagMixin, LabelMixin):
"""
Description of a systematic shift.
**Arguments**
The shift *name* should either be ``"nominal"`` or it should have the format
``"<source>_<direction>"`` where *direction* is either ``"up"`` or ``"down"``. *type* describes
    the shift's effect, which is either rate-changing only (*RATE*), shape-changing only
    (*SHAPE*), or both (*RATE_SHAPE*). When *None*, *RATE_SHAPE* is used.
*label* and *label_short* are forwarded to the :py:class:`~order.mixins.LabelMixin`, *tags* to
    the :py:class:`~order.mixins.TagMixin`, *aux* to the :py:class:`~order.mixins.AuxDataMixin`, and
*name*, *id* (defaulting to an auto id) and *context* to the
:py:class:`~order.unique.UniqueObject` constructor.
**Copy behavior**
All attributes are copied. Also note the copy behavior of
:py:class:`~order.unique.UniqueObject`'s.
**Example**
.. code-block:: python
import order as od
s = od.Shift("nominal", 1)
s.name
# -> "nominal"
s.is_up
# -> False
s = Shift("pdf_up", 2)
s.source
# -> "pdf"
s.direction
# -> "up"
s.is_up
# -> True
**Members**
.. py:classattribute:: NOMINAL
type: string
Flag denoting a nominal shift (``"nominal"``). Same as
`scinum.Number.NOMINAL <https://scinum.readthedocs.io/en/latest/#scinum.Number.NOMINAL>`__.
.. py:classattribute:: UP
type: string
Flag denoting an up variation (``"up"``). Same as
`scinum.Number.UP <https://scinum.readthedocs.io/en/latest/#scinum.Number.UP>`__.
.. py:classattribute:: DOWN
type: string
Flag denoting a down variation (``"down"``). Same as
`scinum.Number.DOWN <https://scinum.readthedocs.io/en/latest/#scinum.Number.DOWN>`__.
.. py:classattribute:: RATE
type: string
Flag denoting a rate-changing effect (``"rate"``).
.. py:classattribute:: SHAPE
type: string
Flag denoting a shape-changing effect (``"shape"``).
.. py:classattribute:: RATE_SHAPE
type: string
Flag denoting a both rate- and shape-changing effect (``"rate_shape"``).
.. py:attribute:: source
type: string
read-only
The source of this shift, e.g. *NOMINAL*, ``"pdf"``, etc.
.. py:attribute:: direction
type: string
read-only
The direction of this shift, either *NOMINAL*, *UP* or *DOWN*.
.. py:attribute:: type
type: string
The type of this shift, either *RATE*, *SHAPE* or *RATE_SHAPE*.
.. py:attribute:: is_nominal
type: bool
read-only
Flag denoting if the shift is nominal.
.. py:attribute:: is_up
type: bool
read-only
Flag denoting if the shift direction is *UP*.
.. py:attribute:: is_down
type: bool
read-only
Flag denoting if the shift direction is *DOWN*.
.. py:attribute:: is_rate
type: bool
read-only
Flag denoting if the shift type is rate-changing only.
.. py:attribute:: is_shape
type: bool
read-only
Flag denoting if the shift type is shape-changing only.
.. py:attribute:: is_rate_shape
type: bool
read-only
Flag denoting if the shift type is rate- and shape-changing.
"""
# nominal flag
NOMINAL = sn.Number.NOMINAL
# shift directions
UP = sn.Number.UP
DOWN = sn.Number.DOWN
# shift types
RATE = "rate"
SHAPE = "shape"
RATE_SHAPE = "rate_shape"
# attributes for copying
copy_specs = ["type"] + UniqueObject.copy_specs + AuxDataMixin.copy_specs + \
TagMixin.copy_specs + LabelMixin.copy_specs
@classmethod
def split_name(cls, name):
"""
Splits a shift *name* into its source and direction. If *name* is *NOMINAL*, both source
and direction will be *NOMINAL*. Example:
.. code-block:: python
split_name("nominal") # -> ("nominal", "nominal")
split_name("pdf_up") # -> ("pdf", "up")
split_name("pdfup") # -> ValueError: invalid shift name format: pdfup
"""
if name is None:
return (None, None)
elif name == cls.NOMINAL:
return (cls.NOMINAL, cls.NOMINAL)
elif "_" in name:
source, direction = tuple(name.rsplit("_", 1))
if source == cls.NOMINAL:
raise ValueError("pointless nominal shift name: {}".format(name))
elif direction not in (cls.UP, cls.DOWN):
raise ValueError("invalid shift direction: {}".format(direction))
else:
return (source, direction)
else:
raise ValueError("invalid shift name format: {}".format(name))
@classmethod
def join_name(cls, source, direction):
"""
Joins a shift *source* and a shift *direction* to return a shift name. If either *source* or
*direction* is *None*, *None* is returned. If *source* is *NOMINAL*, *direction* must be
*NOMINAL* as well. Otherwise, *direction* must be either *UP* or *DOWN*. Example:
.. code-block:: python
join_name("nominal", "nominal") # -> "nominal"
join_name("nominal", "up") # -> ValueError: pointless nominal shift direction
join_name("pdf", "up") # -> "pdf_up"
join_name("pdf", "high") # -> ValueError: invalid shift direction
"""
        if source is None or direction is None:
            return None
        elif source == cls.NOMINAL:
if direction != cls.NOMINAL:
raise ValueError("pointless nominal shift direction: {}".format(direction))
else:
return cls.NOMINAL
elif direction in (cls.UP, cls.DOWN):
return "{}_{}".format(source, direction)
else:
raise ValueError("unknown shift direction: {}".format(direction))
def __init__(self, name, id, type=None, label=None, label_short=None, tags=None, aux=None,
context=None):
UniqueObject.__init__(self, name, id, context=context)
CopyMixin.__init__(self)
AuxDataMixin.__init__(self, aux=aux)
TagMixin.__init__(self, tags=tags)
LabelMixin.__init__(self, label=label, label_short=label_short)
# register empty attributes
self._source = None
self._direction = None
self._type = self.RATE_SHAPE
# set initial values
self._source, self._direction = self.split_name(self.name)
if type is not None:
self.type = type
@property
def source(self):
# source getter
return self._source
@property
def direction(self):
# direction getter
return self._direction
@typed
def type(self, type):
# type parser
if type not in (self.RATE, self.SHAPE, self.RATE_SHAPE):
raise ValueError("unknown type: {}".format(type))
return type
@property
def is_nominal(self):
# is_nominal getter
return self.name == self.NOMINAL
@property
def is_up(self):
# is_up getter
return self.direction == self.UP
@property
def is_down(self):
# is_down getter
return self.direction == self.DOWN
@property
def is_rate(self):
# is_rate getter
return self.type == self.RATE
@property
def is_shape(self):
# is_shape getter
return self.type == self.SHAPE
@property
def is_rate_shape(self):
# is_rate_shape getter
return self.type == self.RATE_SHAPE
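# Illustrative sketch (not part of the original module): constructing shifts and
# using the helpers defined above. The shift names and ids are placeholders.
def _example_shift_usage():
    nominal = Shift("nominal", 1)
    pdf_up = Shift("pdf_up", 2, type=Shift.SHAPE)
    assert nominal.is_nominal and nominal.direction == Shift.NOMINAL
    assert pdf_up.source == "pdf" and pdf_up.is_up and pdf_up.is_shape
    assert Shift.split_name("pdf_up") == ("pdf", "up")
    assert Shift.join_name("pdf", "down") == "pdf_down"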
|
|
from __future__ import absolute_import
from django import forms
from django.conf import settings
from django.contrib.auth.forms import SetPasswordForm, AuthenticationForm, \
PasswordResetForm
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db.models.query import QuerySet
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from zerver.lib.actions import do_change_password, is_inactive, user_email_is_unique
from zerver.lib.name_restrictions import is_reserved_subdomain, is_disposable_domain
from zerver.lib.utils import get_subdomain, check_subdomain
from zerver.models import Realm, get_user_profile_by_email, UserProfile, \
completely_open, get_realm, get_realm_by_email_domain, get_realm_by_string_id, \
get_unique_open_realm, email_to_domain, email_allowed_for_realm
from zproject.backends import password_auth_enabled
import logging
import re
import DNS
from six import text_type
from typing import Any, Callable, Optional
MIT_VALIDATION_ERROR = u'That user does not exist at MIT or is a ' + \
u'<a href="https://ist.mit.edu/email-lists">mailing list</a>. ' + \
u'If you want to sign up an alias for Zulip, ' + \
u'<a href="mailto:[email protected]">contact us</a>.'
WRONG_SUBDOMAIN_ERROR = "Your Zulip account is not a member of the " + \
"organization associated with this subdomain. " + \
"Please contact %s with any questions!" % (settings.ZULIP_ADMINISTRATOR,)
def get_registration_string(domain):
# type: (text_type) -> text_type
register_url = reverse('register') + domain
register_account_string = _('The organization with the domain already exists. '
'Please register your account <a href=%(url)s>here</a>.') % {'url': register_url}
return register_account_string
def email_is_not_mit_mailing_list(email):
# type: (text_type) -> None
"""Prevent MIT mailing lists from signing up for Zulip"""
if "@mit.edu" in email:
username = email.rsplit("@", 1)[0]
# Check whether the user exists and can get mail.
try:
DNS.dnslookup("%s.pobox.ns.athena.mit.edu" % username, DNS.Type.TXT)
except DNS.Base.ServerError as e:
if e.rcode == DNS.Status.NXDOMAIN:
raise ValidationError(mark_safe(MIT_VALIDATION_ERROR))
else:
raise
class RegistrationForm(forms.Form):
full_name = forms.CharField(max_length=100)
# The required-ness of the password field gets overridden if it isn't
# actually required for a realm
password = forms.CharField(widget=forms.PasswordInput, max_length=100,
required=False)
realm_name = forms.CharField(max_length=100, required=False)
realm_subdomain = forms.CharField(max_length=40, required=False)
realm_org_type = forms.ChoiceField(((Realm.COMMUNITY, 'Community'),
(Realm.CORPORATE, 'Corporate')), \
initial=Realm.COMMUNITY, required=False)
if settings.TERMS_OF_SERVICE:
terms = forms.BooleanField(required=True)
def clean_realm_subdomain(self):
# type: () -> str
if settings.REALMS_HAVE_SUBDOMAINS:
error_strings = {
'too short': _("Subdomain needs to have length 3 or greater."),
'extremal dash': _("Subdomain cannot start or end with a '-'."),
'bad character': _("Subdomain can only have lowercase letters, numbers, and '-'s."),
'unavailable': _("Subdomain unavailable. Please choose a different one.")}
else:
error_strings = {
'too short': _("Short name needs at least 3 characters."),
'extremal dash': _("Short name cannot start or end with a '-'."),
'bad character': _("Short name can only have lowercase letters, numbers, and '-'s."),
'unavailable': _("Short name unavailable. Please choose a different one.")}
subdomain = self.cleaned_data['realm_subdomain']
if not subdomain:
return ''
if len(subdomain) < 3:
raise ValidationError(error_strings['too short'])
if subdomain[0] == '-' or subdomain[-1] == '-':
raise ValidationError(error_strings['extremal dash'])
if not re.match('^[a-z0-9-]*$', subdomain):
raise ValidationError(error_strings['bad character'])
if is_reserved_subdomain(subdomain) or \
get_realm_by_string_id(subdomain) is not None:
raise ValidationError(error_strings['unavailable'])
return subdomain
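    # Illustrative examples of the validation above (not part of the original
    # form), using the REALMS_HAVE_SUBDOMAINS wording:
    #   "zulip"   -> accepted (if not reserved or already taken)
    #   "ab"      -> rejected: needs length 3 or greater
    #   "-zulip"  -> rejected: cannot start or end with a '-'
    #   "zu_lip"  -> rejected: only lowercase letters, numbers, and '-'s allowed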
class ToSForm(forms.Form):
terms = forms.BooleanField(required=True)
class HomepageForm(forms.Form):
# This form is important because it determines whether users can
# register for our product. Be careful when modifying the
# validators.
email = forms.EmailField(validators=[is_inactive,])
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
self.string_id = kwargs.get("string_id")
if "string_id" in kwargs:
del kwargs["string_id"]
super(HomepageForm, self).__init__(*args, **kwargs)
def clean_email(self):
# type: () -> str
"""Returns the email if and only if the user's email address is
allowed to join the realm they are trying to join."""
email = self.cleaned_data['email']
if get_unique_open_realm():
return email
# Otherwise, the user is trying to join a specific realm.
realm = None
if self.string_id:
realm = get_realm_by_string_id(self.string_id)
elif not settings.REALMS_HAVE_SUBDOMAINS:
realm = get_realm_by_email_domain(email)
if realm is None:
if settings.REALMS_HAVE_SUBDOMAINS:
raise ValidationError(_("The organization you are trying to join does not exist."))
else:
raise ValidationError(_("Your email address does not correspond to any existing organization."))
if realm.invite_required:
raise ValidationError(_("Please request an invite from the organization administrator."))
if not email_allowed_for_realm(email, realm):
raise ValidationError(
_("The organization you are trying to join, %(string_id)s, only allows users with e-mail "
"addresses within the organization. Please try a different e-mail address."
% {'string_id': realm.string_id}))
if realm.is_zephyr_mirror_realm:
email_is_not_mit_mailing_list(email)
return email
def email_is_not_disposable(email):
# type: (text_type) -> None
if is_disposable_domain(email_to_domain(email)):
raise ValidationError(_("Please use your real email address."))
class RealmCreationForm(forms.Form):
# This form determines whether users can
# create a new realm. Be careful when modifying the
# validators.
email = forms.EmailField(validators=[user_email_is_unique, email_is_not_disposable])
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
self.domain = kwargs.get("domain")
if "domain" in kwargs:
del kwargs["domain"]
super(RealmCreationForm, self).__init__(*args, **kwargs)
class LoggingSetPasswordForm(SetPasswordForm):
def save(self, commit=True):
# type: (bool) -> UserProfile
do_change_password(self.user, self.cleaned_data['new_password1'],
log=True, commit=commit)
return self.user
class ZulipPasswordResetForm(PasswordResetForm):
def get_users(self, email):
# type: (str) -> QuerySet
"""Given an email, return matching user(s) who should receive a reset.
This is modified from the original in that it allows non-bot
users who don't have a usable password to reset their
passwords.
"""
        if not password_auth_enabled():
logging.info("Password reset attempted for %s even though password auth is disabled." % (email,))
return []
result = UserProfile.objects.filter(email__iexact=email, is_active=True,
is_bot=False)
if len(result) == 0:
logging.info("Password reset attempted for %s; no active account." % (email,))
return result
class CreateUserForm(forms.Form):
full_name = forms.CharField(max_length=100)
email = forms.EmailField()
class OurAuthenticationForm(AuthenticationForm):
def clean_username(self):
# type: () -> str
email = self.cleaned_data['username']
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return email
if user_profile.realm.deactivated:
error_msg = u"""Sorry for the trouble, but %s has been deactivated.
Please contact %s to reactivate this group.""" % (
user_profile.realm.name,
settings.ZULIP_ADMINISTRATOR)
raise ValidationError(mark_safe(error_msg))
if not check_subdomain(get_subdomain(self.request), user_profile.realm.subdomain):
logging.warning("User %s attempted to password login to wrong subdomain %s" %
(user_profile.email, get_subdomain(self.request)))
raise ValidationError(mark_safe(WRONG_SUBDOMAIN_ERROR))
return email
|
|
import logging as logger
import os
from django.apps.registry import AppRegistryNotReady
from django.core.management import call_command
from django.http.response import Http404
from django.utils.translation import gettext_lazy as _
from iceqube.common.classes import State
from iceqube.exceptions import UserCancelledError
from rest_framework import serializers
from rest_framework import viewsets
from rest_framework.decorators import list_route
from rest_framework.response import Response
from six import string_types
from sqlalchemy.orm.exc import NoResultFound
from .client import get_client
from kolibri.content.permissions import CanManageContent
from kolibri.content.utils.channels import get_mounted_drives_with_channel_info
from kolibri.content.utils.paths import get_content_database_file_path
from kolibri.utils import conf
try:
from django.apps import apps
apps.check_apps_ready()
except AppRegistryNotReady:
import django
django.setup()
logging = logger.getLogger(__name__)
NETWORK_ERROR_STRING = _("There was a network error.")
DISK_IO_ERROR_STRING = _("There was a disk access error.")
CATCHALL_SERVER_ERROR_STRING = _("There was an unknown error.")
class TasksViewSet(viewsets.ViewSet):
permission_classes = (CanManageContent,)
def list(self, request):
jobs_response = [_job_to_response(j) for j in get_client().all_jobs()]
return Response(jobs_response)
def create(self, request):
# unimplemented. Call out to the task-specific APIs for now.
pass
def retrieve(self, request, pk=None):
try:
task = _job_to_response(get_client().status(pk))
return Response(task)
except NoResultFound:
raise Http404('Task with {pk} not found'.format(pk=pk))
def destroy(self, request, pk=None):
# unimplemented for now.
pass
@list_route(methods=["post"])
def startremotechannelimport(self, request):
try:
channel_id = request.data["channel_id"]
except KeyError:
raise serializers.ValidationError("The channel_id field is required.")
baseurl = request.data.get("baseurl", conf.OPTIONS['Urls']['CENTRAL_CONTENT_BASE_URL'])
job_metadata = {
"type": "REMOTECHANNELIMPORT",
"started_by": request.user.pk,
}
job_id = get_client().schedule(
call_command,
"importchannel",
"network",
channel_id,
baseurl=baseurl,
extra_metadata=job_metadata,
cancellable=True,
)
resp = _job_to_response(get_client().status(job_id))
return Response(resp)
@list_route(methods=["post"])
def startremotecontentimport(self, request):
try:
channel_id = request.data["channel_id"]
except KeyError:
raise serializers.ValidationError("The channel_id field is required.")
# optional arguments
baseurl = request.data.get("base_url", conf.OPTIONS['Urls']['CENTRAL_CONTENT_BASE_URL'])
node_ids = request.data.get("node_ids", None)
exclude_node_ids = request.data.get("exclude_node_ids", None)
if node_ids and not isinstance(node_ids, list):
raise serializers.ValidationError("node_ids must be a list.")
if exclude_node_ids and not isinstance(exclude_node_ids, list):
raise serializers.ValidationError("exclude_node_ids must be a list.")
job_metadata = {
"type": "REMOTECONTENTIMPORT",
"started_by": request.user.pk,
}
job_id = get_client().schedule(
call_command,
"importcontent",
"network",
channel_id,
base_url=baseurl,
node_ids=node_ids,
exclude_node_ids=exclude_node_ids,
extra_metadata=job_metadata,
track_progress=True,
cancellable=True,
)
resp = _job_to_response(get_client().status(job_id))
return Response(resp)
@list_route(methods=["post"])
def startdiskchannelimport(self, request):
# Load the required parameters
try:
channel_id = request.data["channel_id"]
except KeyError:
raise serializers.ValidationError("The channel_id field is required.")
try:
drive_id = request.data["drive_id"]
except KeyError:
raise serializers.ValidationError("The drive_id field is required.")
try:
drives = get_mounted_drives_with_channel_info()
drive = drives[drive_id]
except KeyError:
raise serializers.ValidationError("That drive_id was not found in the list of drives.")
job_metadata = {
"type": "DISKCHANNELIMPORT",
"started_by": request.user.pk,
}
job_id = get_client().schedule(
call_command,
"importchannel",
"disk",
channel_id,
drive.datafolder,
extra_metadata=job_metadata,
cancellable=True,
)
resp = _job_to_response(get_client().status(job_id))
return Response(resp)
@list_route(methods=["post"])
def startdiskcontentimport(self, request):
try:
channel_id = request.data["channel_id"]
except KeyError:
raise serializers.ValidationError("The channel_id field is required.")
try:
drive_id = request.data["drive_id"]
except KeyError:
raise serializers.ValidationError("The drive_id field is required.")
try:
drives = get_mounted_drives_with_channel_info()
drive = drives[drive_id]
except KeyError:
raise serializers.ValidationError("That drive_id was not found in the list of drives.")
# optional arguments
node_ids = request.data.get("node_ids", None)
exclude_node_ids = request.data.get("exclude_node_ids", None)
if node_ids and not isinstance(node_ids, list):
raise serializers.ValidationError("node_ids must be a list.")
if exclude_node_ids and not isinstance(exclude_node_ids, list):
raise serializers.ValidationError("exclude_node_ids must be a list.")
job_metadata = {
"type": "DISKCONTENTIMPORT",
"started_by": request.user.pk,
}
job_id = get_client().schedule(
call_command,
"importcontent",
"disk",
channel_id,
drive.datafolder,
node_ids=node_ids,
exclude_node_ids=exclude_node_ids,
extra_metadata=job_metadata,
track_progress=True,
cancellable=True,
)
resp = _job_to_response(get_client().status(job_id))
return Response(resp)
@list_route(methods=['post'])
def startdeletechannel(self, request):
'''
Delete a channel and all its associated content from the server
'''
if "channel_id" not in request.data:
raise serializers.ValidationError(
"The 'channel_id' field is required.")
channel_id = request.data['channel_id']
job_metadata = {
"type": "DELETECHANNEL",
"started_by": request.user.pk,
}
task_id = get_client().schedule(
call_command,
"deletechannel",
channel_id,
track_progress=True,
extra_metadata=job_metadata,
)
# attempt to get the created Task, otherwise return pending status
resp = _job_to_response(get_client().status(task_id))
return Response(resp)
@list_route(methods=['post'])
def startdiskexport(self, request):
'''
Export a channel to a local drive, and copy content to the drive.
'''
# Load the required parameters
try:
channel_id = request.data["channel_id"]
except KeyError:
raise serializers.ValidationError("The channel_id field is required.")
try:
drive_id = request.data["drive_id"]
except KeyError:
raise serializers.ValidationError("The drive_id field is required.")
# optional arguments
node_ids = request.data.get("node_ids", None)
exclude_node_ids = request.data.get("exclude_node_ids", None)
if node_ids and not isinstance(node_ids, list):
raise serializers.ValidationError("node_ids must be a list.")
if exclude_node_ids and not isinstance(exclude_node_ids, list):
raise serializers.ValidationError("exclude_node_ids must be a list.")
job_metadata = {
"type": "DISKEXPORT",
"started_by": request.user.pk,
}
task_id = get_client().schedule(
_localexport,
channel_id,
drive_id,
track_progress=True,
cancellable=True,
node_ids=node_ids,
exclude_node_ids=exclude_node_ids,
extra_metadata=job_metadata,
)
# attempt to get the created Task, otherwise return pending status
resp = _job_to_response(get_client().status(task_id))
return Response(resp)
@list_route(methods=['post'])
def canceltask(self, request):
'''
Cancel a task with its task id given in the task_id parameter.
'''
if 'task_id' not in request.data:
raise serializers.ValidationError(
"The 'task_id' field is required.")
if not isinstance(request.data['task_id'], string_types):
raise serializers.ValidationError(
"The 'task_id' should be a string.")
try:
get_client().cancel(request.data['task_id'])
except NoResultFound:
pass
get_client().clear(force=True)
return Response({})
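    # A hedged usage sketch (the URL prefix depends on how the router mounts
    # this ViewSet, so the path is illustrative only):
    #
    #   POST .../tasks/canceltask/   {"task_id": "<job id string>"}
    #
    # Unknown task ids are ignored (NoResultFound is swallowed) and
    # get_client().clear(force=True) is called afterwards.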
@list_route(methods=['post'])
def cleartasks(self, request):
'''
Cancels all running tasks.
'''
get_client().clear(force=True)
return Response({})
@list_route(methods=['get'])
def localdrive(self, request):
drives = get_mounted_drives_with_channel_info()
# make sure everything is a dict, before converting to JSON
assert isinstance(drives, dict)
out = [mountdata._asdict() for mountdata in drives.values()]
return Response(out)
def _localexport(channel_id, drive_id, update_progress=None, check_for_cancel=None, node_ids=None, exclude_node_ids=None, extra_metadata=None):
drives = get_mounted_drives_with_channel_info()
drive = drives[drive_id]
call_command(
"exportchannel",
channel_id,
drive.datafolder,
update_progress=update_progress,
check_for_cancel=check_for_cancel)
try:
call_command(
"exportcontent",
channel_id,
drive.datafolder,
node_ids=node_ids,
exclude_node_ids=exclude_node_ids,
update_progress=update_progress,
check_for_cancel=check_for_cancel)
except UserCancelledError:
try:
os.remove(get_content_database_file_path(channel_id, datafolder=drive.datafolder))
except OSError:
pass
raise
def _job_to_response(job):
if not job:
return {
"type": None,
"started_by": None,
"status": State.SCHEDULED,
"percentage": 0,
"progress": [],
"id": None,
"cancellable": False,
}
else:
return {
"type": getattr(job, "extra_metadata", {}).get("type"),
"started_by": getattr(job, "extra_metadata", {}).get("started_by"),
"status": job.state,
"exception": str(job.exception),
"traceback": str(job.traceback),
"percentage": job.percentage_progress,
"id": job.job_id,
"cancellable": job.cancellable,
}
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by an ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
'''
Spartan 6 bitstream analyzer tool.
This script reads a Spartan6 bitstream and prints out some useful information.
It can also create a frames file with the configuration data words.
The bitstream is analyzed word by word and interpreted according to
the UG380 Configuration User Guide.
The tool can be used to derive the initialization, startup, and finalization
sequence, as well as the configuration data. The latter is written to a frames
file which can be used by bitstream tools such as frames2bit to generate
a valid bitstream.
'''
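# Example invocation (a hedged sketch; the file names are placeholders, not
# files from this repository):
#
#   python3 <this script> --bitstream design.bit --frames_out design.frames
#
# Without --silent the word-by-word analysis is printed; when --frames_out is
# given, the frames file is written, plus a human-readable
# "<frames_out>.txt" companion unless --silent is set.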
import argparse
from io import StringIO
conf_regs = {
0: "CRC",
1: "FAR_MAJ",
2: "FAR_MIN",
3: "FDRI",
4: "FDRO",
5: "CMD",
6: "CTL",
7: "MASK",
8: "STAT",
9: "LOUT",
10: "COR1",
11: "COR2",
12: "PWRDN_REG",
13: "FLR",
14: "IDCODE",
15: "CWDT",
16: "HC_OPT_REG",
18: "CSBO",
19: "GENERAL1",
20: "GENERAL2",
21: "GENERAL3",
22: "GENERAL4",
23: "GENERAL5",
24: "MODE_REG",
25: "PU_GWE",
26: "PU_GTS",
27: "MFWR",
28: "CCLK_FREQ",
29: "SEU_OPT",
30: "EXP_SIGN",
31: "RDBK_SIGN",
32: "BOOTSTS",
33: "EYE_MASK",
34: "CBC_REG"
}
cmd_reg_codes = {
0: "NULL",
1: "WCFG",
2: "MFW",
3: "LFRM",
4: "RCFG",
5: "START",
7: "RCRC",
8: "AGHIGH",
10: "GRESTORE",
11: "SHUTDOWN",
13: "DESYNC",
14: "IPROG"
}
opcodes = ("NOP", "READ", "WRITE", "UNKNOWN")
def KnuthMorrisPratt(text, pattern):
'''
Yields all starting positions of copies of the pattern in the text.
    Calling conventions are similar to string.find, but the arguments can be
    lists or iterators rather than just strings; it yields all matches, not
    just the first one; and it does not need the whole text in memory at once.
Whenever it yields, it will have read the text exactly up to and including
the match that caused the yield.
'''
# allow indexing into pattern and protect against change during yield
pattern = list(pattern)
# build table of shift amounts
shifts = [1] * (len(pattern) + 1)
shift = 1
for pos in range(len(pattern)):
while shift <= pos and pattern[pos] != pattern[pos - shift]:
shift += shifts[pos - shift]
shifts[pos + 1] = shift
# do the actual search
startPos = 0
matchLen = 0
for c in text:
while matchLen == len(pattern) or \
matchLen >= 0 and pattern[matchLen] != c:
startPos += shifts[matchLen]
matchLen -= shifts[matchLen]
matchLen += 1
if matchLen == len(pattern):
yield startPos
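# A minimal usage sketch of KnuthMorrisPratt (the byte values are illustrative,
# chosen to match the Spartan-6 sync word that Bitstream.get_header() below
# searches for):
#
#   >>> list(KnuthMorrisPratt(b"\x00\xaa\x99\x55\x66", [0xaa, 0x99, 0x55, 0x66]))
#   [1]
#
# Iterating over a bytes object yields integers, so the pattern can simply be
# given as a list of ints.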
class Bitstream:
def __init__(self, file_name, verbose=False):
self.frame_data = []
self.idcode = 0
self.exp_sign = 0
self.far_min = 0
self.far_maj = 0
self.curr_fdri_write_len = 0
self.curr_crc_check = 0
self.fdri_in_progress = False
with open(file_name, "rb") as f:
self.bytes = f.read()
pos, self.header = self.get_header()
self.body = [
(i << 8) | j
for i, j in zip(self.bytes[pos::2], self.bytes[pos + 1::2])
]
self.parse_bitstream(verbose)
def get_header(self):
pos = next(KnuthMorrisPratt(self.bytes, [0xaa, 0x99, 0x55, 0x66]))
return pos + 4, self.bytes[:pos + 4]
def parse_bitstream(self, verbose):
payload_len = 0
for word in self.body:
if payload_len > 0:
if verbose:
print("\tWord: ", hex(word))
payload_len = self.parse_reg(
reg_addr, word, payload_len, verbose)
continue
else:
packet_header = self.parse_packet_header(word)
opcode = packet_header["opcode"]
reg_addr = packet_header["reg_addr"]
words = packet_header["word_count"]
type = packet_header["type"]
if verbose:
print(
"\tWord: ", hex(word),
'Type: {}, Op: {}, Addr: {}, Words: {}'.format(
type, opcodes[opcode], reg_addr, words))
if opcode and reg_addr in conf_regs:
payload_len = words
continue
def parse_packet_header(self, word):
type = (word >> 13) & 0x7
opcode = (word >> 11) & 0x3
reg_addr = (word >> 5) & 0x3F
if type == 1:
word_count = word & 0x1F
elif type == 2:
word_count = 2
else:
word_count = 0
return {
"type": type,
"opcode": opcode,
"reg_addr": reg_addr,
"word_count": word_count
}
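    # A worked example for parse_packet_header() above (a hedged sketch; the
    # word is made up, not taken from a real bitstream): 0x30A1 decodes as
    #   type       = (0x30A1 >> 13) & 0x7  = 1   (Type 1 packet)
    #   opcode     = (0x30A1 >> 11) & 0x3  = 2   -> opcodes[2] == "WRITE"
    #   reg_addr   = (0x30A1 >> 5)  & 0x3F = 5   -> conf_regs[5] == "CMD"
    #   word_count =  0x30A1        & 0x1F = 1
    # i.e. a request to write one payload word to the CMD register.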
def parse_command(self, word):
return cmd_reg_codes[word]
def parse_cor1(self, word):
return word
def parse_cor2(self, word):
return word
def parse_ctl(self, word):
#decryption
dec = (word >> 6) & 1
#security bits
sb = (word >> 4) & 3
#persist
p = (word >> 3) & 1
#use efuse
efuse = (word >> 2) & 1
#crc extstat disable
crc = (word >> 1) & 1
return {
"decryption": dec,
"security bits": sb,
"pesist": p,
"use efuse": efuse,
"crc extstat disable": crc
}
def parse_cclk_freq(self, word):
ext_mclk = (word >> 14) & 1
mclk_freq = word & 0x3FF
return (ext_mclk, mclk_freq)
def parse_pwrdn(self, word):
en_eyes = (word >> 14) & 1
filter_b = (word >> 5) & 1
en_pgsr = (word >> 4) & 1
en_pwrdn = (word >> 2) & 1
keep_sclk = word & 1
return {
"en_eyes": en_eyes,
"filter_b": filter_b,
"en_pgsr": en_pgsr,
"en_pwrdn": en_pwrdn,
"keep_sclk": keep_sclk
}
def parse_eye_mask(self, word):
return word & 0xFF
def parse_hc_opt(self, word):
return (word >> 6) & 1
def parse_cwdt(self, word):
return word
def parse_pu_gwe(self, word):
return word & 0x3FF
def parse_pu_gts(self, word):
return word & 0x3FF
def parse_mode(self, word):
new_mode = (word >> 13) & 0x1
buswidth = (word >> 11) & 0x3
bootmode = (word >> 8) & 0x7
bootvsel = word & 0xFF
return {
"new_mode": new_mode,
"buswidth": buswidth,
"bootmode": bootmode,
"bootvsel": bootvsel
}
def parse_seu(self, word):
seu_freq = (word >> 4) & 0x3FF
seu_run_on_err = (word >> 3) & 0x1
glut_mask = (word >> 1) & 0x1
seu_enable = word & 0x1
return {
"seu_freq": seu_freq,
"seu_run_on_err": seu_run_on_err,
"glut_mask": glut_mask,
"seu_enable": seu_enable
}
def parse_reg(self, reg_addr, word, payload_len, verbose):
reg = conf_regs[reg_addr]
if reg == "CMD":
command = self.parse_command(word)
if verbose:
print("Command: {}\n".format(command))
elif reg == "FLR":
frame_length = word
if verbose:
print("Frame length: {}\n".format(frame_length))
elif reg == "COR1":
conf_options = self.parse_cor1(word)
if verbose:
print("COR1 options: {}\n".format(conf_options))
elif reg == "COR2":
conf_options = self.parse_cor2(word)
if verbose:
print("COR2 options: {}\n".format(conf_options))
elif reg == "IDCODE":
assert payload_len < 3
if payload_len == 2:
self.idcode = word << 16
elif payload_len == 1:
self.idcode |= word
if verbose:
print("IDCODE: {}\n".format(hex(self.idcode)))
elif reg == "MASK":
mask = word
if verbose:
print("Mask value: {}\n".format(mask))
elif reg == "CTL":
ctl_options = self.parse_ctl(word)
if verbose:
print("CTL options: {}\n".format(ctl_options))
elif reg == "CCLK_FREQ":
cclk_freq_options = self.parse_cclk_freq(word)
if verbose:
print("CCLK_FREQ options: {}\n".format(cclk_freq_options))
elif reg == "PWRDN_REG":
suspend_reg_options = self.parse_pwrdn(word)
if verbose:
print("{} options: {}\n".format(reg, suspend_reg_options))
elif reg == "EYE_MASK":
eye_mask = self.parse_eye_mask(word)
if verbose:
print("{} options: {}\n".format(reg, eye_mask))
elif reg == "HC_OPT_REG":
hc_options = self.parse_hc_opt(word)
if verbose:
print("{} options: {}\n".format(reg, hc_options))
elif reg == "CWDT":
cwdt_options = self.parse_cwdt(word)
if verbose:
print("{} options: {}\n".format(reg, cwdt_options))
elif reg == "PU_GWE":
pu_gwe_sequence = self.parse_pu_gwe(word)
if verbose:
print("{} options: {}\n".format(reg, pu_gwe_sequence))
elif reg == "PU_GTS":
pu_gts_sequence = self.parse_pu_gts(word)
if verbose:
print("{} options: {}\n".format(reg, pu_gts_sequence))
elif reg == "MODE_REG":
mode_options = self.parse_mode(word)
if verbose:
print("{} options: {}\n".format(reg, mode_options))
elif reg == "GENERAL1" or reg == "GENERAL2" \
or reg == "GENERAL3" or reg == "GENERAL4" \
or reg == "GENERAL5":
general_options = word
if verbose:
print("{} options: {}\n".format(reg, general_options))
elif reg == "SEU_OPT":
seu_options = self.parse_seu(word)
if verbose:
print("{} options: {}\n".format(reg, seu_options))
elif reg == "EXP_SIGN":
if payload_len == 2:
self.exp_sign = word << 16
elif payload_len == 1:
self.exp_sign |= word
if verbose:
print("{}: {}\n".format(reg, self.exp_sign))
elif reg == "FAR_MAJ":
            if payload_len == 2:
                self.far_maj = word
            elif payload_len == 1:
                self.far_min = word
if verbose:
print(
"{}: {} FAR_MIN: {}\n".format(
reg, self.far_maj, self.far_min))
elif reg == "FDRI":
if self.fdri_in_progress:
self.frame_data.append(word)
if payload_len == 1:
self.fdri_in_progress = False
return 0
elif payload_len == 2:
self.curr_fdri_write_len = (word & 0xFFF) << 16
elif payload_len == 1:
self.curr_fdri_write_len |= word
self.fdri_in_progress = True
# Check if 0 words actually means read something
payload_len = self.curr_fdri_write_len + 2
if verbose:
print("{}: {}\n".format(reg, self.curr_fdri_write_len))
return payload_len
elif reg == "CRC":
if payload_len == 2:
self.curr_crc_check = (word & 0xFFF) << 16
elif payload_len == 1:
self.curr_crc_check |= word
if verbose:
print("{}: {}\n".format(reg, self.curr_crc_check))
payload_len -= 1
return payload_len
def write_frames_txt(self, file_name):
'''Write frame data in a more readable format'''
frame_stream = StringIO()
for i in range(len(self.frame_data)):
if i % 65 == 0:
frame_stream.write("\nFrame {:4}\n".format(i // 65))
#IOB word
if i % 65 == 32:
frame_stream.write(
"\n#{:3}:{:6}\n".format(i % 65, hex(self.frame_data[i])))
else:
frame_stream.write(
"#{:3}:{:6},".format(i % 65, hex(self.frame_data[i])))
with open(file_name, "w") as f:
print(frame_stream.getvalue(), file=f)
def write_frames(self, file_name):
'''Write configuration data to frames file'''
frame_stream = StringIO()
for i in range(len(self.frame_data)):
if i % 65 == 0:
frame_stream.write("0x{:08x} ".format(i // 65))
frame_stream.write("0x{:04x}".format(self.frame_data[i]))
if i % 65 == 64:
frame_stream.write("\n")
elif i < len(self.frame_data) - 1:
frame_stream.write(",")
with open(file_name, "w") as f:
print(frame_stream.getvalue(), file=f)
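    # For reference (derived from the format strings above, not from an
    # external specification): write_frames() emits one line per 65-word
    # frame, "<frame index as 0x%08x> <words as comma-separated 0x%04x>",
    # while write_frames_txt() groups the same 65 words under a
    # "Frame NNNN" banner and puts the IOB word (index 32) on its own line.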
def main(args):
verbose = not args.silent
bitstream = Bitstream(args.bitstream, verbose)
print("Frame data length: ", len(bitstream.frame_data))
if args.frames_out:
bitstream.write_frames(args.frames_out)
if verbose:
bitstream.write_frames_txt(args.frames_out + ".txt")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--bitstream', help='Input bitstream')
parser.add_argument('--frames_out', help='Output frames file')
parser.add_argument(
'--silent', help="Don't print analysis details", action='store_true')
args = parser.parse_args()
main(args)
|
|
"""Tests the Home Assistant workday binary sensor."""
from datetime import date
import pytest
import voluptuous as vol
import homeassistant.components.workday.binary_sensor as binary_sensor
from homeassistant.setup import setup_component
from tests.async_mock import patch
from tests.common import assert_setup_component, get_test_home_assistant
FUNCTION_PATH = "homeassistant.components.workday.binary_sensor.get_date"
class TestWorkdaySetup:
"""Test class for workday sensor."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
# Set valid default config for test
self.config_province = {
"binary_sensor": {"platform": "workday", "country": "DE", "province": "BW"}
}
self.config_noprovince = {
"binary_sensor": {"platform": "workday", "country": "DE"}
}
self.config_invalidprovince = {
"binary_sensor": {
"platform": "workday",
"country": "DE",
"province": "invalid",
}
}
self.config_state = {
"binary_sensor": {"platform": "workday", "country": "US", "province": "CA"}
}
self.config_nostate = {
"binary_sensor": {"platform": "workday", "country": "US"}
}
self.config_includeholiday = {
"binary_sensor": {
"platform": "workday",
"country": "DE",
"province": "BW",
"workdays": ["holiday"],
"excludes": ["sat", "sun"],
}
}
self.config_example1 = {
"binary_sensor": {
"platform": "workday",
"country": "US",
"workdays": ["mon", "tue", "wed", "thu", "fri"],
"excludes": ["sat", "sun"],
}
}
self.config_example2 = {
"binary_sensor": {
"platform": "workday",
"country": "DE",
"province": "BW",
"workdays": ["mon", "wed", "fri"],
"excludes": ["sat", "sun", "holiday"],
"add_holidays": ["2020-02-24"],
}
}
self.config_remove_holidays = {
"binary_sensor": {
"platform": "workday",
"country": "US",
"workdays": ["mon", "tue", "wed", "thu", "fri"],
"excludes": ["sat", "sun", "holiday"],
"remove_holidays": ["2020-12-25", "2020-11-26"],
}
}
self.config_tomorrow = {
"binary_sensor": {"platform": "workday", "country": "DE", "days_offset": 1}
}
self.config_day_after_tomorrow = {
"binary_sensor": {"platform": "workday", "country": "DE", "days_offset": 2}
}
self.config_yesterday = {
"binary_sensor": {"platform": "workday", "country": "DE", "days_offset": -1}
}
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_valid_country(self):
"""Test topic name/filter validation."""
        # Strings containing lone surrogates (U+D800 to U+DFFF) are not valid UTF-8 and must be rejected
with pytest.raises(vol.Invalid):
binary_sensor.valid_country("\ud800")
with pytest.raises(vol.Invalid):
binary_sensor.valid_country("\udfff")
# Country MUST NOT be empty
with pytest.raises(vol.Invalid):
binary_sensor.valid_country("")
# Country must be supported by holidays
with pytest.raises(vol.Invalid):
binary_sensor.valid_country("HomeAssistantLand")
def test_setup_component_province(self):
"""Set up workday component."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_province)
self.hass.block_till_done()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity is not None
# Freeze time to a workday - Mar 15th, 2017
@patch(FUNCTION_PATH, return_value=date(2017, 3, 15))
def test_workday_province(self, mock_date):
"""Test if workdays are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_province)
self.hass.block_till_done()
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "on"
# Freeze time to a weekend - Mar 12th, 2017
@patch(FUNCTION_PATH, return_value=date(2017, 3, 12))
def test_weekend_province(self, mock_date):
"""Test if weekends are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_province)
self.hass.block_till_done()
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "off"
# Freeze time to a public holiday in province BW - Jan 6th, 2017
@patch(FUNCTION_PATH, return_value=date(2017, 1, 6))
def test_public_holiday_province(self, mock_date):
"""Test if public holidays are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_province)
self.hass.block_till_done()
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "off"
def test_setup_component_noprovince(self):
"""Set up workday component."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_noprovince)
self.hass.block_till_done()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity is not None
# Freeze time to a public holiday in province BW - Jan 6th, 2017
@patch(FUNCTION_PATH, return_value=date(2017, 1, 6))
def test_public_holiday_noprovince(self, mock_date):
"""Test if public holidays are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_noprovince)
self.hass.block_till_done()
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "on"
# Freeze time to a public holiday in state CA - Mar 31st, 2017
@patch(FUNCTION_PATH, return_value=date(2017, 3, 31))
def test_public_holiday_state(self, mock_date):
"""Test if public holidays are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_state)
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "off"
# Freeze time to a public holiday in state CA - Mar 31st, 2017
@patch(FUNCTION_PATH, return_value=date(2017, 3, 31))
def test_public_holiday_nostate(self, mock_date):
"""Test if public holidays are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_nostate)
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "on"
def test_setup_component_invalidprovince(self):
"""Set up workday component."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_invalidprovince)
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity is None
# Freeze time to a public holiday in province BW - Jan 6th, 2017
@patch(FUNCTION_PATH, return_value=date(2017, 1, 6))
def test_public_holiday_includeholiday(self, mock_date):
"""Test if public holidays are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_includeholiday)
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "on"
# Freeze time to a saturday to test offset - Aug 5th, 2017
@patch(FUNCTION_PATH, return_value=date(2017, 8, 5))
def test_tomorrow(self, mock_date):
"""Test if tomorrow are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_tomorrow)
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "off"
# Freeze time to a saturday to test offset - Aug 5th, 2017
@patch(FUNCTION_PATH, return_value=date(2017, 8, 5))
def test_day_after_tomorrow(self, mock_date):
"""Test if the day after tomorrow are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_day_after_tomorrow)
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "on"
# Freeze time to a saturday to test offset - Aug 5th, 2017
@patch(FUNCTION_PATH, return_value=date(2017, 8, 5))
def test_yesterday(self, mock_date):
"""Test if yesterday are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_yesterday)
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "on"
    # Freeze time to Presidents Day to test a holiday on a workday - Feb 17th, 2020
    # Presidents Day, Feb 17th 2020, falls on a Monday.
@patch(FUNCTION_PATH, return_value=date(2020, 2, 17))
def test_config_example1_holiday(self, mock_date):
"""Test if public holidays are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_example1)
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "on"
# Freeze time to test tue - Feb 18th, 2020
@patch(FUNCTION_PATH, return_value=date(2020, 2, 18))
def test_config_example2_tue(self, mock_date):
"""Test if public holidays are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_example2)
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "off"
# Freeze time to test mon, but added as holiday - Feb 24th, 2020
@patch(FUNCTION_PATH, return_value=date(2020, 2, 24))
def test_config_example2_add_holiday(self, mock_date):
"""Test if public holidays are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_example2)
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "off"
def test_day_to_string(self):
"""Test if day_to_string is behaving correctly."""
assert binary_sensor.day_to_string(0) == "mon"
assert binary_sensor.day_to_string(1) == "tue"
assert binary_sensor.day_to_string(7) == "holiday"
assert binary_sensor.day_to_string(8) is None
# Freeze time to test Fri, but remove holiday - December 25, 2020
@patch(FUNCTION_PATH, return_value=date(2020, 12, 25))
def test_config_remove_holidays_xmas(self, mock_date):
"""Test if removed holidays are reported correctly."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config_remove_holidays)
self.hass.start()
entity = self.hass.states.get("binary_sensor.workday_sensor")
assert entity.state == "on"
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import sys
import tempfile
# pylint: disable=g-bad-todo
# TODO(#6568): Remove this hack that makes dlopen() not crash.
# pylint: enable=g-bad-todo
# pylint: disable=g-import-not-at-top
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
def _sparse_id_tensor(shape, vocab_size, seed=112123):
  # Returns an arbitrary `SparseTensor` with the given shape and vocab size.
np.random.seed(seed)
indices = np.array(list(itertools.product(*[range(s) for s in shape])))
# In order to create some sparsity, we include a value outside the vocab.
values = np.random.randint(0, vocab_size + 1, size=np.prod(shape))
# Remove entries outside the vocabulary.
keep = values < vocab_size
indices = indices[keep]
values = values[keep]
return sparse_tensor_lib.SparseTensor(
indices=indices, values=values, dense_shape=shape)
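# A minimal usage sketch of _sparse_id_tensor (shape and vocab size are
# illustrative): _sparse_id_tensor([4, 3], vocab_size=5) returns a SparseTensor
# with dense_shape [4, 3] whose values are ids in [0, 5); positions that drew
# the out-of-vocab value 5 are simply absent, which is what produces the
# sparsity used by the reshaping tests below.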
class FeatureColumnTest(test.TestCase):
def testImmutability(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
with self.assertRaises(AttributeError):
a.column_name = "bbb"
def testSparseColumnWithHashBucket(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dtype, dtypes.string)
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, dtype=dtypes.int64)
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dtype, dtypes.int64)
with self.assertRaisesRegexp(ValueError, "dtype must be string or integer"):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, dtype=dtypes.float32)
def testSparseColumnWithVocabularyFile(self):
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454)
self.assertEqual(b.dtype, dtypes.string)
self.assertEqual(b.lookup_config.vocab_size, 454)
self.assertEqual(b.lookup_config.vocabulary_file, "a_file")
with self.assertRaises(ValueError):
# Vocabulary size should be defined if vocabulary_file is used.
fc.sparse_column_with_vocabulary_file("bbb", vocabulary_file="somefile")
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.int64)
self.assertEqual(b.dtype, dtypes.int64)
with self.assertRaisesRegexp(ValueError, "dtype must be string or integer"):
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.float32)
def testWeightedSparseColumn(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertEqual(weighted_ids.name, "ids_weighted_by_weights")
def testEmbeddingColumn(self):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, combiner="sum")
b = fc.embedding_column(a, dimension=4, combiner="mean")
self.assertEqual(b.sparse_id_column.name, "aaa")
self.assertEqual(b.dimension, 4)
self.assertEqual(b.combiner, "mean")
def testSharedEmbeddingColumn(self):
a1 = fc.sparse_column_with_keys("a1", ["marlo", "omar", "stringer"])
a2 = fc.sparse_column_with_keys("a2", ["marlo", "omar", "stringer"])
b = fc.shared_embedding_columns([a1, a2], dimension=4, combiner="mean")
self.assertEqual(len(b), 2)
self.assertEqual(b[0].shared_embedding_name, "a1_a2_shared_embedding")
self.assertEqual(b[1].shared_embedding_name, "a1_a2_shared_embedding")
# Create a sparse id tensor for a1.
input_tensor_c1 = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]], values=[0, 1, 2], dense_shape=[3, 3])
# Create a sparse id tensor for a2.
input_tensor_c2 = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]], values=[0, 1, 2], dense_shape=[3, 3])
with variable_scope.variable_scope("run_1"):
b1 = feature_column_ops.input_from_feature_columns({
b[0]: input_tensor_c1
}, [b[0]])
b2 = feature_column_ops.input_from_feature_columns({
b[1]: input_tensor_c2
}, [b[1]])
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
b1_value = b1.eval()
b2_value = b2.eval()
for i in range(len(b1_value)):
self.assertAllClose(b1_value[i], b2_value[i])
    # Test the case when a shared_embedding_name is explicitly specified.
d = fc.shared_embedding_columns(
[a1, a2],
dimension=4,
combiner="mean",
shared_embedding_name="my_shared_embedding")
    # a3 is a completely different sparse column from a1 and a2, but since the
    # same shared_embedding_name is passed in, a3 will have the same embedding
    # as a1 and a2.
a3 = fc.sparse_column_with_keys("a3", ["cathy", "tom", "anderson"])
e = fc.shared_embedding_columns(
[a3],
dimension=4,
combiner="mean",
shared_embedding_name="my_shared_embedding")
with variable_scope.variable_scope("run_2"):
d1 = feature_column_ops.input_from_feature_columns({
d[0]: input_tensor_c1
}, [d[0]])
e1 = feature_column_ops.input_from_feature_columns({
e[0]: input_tensor_c1
}, [e[0]])
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
d1_value = d1.eval()
e1_value = e1.eval()
for i in range(len(d1_value)):
self.assertAllClose(d1_value[i], e1_value[i])
def testSharedEmbeddingColumnDeterminism(self):
# Tests determinism in auto-generated shared_embedding_name.
sparse_id_columns = tuple([
fc.sparse_column_with_keys(k, ["foo", "bar"])
for k in ["07", "02", "00", "03", "05", "01", "09", "06", "04", "08"]
])
output = fc.shared_embedding_columns(
sparse_id_columns, dimension=2, combiner="mean")
self.assertEqual(len(output), 10)
for x in output:
self.assertEqual(x.shared_embedding_name,
"00_01_02_plus_7_others_shared_embedding")
def testSharedEmbeddingColumnErrors(self):
# Tries passing in a string.
with self.assertRaises(TypeError):
invalid_string = "Invalid string."
fc.shared_embedding_columns(invalid_string, dimension=2, combiner="mean")
# Tries passing in a set of sparse columns.
with self.assertRaises(TypeError):
invalid_set = set([
fc.sparse_column_with_keys("a", ["foo", "bar"]),
fc.sparse_column_with_keys("b", ["foo", "bar"]),
])
fc.shared_embedding_columns(invalid_set, dimension=2, combiner="mean")
def testOneHotColumn(self):
a = fc.sparse_column_with_keys("a", ["a", "b", "c", "d"])
onehot_a = fc.one_hot_column(a)
self.assertEqual(onehot_a.sparse_id_column.name, "a")
self.assertEqual(onehot_a.length, 4)
b = fc.sparse_column_with_hash_bucket(
"b", hash_bucket_size=100, combiner="sum")
onehot_b = fc.one_hot_column(b)
self.assertEqual(onehot_b.sparse_id_column.name, "b")
self.assertEqual(onehot_b.length, 100)
def testOneHotReshaping(self):
"""Tests reshaping behavior of `OneHotColumn`."""
id_tensor_shape = [3, 2, 4, 5]
sparse_column = fc.sparse_column_with_keys(
"animals", ["squirrel", "moose", "dragon", "octopus"])
one_hot = fc.one_hot_column(sparse_column)
vocab_size = len(sparse_column.lookup_config.keys)
id_tensor = _sparse_id_tensor(id_tensor_shape, vocab_size)
for output_rank in range(1, len(id_tensor_shape) + 1):
with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
one_hot_output = one_hot._to_dnn_input_layer(
id_tensor, output_rank=output_rank)
with self.test_session() as sess:
one_hot_value = sess.run(one_hot_output)
expected_shape = (id_tensor_shape[:output_rank - 1] + [vocab_size])
self.assertEquals(expected_shape, list(one_hot_value.shape))
def testOneHotColumnForWeightedSparseColumn(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
one_hot = fc.one_hot_column(weighted_ids)
self.assertEqual(one_hot.sparse_id_column.name, "ids_weighted_by_weights")
self.assertEqual(one_hot.length, 3)
def testRealValuedColumn(self):
a = fc.real_valued_column("aaa")
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dimension, 1)
b = fc.real_valued_column("bbb", 10)
self.assertEqual(b.dimension, 10)
self.assertTrue(b.default_value is None)
c = fc.real_valued_column("ccc", dimension=None)
self.assertIsNone(c.dimension)
self.assertTrue(c.default_value is None)
with self.assertRaisesRegexp(TypeError, "dimension must be an integer"):
fc.real_valued_column("d3", dimension=1.0)
with self.assertRaisesRegexp(ValueError,
"dimension must be greater than 0"):
fc.real_valued_column("d3", dimension=0)
with self.assertRaisesRegexp(ValueError,
"dtype must be convertible to float"):
fc.real_valued_column("d3", dtype=dtypes.string)
# default_value is an integer.
c1 = fc.real_valued_column("c1", default_value=2)
self.assertListEqual(list(c1.default_value), [2.])
c2 = fc.real_valued_column("c2", default_value=2, dtype=dtypes.int32)
self.assertListEqual(list(c2.default_value), [2])
c3 = fc.real_valued_column("c3", dimension=4, default_value=2)
self.assertListEqual(list(c3.default_value), [2, 2, 2, 2])
c4 = fc.real_valued_column(
"c4", dimension=4, default_value=2, dtype=dtypes.int32)
self.assertListEqual(list(c4.default_value), [2, 2, 2, 2])
c5 = fc.real_valued_column("c5", dimension=None, default_value=2)
self.assertListEqual(list(c5.default_value), [2])
# default_value is a float.
d1 = fc.real_valued_column("d1", default_value=2.)
self.assertListEqual(list(d1.default_value), [2.])
d2 = fc.real_valued_column("d2", dimension=4, default_value=2.)
self.assertListEqual(list(d2.default_value), [2., 2., 2., 2.])
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("d3", default_value=2., dtype=dtypes.int32)
d4 = fc.real_valued_column("d4", dimension=None, default_value=2.)
self.assertListEqual(list(d4.default_value), [2.])
# default_value is neither integer nor float.
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("e1", default_value="string")
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("e1", dimension=3, default_value=[1, 3., "string"])
# default_value is a list of integers.
f1 = fc.real_valued_column("f1", default_value=[2])
self.assertListEqual(list(f1.default_value), [2])
f2 = fc.real_valued_column("f2", dimension=3, default_value=[2, 2, 2])
self.assertListEqual(list(f2.default_value), [2., 2., 2.])
f3 = fc.real_valued_column(
"f3", dimension=3, default_value=[2, 2, 2], dtype=dtypes.int32)
self.assertListEqual(list(f3.default_value), [2, 2, 2])
# default_value is a list of floats.
g1 = fc.real_valued_column("g1", default_value=[2.])
self.assertListEqual(list(g1.default_value), [2.])
g2 = fc.real_valued_column("g2", dimension=3, default_value=[2., 2, 2])
self.assertListEqual(list(g2.default_value), [2., 2., 2.])
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("g3", default_value=[2.], dtype=dtypes.int32)
with self.assertRaisesRegexp(
ValueError, "The length of default_value must be equal to dimension"):
fc.real_valued_column("g4", dimension=3, default_value=[2.])
# Default value is a list but dimension is None.
with self.assertRaisesRegexp(ValueError,
"Only scalar default value is supported "
"when dimension is None"):
fc.real_valued_column("g5", dimension=None, default_value=[2., 3.])
# Test that the normalizer_fn gets stored for a real_valued_column
normalizer = lambda x: x - 1
h1 = fc.real_valued_column("h1", normalizer=normalizer)
self.assertEqual(normalizer(10), h1.normalizer_fn(10))
# Test that normalizer is not stored within key
self.assertFalse("normalizer" in g1.key)
self.assertFalse("normalizer" in g2.key)
self.assertFalse("normalizer" in h1.key)
def testRealValuedColumnReshaping(self):
"""Tests reshaping behavior of `RealValuedColumn`."""
batch_size = 4
sequence_length = 8
dimensions = [3, 4, 5]
np.random.seed(2222)
input_shape = [batch_size, sequence_length] + dimensions
real_valued_input = np.random.rand(*input_shape)
real_valued_column = fc.real_valued_column("values")
for output_rank in range(1, 3 + len(dimensions)):
with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
real_valued_output = real_valued_column._to_dnn_input_layer(
constant_op.constant(
real_valued_input, dtype=dtypes.float32),
output_rank=output_rank)
with self.test_session() as sess:
real_valued_eval = sess.run(real_valued_output)
expected_shape = (input_shape[:output_rank - 1] +
[np.prod(input_shape[output_rank - 1:])])
self.assertEquals(expected_shape, list(real_valued_eval.shape))
def testRealValuedColumnDensification(self):
"""Tests densification behavior of `RealValuedColumn`."""
# No default value, dimension 1 float.
real_valued_column = fc.real_valued_column(
"sparse_real_valued1", dimension=None)
sparse_tensor = sparse_tensor_lib.SparseTensor(
values=[2.0, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
densified_output = real_valued_column._to_dnn_input_layer(sparse_tensor)
# With default value, dimension 2 int.
real_valued_column_with_default = fc.real_valued_column(
"sparse_real_valued2",
dimension=None,
default_value=-1,
dtype=dtypes.int32)
sparse_tensor2 = sparse_tensor_lib.SparseTensor(
values=[2, 5, 9, 0],
indices=[[0, 0], [1, 1], [2, 0], [2, 1]],
dense_shape=[3, 2])
densified_output2 = real_valued_column_with_default._to_dnn_input_layer(
sparse_tensor2)
with self.test_session() as sess:
densified_output_eval, densified_output_eval2 = sess.run(
[densified_output, densified_output2])
self.assertAllEqual(densified_output_eval, [[2.0], [0.0], [5.0]])
self.assertAllEqual(densified_output_eval2, [[2, -1], [-1, 5], [9, 0]])
def testBucketizedColumnNameEndsWithUnderscoreBucketized(self):
a = fc.bucketized_column(fc.real_valued_column("aaa"), [0, 4])
self.assertEqual(a.name, "aaa_bucketized")
def testBucketizedColumnRequiresRealValuedColumn(self):
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn"):
fc.bucketized_column("bbb", [0])
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn"):
fc.bucketized_column(
fc.sparse_column_with_integerized_feature(
column_name="bbb", bucket_size=10), [0])
def testBucketizedColumnRequiresRealValuedColumnDimension(self):
with self.assertRaisesRegexp(ValueError,
"source_column must have a defined dimension"):
fc.bucketized_column(fc.real_valued_column("bbb", dimension=None), [0])
def testBucketizedColumnRequiresSortedBuckets(self):
with self.assertRaisesRegexp(ValueError,
"boundaries must be a sorted list"):
fc.bucketized_column(fc.real_valued_column("ccc"), [5, 0, 4])
def testBucketizedColumnWithSameBucketBoundaries(self):
a_bucketized = fc.bucketized_column(
fc.real_valued_column("a"), [1., 2., 2., 3., 3.])
self.assertEqual(a_bucketized.name, "a_bucketized")
self.assertTupleEqual(a_bucketized.boundaries, (1., 2., 3.))
def testCrossedColumnNameCreatesSortedNames(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
bucket = fc.bucketized_column(fc.real_valued_column("cost"), [0, 4])
crossed = fc.crossed_column(set([b, bucket, a]), hash_bucket_size=10000)
self.assertEqual("aaa_X_bbb_X_cost_bucketized", crossed.name,
"name should be generated by sorted column names")
self.assertEqual("aaa", crossed.columns[0].name)
self.assertEqual("bbb", crossed.columns[1].name)
self.assertEqual("cost_bucketized", crossed.columns[2].name)
def testCrossedColumnNotSupportRealValuedColumn(self):
b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
with self.assertRaisesRegexp(
TypeError, "columns must be a set of _SparseColumn, _CrossedColumn, "
"or _BucketizedColumn instances"):
fc.crossed_column(
set([b, fc.real_valued_column("real")]), hash_bucket_size=10000)
def testWeightedSparseColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.string),
"weights": parsing_ops.VarLenFeature(dtypes.float32)
}, weighted_ids.config)
weighted_ids = fc.weighted_sparse_column(ids, "weights", dtype=dtypes.int32)
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.string),
"weights": parsing_ops.VarLenFeature(dtypes.int32)
}, weighted_ids.config)
with self.assertRaisesRegexp(ValueError,
"dtype is not convertible to float"):
weighted_ids = fc.weighted_sparse_column(
ids, "weights", dtype=dtypes.string)
def testRealValuedColumnDtypes(self):
rvc = fc.real_valued_column("rvc")
self.assertDictEqual(
{
"rvc": parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32)
},
rvc.config)
rvc = fc.real_valued_column("rvc", dimension=None)
self.assertDictEqual(
{
"rvc": parsing_ops.VarLenFeature(dtype=dtypes.float32)
}, rvc.config)
rvc = fc.real_valued_column("rvc", dtype=dtypes.int32)
self.assertDictEqual(
{
"rvc": parsing_ops.FixedLenFeature(
[1], dtype=dtypes.int32)
},
rvc.config)
rvc = fc.real_valued_column("rvc", dimension=None, dtype=dtypes.int32)
self.assertDictEqual(
{
"rvc": parsing_ops.VarLenFeature(dtype=dtypes.int32)
}, rvc.config)
with self.assertRaisesRegexp(ValueError,
"dtype must be convertible to float"):
fc.real_valued_column("rvc", dtype=dtypes.string)
with self.assertRaisesRegexp(ValueError,
"dtype must be convertible to float"):
fc.real_valued_column("rvc", dimension=None, dtype=dtypes.string)
def testSparseColumnDtypes(self):
sc = fc.sparse_column_with_integerized_feature("sc", 10)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int64)
}, sc.config)
sc = fc.sparse_column_with_integerized_feature("sc", 10, dtype=dtypes.int32)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int32)
}, sc.config)
with self.assertRaisesRegexp(ValueError, "dtype must be an integer"):
fc.sparse_column_with_integerized_feature("sc", 10, dtype=dtypes.float32)
def testSparseColumnSingleBucket(self):
sc = fc.sparse_column_with_integerized_feature("sc", 1)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int64)
}, sc.config)
self.assertEqual(1, sc._wide_embedding_lookup_arguments(None).vocab_size)
def testSparseColumnAcceptsDenseScalar(self):
"""Tests that `SparseColumn`s accept dense scalar inputs."""
batch_size = 4
dense_scalar_input = [1, 2, 3, 4]
sparse_column = fc.sparse_column_with_integerized_feature("values", 10)
features = {"values":
constant_op.constant(dense_scalar_input, dtype=dtypes.int64)}
sparse_column.insert_transformed_feature(features)
sparse_output = features[sparse_column]
expected_shape = [batch_size, 1]
with self.test_session() as sess:
sparse_result = sess.run(sparse_output)
self.assertEquals(expected_shape, list(sparse_result.dense_shape))
def testCreateFeatureSpec(self):
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
embedding_col = fc.embedding_column(
fc.sparse_column_with_hash_bucket(
"sparse_column_for_embedding", hash_bucket_size=10),
dimension=4)
sparse_id_col = fc.sparse_column_with_keys("id_column",
["marlo", "omar", "stringer"])
weighted_id_col = fc.weighted_sparse_column(sparse_id_col,
"id_weights_column")
real_valued_col1 = fc.real_valued_column("real_valued_column1")
real_valued_col2 = fc.real_valued_column("real_valued_column2", 5)
real_valued_col3 = fc.real_valued_column(
"real_valued_column3", dimension=None)
bucketized_col1 = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization1"), [0, 4])
bucketized_col2 = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization2", 4),
[0, 4])
a = fc.sparse_column_with_hash_bucket("cross_aaa", hash_bucket_size=100)
b = fc.sparse_column_with_hash_bucket("cross_bbb", hash_bucket_size=100)
cross_col = fc.crossed_column(set([a, b]), hash_bucket_size=10000)
feature_columns = set([
sparse_col, embedding_col, weighted_id_col, real_valued_col1,
real_valued_col2, real_valued_col3, bucketized_col1, bucketized_col2,
cross_col
])
expected_config = {
"sparse_column":
parsing_ops.VarLenFeature(dtypes.string),
"sparse_column_for_embedding":
parsing_ops.VarLenFeature(dtypes.string),
"id_column":
parsing_ops.VarLenFeature(dtypes.string),
"id_weights_column":
parsing_ops.VarLenFeature(dtypes.float32),
"real_valued_column1":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32),
"real_valued_column2":
parsing_ops.FixedLenFeature(
[5], dtype=dtypes.float32),
"real_valued_column3":
parsing_ops.VarLenFeature(dtype=dtypes.float32),
"real_valued_column_for_bucketization1":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32),
"real_valued_column_for_bucketization2":
parsing_ops.FixedLenFeature(
[4], dtype=dtypes.float32),
"cross_aaa":
parsing_ops.VarLenFeature(dtypes.string),
"cross_bbb":
parsing_ops.VarLenFeature(dtypes.string)
}
config = fc.create_feature_spec_for_parsing(feature_columns)
self.assertDictEqual(expected_config, config)
# Test that the same config is parsed out if we pass a dictionary.
feature_columns_dict = {
str(i): val
for i, val in enumerate(feature_columns)
}
config = fc.create_feature_spec_for_parsing(feature_columns_dict)
self.assertDictEqual(expected_config, config)
def testCreateFeatureSpec_RealValuedColumnWithDefaultValue(self):
real_valued_col1 = fc.real_valued_column(
"real_valued_column1", default_value=2)
real_valued_col2 = fc.real_valued_column(
"real_valued_column2", 5, default_value=4)
real_valued_col3 = fc.real_valued_column(
"real_valued_column3", default_value=[8])
real_valued_col4 = fc.real_valued_column(
"real_valued_column4", 3, default_value=[1, 0, 6])
real_valued_col5 = fc.real_valued_column(
"real_valued_column5", dimension=None, default_value=2)
feature_columns = [
real_valued_col1, real_valued_col2, real_valued_col3, real_valued_col4,
real_valued_col5
]
config = fc.create_feature_spec_for_parsing(feature_columns)
self.assertEqual(5, len(config))
self.assertDictEqual(
{
"real_valued_column1":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32, default_value=[2.]),
"real_valued_column2":
parsing_ops.FixedLenFeature(
[5],
dtype=dtypes.float32,
default_value=[4., 4., 4., 4., 4.]),
"real_valued_column3":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32, default_value=[8.]),
"real_valued_column4":
parsing_ops.FixedLenFeature(
[3], dtype=dtypes.float32, default_value=[1., 0., 6.]),
"real_valued_column5":
parsing_ops.VarLenFeature(dtype=dtypes.float32)
},
config)
def testCreateSequenceFeatureSpec(self):
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
embedding_col = fc.embedding_column(
fc.sparse_column_with_hash_bucket(
"sparse_column_for_embedding", hash_bucket_size=10),
dimension=4)
sparse_id_col = fc.sparse_column_with_keys("id_column",
["marlo", "omar", "stringer"])
weighted_id_col = fc.weighted_sparse_column(sparse_id_col,
"id_weights_column")
real_valued_col1 = fc.real_valued_column("real_valued_column", dimension=2)
real_valued_col2 = fc.real_valued_column(
"real_valued_default_column", dimension=5, default_value=3.0)
real_valued_col3 = fc.real_valued_column(
"real_valued_var_len_column", dimension=None, default_value=3.0)
feature_columns = set([
sparse_col, embedding_col, weighted_id_col, real_valued_col1,
real_valued_col2, real_valued_col3
])
feature_spec = fc._create_sequence_feature_spec_for_parsing(feature_columns)
expected_feature_spec = {
"sparse_column":
parsing_ops.VarLenFeature(dtypes.string),
"sparse_column_for_embedding":
parsing_ops.VarLenFeature(dtypes.string),
"id_column":
parsing_ops.VarLenFeature(dtypes.string),
"id_weights_column":
parsing_ops.VarLenFeature(dtypes.float32),
"real_valued_column":
parsing_ops.FixedLenSequenceFeature(
shape=[2], dtype=dtypes.float32, allow_missing=False),
"real_valued_default_column":
parsing_ops.FixedLenSequenceFeature(
shape=[5], dtype=dtypes.float32, allow_missing=True),
"real_valued_var_len_column":
parsing_ops.VarLenFeature(dtype=dtypes.float32)
}
self.assertDictEqual(expected_feature_spec, feature_spec)
def testMakePlaceHolderTensorsForBaseFeatures(self):
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
real_valued_col = fc.real_valued_column("real_valued_column", 5)
vlen_real_valued_col = fc.real_valued_column(
"vlen_real_valued_column", dimension=None)
bucketized_col = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization"), [0, 4])
feature_columns = set(
[sparse_col, real_valued_col, vlen_real_valued_col, bucketized_col])
placeholders = (
fc.make_place_holder_tensors_for_base_features(feature_columns))
self.assertEqual(4, len(placeholders))
self.assertTrue(
isinstance(placeholders["sparse_column"],
sparse_tensor_lib.SparseTensor))
self.assertTrue(
isinstance(placeholders["vlen_real_valued_column"],
sparse_tensor_lib.SparseTensor))
placeholder = placeholders["real_valued_column"]
self.assertGreaterEqual(
placeholder.name.find(u"Placeholder_real_valued_column"), 0)
self.assertEqual(dtypes.float32, placeholder.dtype)
self.assertEqual([None, 5], placeholder.get_shape().as_list())
placeholder = placeholders["real_valued_column_for_bucketization"]
self.assertGreaterEqual(
placeholder.name.find(
u"Placeholder_real_valued_column_for_bucketization"), 0)
self.assertEqual(dtypes.float32, placeholder.dtype)
self.assertEqual([None, 1], placeholder.get_shape().as_list())
def testInitEmbeddingColumnWeightsFromCkpt(self):
sparse_col = fc.sparse_column_with_hash_bucket(
column_name="object_in_image", hash_bucket_size=4)
# Create _EmbeddingColumn which randomly initializes embedding of size
# [4, 16].
embedding_col = fc.embedding_column(sparse_col, dimension=16)
# Creating a SparseTensor which has all the ids possible for the given
# vocab.
input_tensor = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2], [3, 3]],
values=[0, 1, 2, 3],
dense_shape=[4, 4])
# Invoking 'layers.input_from_feature_columns' will create the embedding
# variable. Creating under scope 'run_1' so as to prevent name conflicts
# when creating embedding variable for 'embedding_column_pretrained'.
with variable_scope.variable_scope("run_1"):
with variable_scope.variable_scope(embedding_col.name):
# This will return a [4, 16] tensor which is same as embedding variable.
embeddings = feature_column_ops.input_from_feature_columns({
embedding_col: input_tensor
}, [embedding_col])
save = saver.Saver()
ckpt_dir_prefix = os.path.join(self.get_temp_dir(),
"init_embedding_col_w_from_ckpt")
ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
saved_embedding = embeddings.eval()
save.save(sess, checkpoint_path)
embedding_col_initialized = fc.embedding_column(
sparse_id_column=sparse_col,
dimension=16,
ckpt_to_load_from=checkpoint_path,
tensor_name_in_ckpt=("run_1/object_in_image_embedding/"
"input_from_feature_columns/object"
"_in_image_embedding/weights"))
with variable_scope.variable_scope("run_2"):
# This will initialize the embedding from provided checkpoint and return a
# [4, 16] tensor which is same as embedding variable. Since we didn't
# modify embeddings, this should be same as 'saved_embedding'.
pretrained_embeddings = feature_column_ops.input_from_feature_columns({
embedding_col_initialized: input_tensor
}, [embedding_col_initialized])
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
loaded_embedding = pretrained_embeddings.eval()
self.assertAllClose(saved_embedding, loaded_embedding)
def testInitCrossedColumnWeightsFromCkpt(self):
sparse_col_1 = fc.sparse_column_with_hash_bucket(
column_name="col_1", hash_bucket_size=4)
sparse_col_2 = fc.sparse_column_with_hash_bucket(
column_name="col_2", hash_bucket_size=4)
crossed_col = fc.crossed_column(
columns=[sparse_col_1, sparse_col_2], hash_bucket_size=4)
input_tensor = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2], [3, 3]],
values=[0, 1, 2, 3],
dense_shape=[4, 4])
# Invoking 'weighted_sum_from_feature_columns' will create the crossed
# column weights variable.
with variable_scope.variable_scope("run_1"):
with variable_scope.variable_scope(crossed_col.name):
# Returns looked up column weights which is same as crossed column
# weights as well as actual references to weights variables.
_, col_weights, _ = (
feature_column_ops.weighted_sum_from_feature_columns({
sparse_col_1.name: input_tensor,
sparse_col_2.name: input_tensor
}, [crossed_col], 1))
# Update the weights since default initializer initializes all weights
# to 0.0.
for weight in col_weights.values():
assign_op = state_ops.assign(weight[0], weight[0] + 0.5)
save = saver.Saver()
ckpt_dir_prefix = os.path.join(self.get_temp_dir(),
"init_crossed_col_w_from_ckpt")
ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(assign_op)
saved_col_weights = col_weights[crossed_col][0].eval()
save.save(sess, checkpoint_path)
crossed_col_initialized = fc.crossed_column(
columns=[sparse_col_1, sparse_col_2],
hash_bucket_size=4,
ckpt_to_load_from=checkpoint_path,
tensor_name_in_ckpt=("run_1/col_1_X_col_2/"
"weighted_sum_from_feature_columns/"
"col_1_X_col_2/weights"))
with variable_scope.variable_scope("run_2"):
# This will initialize the crossed column weights from provided checkpoint
# and return a [4, 1] tensor which is same as weights variable. Since we
# won't modify weights, this should be same as 'saved_col_weights'.
_, col_weights, _ = (feature_column_ops.weighted_sum_from_feature_columns(
{
sparse_col_1.name: input_tensor,
sparse_col_2.name: input_tensor
}, [crossed_col_initialized], 1))
col_weights_from_ckpt = col_weights[crossed_col_initialized][0]
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
loaded_col_weights = col_weights_from_ckpt.eval()
self.assertAllClose(saved_col_weights, loaded_col_weights)
if __name__ == "__main__":
test.main()
|
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import argparse
from datetime import datetime
from copy import copy
from version import __version__
from Persist import Persist
from PersistantData import Schedule
from Appointment import Appointment
from Utils import int_input, input_with_quit
from AppointmentHandler import AppointmentHandler
from ScheduleChecker import ScheduleChecker
import mlhGui
def validate_children(children, persist):
if type(children) is not list:
raise TypeError
if not children:
count = int_input('How many children would you like to book? ', 1, 20)
for child in range(count):
while True:
name = input_with_quit('Please enter the name for child {}'.format(child + 1))
if name not in persist.user_data.children:
print('{} not a valid child name'.format(name))
else:
break
children.append(name)
def validate_datetime(args):
while True:
if not args.date:
args.date = input_with_quit('Please enter the date you would like to book (YYYYMMDD): ')
if not args.time:
args.time = input_with_quit('Please enter the time you would like to book (HHMM): ')
# Combine args.data and args.time into a DateTime object
dt = '{} {}'.format(args.date, args.time)
try:
args.dt = datetime.strptime(dt, '%Y%m%d %H%M')
        except ValueError:
            print('Unable to parse date and time of appointment.')
args.date = args.time = None
continue
# Check that the appointment time slot is valid
if ScheduleChecker.check_time(args.dt, args.duration):
break
else:
print('The time {} on {} is not a valid appointment time.'.format(args.time, args.date))
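# Hedged illustration (helper name and sample values are assumptions, not part of the
# original script; nothing calls this): validate_datetime above combines the separate
# date and time strings and parses them with the '%Y%m%d %H%M' format.
def _example_parse_appointment_datetime(date_str='20160131', time_str='0930'):
    """Return the datetime produced by the same format string used above."""
    return datetime.strptime('{} {}'.format(date_str, time_str), '%Y%m%d %H%M')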
def validate_duration(duration):
    duration_str = ', '.join([str(dur) for dur in Appointment.durations.keys()])
    while True:
        if not duration:
            duration = int_input('Please enter the duration of the appointment ({}): '.format(
                duration_str))
        if duration not in Appointment.durations:
            print('Appointment duration must be either {} minutes'.format(duration_str))
            duration = None
        else:
            return duration
def validate_args(args, persist):
args.new_appt = False
if args.children or args.date or args.time or args.duration:
# User wants to schedule a new appointment
args.new_appt = True
# We need to make sure that all arguments are completed
validate_children(args.children, persist)
        args.duration = validate_duration(args.duration)
validate_datetime(args)
def parse_args():
parser = argparse.ArgumentParser(
description='''Utility for advanced booking of child sitting
appointments at Paul Derda Recreation Center''')
parser.add_argument('-v', '--version', action='version',
version="Mother's Little Helper {}".format(__version__))
parser.add_argument('-c', dest='children', nargs='+', metavar='CHILD',
help='''List the names of the children for this appointment.''')
parser.add_argument('-d', dest='date', metavar='YYYYMMDD',
help='''Specify the date to book.''')
parser.add_argument('-t', dest='time', metavar='HHMM',
help='''Specify the time to book. HH is 00-23.''')
parser.add_argument('-r', dest='duration', metavar='MINUTES', type=int,
choices=list(Appointment.durations.keys()),
                        help='''Specify the duration of the appointment.''')
parser.add_argument('-x', dest='clear', action='store_true',
help='Clear appointments that have yet to be booked.')
parser.add_argument('-l', dest='log_level', choices=['debug', 'info'], default='info',
help='log level')
parser.add_argument('--execute', action='store_true',
                        help='Book appointments from the persistent store now')
parser.add_argument('-p', dest='print_store', action='store_true',
                        help='Print the contents of the persistent store')
parser.add_argument('-g', dest='gui', action='store_true',
help='Start MLH GUI')
return parser.parse_args()
def configure_logging(log_level):
logger = logging.getLogger('mlh_logger')
log_level = getattr(logging, log_level.upper(), None)
logger.setLevel(log_level)
fh = logging.FileHandler('log')
sh = logging.StreamHandler()
fileFormatter = logging.Formatter('%(asctime)s[%(levelname)s]<%(name)s>|%(funcName)s:%(message)s')
streamFormatter = logging.Formatter('%(asctime)s|%(funcName)s:%(message)s')
fh.setFormatter(fileFormatter)
sh.setFormatter(streamFormatter)
logger.addHandler(fh)
logger.addHandler(sh)
class Mlh():
def __init__(self):
self.persist = Persist('db.pick')
self.store = self.persist.get_data()
def print_store(self):
print(self.store)
return
def clear_store(self):
self.store.appointments.clear()
self.persist.set_data()
return
def run(self):
handler = AppointmentHandler(self.persist)
handler.run()
return
def run_gui(self):
myGui = mlhGui.mlhGui(self, list(self.store.user_data.children.keys()))
def split_appointments(self, schedule):
appointments = []
def get_appt_by_type(type):
names = [name for name, info in self.store.user_data.children.items()
if info.type == type and name in schedule.children]
if names:
new_schedule = copy(schedule)
new_schedule.children = names
appointments.append(new_schedule)
get_appt_by_type('child')
get_appt_by_type('infant')
return appointments
def add_appointment(self, schedule):
# If an appointment was specified with children and infants,
# it needs to be split into separate appointments
appts = self.split_appointments(schedule)
# Remove appointments that already exist in the store
for appt in self.store.appointments:
for new_appt in copy(appts):
if new_appt == appt:
appts.remove(new_appt)
self.store.appointments.extend(appts)
self.persist.set_data()
    def remove_appointments(self, indexes=None):
        for index in sorted(indexes or [], reverse=True):
self.store.appointments.pop(index)
self.persist.set_data()
def main(args):
configure_logging(args.log_level)
app = Mlh()
if args.print_store:
app.print_store()
if args.clear:
app.clear_store()
# book all available scheduled appointments
if args.execute:
app.run()
if args.gui:
app.run_gui()
else:
validate_args(args, app.store)
if args.new_appt: # We want to schedule a new appointment
app.add_appointment(Schedule(args.dt, args.duration, args.children))
if __name__ == '__main__':
main(parse_args())
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command-line flag library.
Emulates gflags by wrapping cfg.ConfigOpts.
The idea is to move fully to cfg eventually, and this wrapper is a
stepping stone.
"""
import os
import socket
import sys
from nova.compat import flagfile
from nova.openstack.common import cfg
class NovaConfigOpts(cfg.ConfigOpts):
def __init__(self, *args, **kwargs):
super(NovaConfigOpts, self).__init__(*args, **kwargs)
self.disable_interspersed_args()
def __call__(self, argv):
with flagfile.handle_flagfiles_managed(argv[1:]) as args:
return argv[:1] + super(NovaConfigOpts, self).__call__(args)
FLAGS = NovaConfigOpts()
class UnrecognizedFlag(Exception):
pass
def DECLARE(name, module_string, flag_values=FLAGS):
if module_string not in sys.modules:
__import__(module_string, globals(), locals())
if name not in flag_values:
raise UnrecognizedFlag('%s not defined by %s' % (name, module_string))
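# Hedged usage sketch (illustrative only, not part of the original module; nothing calls
# this helper): a module that reads an option defined elsewhere would typically call
# DECLARE so the defining module gets imported and the option is guaranteed to exist.
# 'network_manager' is registered further down in this file's global_opts.
def _example_declare():
    DECLARE('network_manager', 'nova.flags')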
def _get_my_ip():
"""Returns the actual ip of the local machine."""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error as ex:
return "127.0.0.1"
log_opts = [
cfg.BoolOpt('verbose',
default=False,
help='show debug output'),
cfg.StrOpt('logdir',
default=None,
help='output to a per-service log file in named directory'),
cfg.StrOpt('logfile',
default=None,
help='output to named file'),
cfg.BoolOpt('use_syslog',
default=False,
help='output to syslog'),
cfg.BoolOpt('use_stderr',
default=True,
help='log to standard error'),
]
core_opts = [
cfg.StrOpt('connection_type',
default=None,
help='libvirt, xenapi or fake'),
cfg.StrOpt('sql_connection',
default='sqlite:///$state_path/$sqlite_db',
help='connection string for sql database'),
cfg.StrOpt('api_paste_config',
default="api-paste.ini",
help='File name for the paste.deploy config for nova-api'),
cfg.StrOpt('state_path',
default=os.path.join(os.path.dirname(__file__), '../'),
help="Top-level directory for maintaining nova's state"),
cfg.StrOpt('lock_path',
default=os.path.join(os.path.dirname(__file__), '../'),
help='Directory for lock files'),
]
debug_opts = [
cfg.BoolOpt('fake_network',
default=False,
help='should we use fake network devices and addresses'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='use a fake rabbit'),
]
FLAGS.register_cli_opts(log_opts)
FLAGS.register_cli_opts(core_opts)
FLAGS.register_cli_opts(debug_opts)
global_opts = [
cfg.StrOpt('my_ip',
default=_get_my_ip(),
help='host ip address'),
cfg.ListOpt('region_list',
default=[],
help='list of region=fqdn pairs separated by commas'),
cfg.StrOpt('aws_access_key_id',
default='admin',
help='AWS Access ID'),
cfg.StrOpt('aws_secret_access_key',
default='admin',
help='AWS Access Key'),
cfg.StrOpt('glance_host',
default='$my_ip',
help='default glance host'),
cfg.IntOpt('glance_port',
default=9292,
help='default glance port'),
cfg.ListOpt('glance_api_servers',
default=['$glance_host:$glance_port'],
help='glance api servers available to nova (host:port)'),
cfg.IntOpt('glance_num_retries',
default=0,
               help='Number of retries when downloading an image from glance'),
cfg.IntOpt('s3_port',
default=3333,
help='s3 port'),
cfg.StrOpt('s3_host',
default='$my_ip',
help='s3 host (for infrastructure)'),
cfg.StrOpt('s3_dmz',
default='$my_ip',
help='s3 dmz ip (for instances)'),
cfg.StrOpt('cert_topic',
default='cert',
help='the topic cert nodes listen on'),
cfg.StrOpt('compute_topic',
default='compute',
help='the topic compute nodes listen on'),
cfg.StrOpt('console_topic',
default='console',
help='the topic console proxy nodes listen on'),
cfg.StrOpt('scheduler_topic',
default='scheduler',
help='the topic scheduler nodes listen on'),
cfg.StrOpt('volume_topic',
default='volume',
help='the topic volume nodes listen on'),
cfg.StrOpt('network_topic',
default='network',
help='the topic network nodes listen on'),
cfg.StrOpt('vsa_topic',
default='vsa',
help='the topic that nova-vsa service listens on'),
cfg.StrOpt('rabbit_host',
default='localhost',
help='rabbit host'),
cfg.IntOpt('rabbit_port',
default=5672,
help='rabbit port'),
cfg.BoolOpt('rabbit_use_ssl',
default=False,
help='connect over SSL'),
cfg.StrOpt('rabbit_userid',
default='guest',
help='rabbit userid'),
cfg.StrOpt('rabbit_password',
default='guest',
help='rabbit password'),
cfg.StrOpt('rabbit_virtual_host',
default='/',
help='rabbit virtual host'),
cfg.IntOpt('rabbit_retry_interval',
default=1,
help='rabbit connection retry interval to start'),
cfg.IntOpt('rabbit_retry_backoff',
default=2,
help='rabbit connection retry backoff in seconds'),
cfg.IntOpt('rabbit_max_retries',
default=0,
help='maximum rabbit connection attempts (0=try forever)'),
cfg.StrOpt('control_exchange',
default='nova',
help='the main exchange to connect to'),
cfg.BoolOpt('rabbit_durable_queues',
default=False,
help='use durable queues'),
cfg.ListOpt('enabled_apis',
default=['ec2', 'osapi_compute', 'osapi_volume', 'metadata'],
help='list of APIs to enable by default'),
cfg.StrOpt('ec2_host',
default='$my_ip',
help='ip of api server'),
cfg.StrOpt('ec2_dmz_host',
default='$my_ip',
help='internal ip of api server'),
cfg.IntOpt('ec2_port',
default=8773,
help='cloud controller port'),
cfg.StrOpt('ec2_scheme',
default='http',
help='prefix for ec2'),
cfg.StrOpt('ec2_path',
default='/services/Cloud',
help='suffix for ec2'),
cfg.ListOpt('osapi_compute_ext_list',
default=[],
help='Specify list of extensions to load when using osapi_'
'compute_extension option with nova.api.openstack.'
'compute.contrib.select_extensions'),
cfg.MultiStrOpt('osapi_compute_extension',
default=[
'nova.api.openstack.compute.contrib.standard_extensions'
],
help='osapi compute extension to load'),
cfg.ListOpt('osapi_volume_ext_list',
default=[],
help='Specify list of extensions to load when using osapi_'
'volume_extension option with nova.api.openstack.'
'volume.contrib.select_extensions'),
cfg.MultiStrOpt('osapi_volume_extension',
default=[
'nova.api.openstack.volume.contrib.standard_extensions'
],
help='osapi volume extension to load'),
cfg.StrOpt('osapi_scheme',
default='http',
help='prefix for openstack'),
cfg.StrOpt('osapi_path',
default='/v1.1/',
help='suffix for openstack'),
cfg.StrOpt('osapi_compute_link_prefix',
default=None,
help='Base URL that will be presented to users in links '
'to the Openstack Compute API'),
cfg.StrOpt('osapi_glance_link_prefix',
default=None,
help='Base URL that will be presented to users in links '
'to glance resources'),
cfg.IntOpt('osapi_max_limit',
default=1000,
help='max number of items returned in a collection response'),
cfg.StrOpt('metadata_host',
default='$my_ip',
help='ip of metadata server'),
cfg.IntOpt('metadata_port',
default=8775,
help='Metadata API port'),
cfg.StrOpt('default_project',
default='openstack',
help='default project for openstack'),
cfg.StrOpt('default_image',
default='ami-11111',
help='default image to use, testing only'),
cfg.StrOpt('default_instance_type',
default='m1.small',
help='default instance type to use, testing only'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('vpn_image_id',
default='0',
help='image id for cloudpipe vpn server'),
cfg.StrOpt('vpn_key_suffix',
default='-vpn',
help='Suffix to add to project name for vpn key and secgroups'),
cfg.IntOpt('auth_token_ttl',
default=3600,
help='Seconds for auth tokens to linger'),
cfg.StrOpt('logfile_mode',
default='0644',
help='Default file mode of the logs.'),
cfg.StrOpt('sqlite_db',
default='nova.sqlite',
help='file name for sqlite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='Synchronous mode for sqlite'),
cfg.IntOpt('sql_idle_timeout',
default=3600,
help='timeout for idle sql database connections'),
cfg.IntOpt('sql_max_retries',
default=12,
help='sql connection attempts'),
cfg.IntOpt('sql_retry_interval',
default=10,
help='sql connection retry interval'),
cfg.StrOpt('compute_manager',
default='nova.compute.manager.ComputeManager',
help='Manager for compute'),
cfg.StrOpt('console_manager',
default='nova.console.manager.ConsoleProxyManager',
help='Manager for console proxy'),
cfg.StrOpt('cert_manager',
default='nova.cert.manager.CertManager',
help='Manager for cert'),
cfg.StrOpt('instance_dns_manager',
default='nova.network.dns_driver.DNSDriver',
help='DNS Manager for instance IPs'),
cfg.StrOpt('instance_dns_domain',
default='',
help='DNS Zone for instance IPs'),
cfg.StrOpt('floating_ip_dns_manager',
default='nova.network.dns_driver.DNSDriver',
help='DNS Manager for floating IPs'),
cfg.StrOpt('network_manager',
default='nova.network.manager.VlanManager',
help='Manager for network'),
cfg.StrOpt('volume_manager',
default='nova.volume.manager.VolumeManager',
help='Manager for volume'),
cfg.StrOpt('scheduler_manager',
default='nova.scheduler.manager.SchedulerManager',
help='Manager for scheduler'),
cfg.StrOpt('vsa_manager',
default='nova.vsa.manager.VsaManager',
help='Manager for vsa'),
cfg.StrOpt('vc_image_name',
default='vc_image',
help='the VC image ID (for a VC image that exists in Glance)'),
cfg.StrOpt('default_vsa_instance_type',
default='m1.small',
help='default instance type for VSA instances'),
cfg.IntOpt('max_vcs_in_vsa',
default=32,
               help='maximum VCs in a VSA'),
cfg.IntOpt('vsa_part_size_gb',
default=100,
help='default partition size for shared capacity'),
cfg.StrOpt('firewall_driver',
default='nova.virt.firewall.IptablesFirewallDriver',
help='Firewall driver (defaults to iptables)'),
cfg.StrOpt('image_service',
default='nova.image.glance.GlanceImageService',
help='The service to use for retrieving and searching images.'),
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address.'),
cfg.StrOpt('node_availability_zone',
default='nova',
help='availability zone of this node'),
cfg.StrOpt('notification_driver',
default='nova.notifier.no_op_notifier',
help='Default driver for sending notifications'),
cfg.ListOpt('memcached_servers',
default=None,
help='Memcached servers or None for in process cache.'),
cfg.StrOpt('zone_name',
default='nova',
help='name of this zone'),
cfg.ListOpt('zone_capabilities',
default=['hypervisor=xenserver;kvm', 'os=linux;windows'],
help='Key/Multi-value list with the capabilities of the zone'),
cfg.StrOpt('build_plan_encryption_key',
default=None,
help='128bit (hex) encryption key for scheduler build plans.'),
cfg.StrOpt('instance_usage_audit_period',
default='month',
help='time period to generate instance usages for.'),
cfg.IntOpt('bandwith_poll_interval',
default=600,
help='interval to pull bandwidth usage info'),
cfg.BoolOpt('start_guests_on_host_boot',
default=False,
help='Whether to restart guests when the host reboots'),
cfg.BoolOpt('resume_guests_state_on_host_boot',
default=False,
help='Whether to start guests that were running before the '
'host rebooted'),
cfg.StrOpt('default_ephemeral_format',
default=None,
               help='The default format an ephemeral_volume will be '
'formatted with on creation.'),
cfg.StrOpt('root_helper',
default='sudo',
help='Command prefix to use for running commands as root'),
cfg.StrOpt('network_driver',
default='nova.network.linux_net',
help='Driver to use for network creation'),
cfg.BoolOpt('use_ipv6',
default=False,
help='use ipv6'),
cfg.BoolOpt('enable_instance_password',
default=True,
help='Allows use of instance password during '
'server creation'),
cfg.IntOpt('password_length',
default=12,
help='Length of generated instance admin passwords'),
cfg.BoolOpt('monkey_patch',
default=False,
help='Whether to log monkey patching'),
cfg.ListOpt('monkey_patch_modules',
default=[
'nova.api.ec2.cloud:nova.notifier.api.notify_decorator',
'nova.compute.api:nova.notifier.api.notify_decorator'
],
help='List of modules/decorators to monkey patch'),
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('stub_network',
default=False,
help='Stub network related code'),
cfg.IntOpt('reclaim_instance_interval',
default=0,
help='Interval in seconds for reclaiming deleted instances'),
cfg.IntOpt('zombie_instance_updated_at_window',
default=172800,
help='Number of seconds zombie instances are cleaned up.'),
cfg.IntOpt('service_down_time',
default=60,
help='maximum time since last check-in for up service'),
cfg.StrOpt('default_schedule_zone',
default=None,
               help='zone to use when the user does not specify one'),
cfg.ListOpt('isolated_images',
default=[],
help='Images to run on isolated host'),
cfg.ListOpt('isolated_hosts',
default=[],
help='Host reserved for specific images'),
cfg.BoolOpt('cache_images',
default=True,
help='Cache glance images locally'),
cfg.BoolOpt('use_cow_images',
default=True,
help='Whether to use cow images')
]
FLAGS.register_opts(global_opts)
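# Hedged illustration (assumption about typical cfg behaviour, not part of the original
# module; nothing calls this helper): several defaults above are template strings such as
# '$my_ip', '$glance_host:$glance_port' and '$state_path/$sqlite_db', which cfg
# interpolates when the option is read rather than at registration time.
def _example_interpolated_defaults():
    FLAGS(['nova'])                    # parse an empty command line
    return FLAGS.glance_api_servers   # e.g. ['10.0.0.5:9292'] after interpolation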
|
|
# -*- encoding: utf-8 -*-
"""
Local server.
`H2OLocalServer` allows you to start H2O servers on your local machine:
hs = H2OLocalServer.start() : start a new local server
hs.is_running() : check if the server is running
hs.shutdown() : shut down the server
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import atexit
import os
import subprocess
import sys
import tempfile
import time
from random import choice
from sysconfig import get_config_var
from warnings import warn
from h2o.exceptions import H2OServerError, H2OStartupError, H2OValueError
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import assert_is_type, assert_satisfies, BoundInt, I, is_type
__all__ = ("H2OLocalServer", )
class H2OLocalServer(object):
"""
Handle to an H2O server launched locally.
Public interface::
hs = H2OLocalServer.start(...) # launch a new local H2O server
hs.is_running() # check if the server is running
hs.shutdown() # shut down the server
hs.scheme # either "http" or "https"
hs.ip # ip address of the server, typically "127.0.0.1"
hs.port # port on which the server is listening
    Once started, the server will run until the script terminates, or until you call `.shutdown()` on it. However,
    if the script terminates with an exception, the server will not stop and will continue to run even after the
    Python process exits. Such a runaway process may end up in a bad shape (e.g. frozen), in which case the only
    way to terminate it is to kill the java process from the terminal.
Alternatively, it is possible to start the server as a context manager, in which case it will be automatically
shut down even if an exception occurs in Python (but not if the Python process is killed)::
with H2OLocalServer.start() as hs:
# do something with the server -- probably connect to it
"""
_TIME_TO_START = 10 # Maximum time we wait for the server to start up (in seconds)
_TIME_TO_KILL = 3 # Maximum time we wait for the server to shut down until we kill it (in seconds)
@staticmethod
def start(jar_path=None, nthreads=-1, enable_assertions=True, max_mem_size=None, min_mem_size=None,
ice_root=None, port="54321+", verbose=True):
"""
Start new H2O server on the local machine.
:param jar_path: Path to the h2o.jar executable. If not given, then we will search for h2o.jar in the
locations returned by `._jar_paths()`.
:param nthreads: Number of threads in the thread pool. This should be related to the number of CPUs used.
-1 means use all CPUs on the host. A positive integer specifies the number of CPUs directly.
:param enable_assertions: If True, pass `-ea` option to the JVM.
:param max_mem_size: Maximum heap size (jvm option Xmx), in bytes.
:param min_mem_size: Minimum heap size (jvm option Xms), in bytes.
:param ice_root: A directory where H2O stores its temporary files. Default location is determined by
tempfile.mkdtemp().
:param port: Port where to start the new server. This could be either an integer, or a string of the form
"DDDDD+", indicating that the server should start looking for an open port starting from DDDDD and up.
:param verbose: If True, then connection info will be printed to the stdout.
:returns: a new H2OLocalServer instance
"""
assert_is_type(jar_path, None, str)
assert_is_type(port, None, int, str)
assert_is_type(nthreads, -1, BoundInt(1, 4096))
assert_is_type(enable_assertions, bool)
assert_is_type(min_mem_size, None, int)
assert_is_type(max_mem_size, None, BoundInt(1 << 25))
assert_is_type(ice_root, None, I(str, os.path.isdir))
if jar_path:
assert_satisfies(jar_path, jar_path.endswith("h2o.jar"))
if min_mem_size is not None and max_mem_size is not None and min_mem_size > max_mem_size:
raise H2OValueError("`min_mem_size`=%d is larger than the `max_mem_size`=%d" % (min_mem_size, max_mem_size))
if port is None: port = "54321+"
baseport = None
# TODO: get rid of this port gimmick and have 2 separate parameters.
if is_type(port, str):
if port.isdigit():
port = int(port)
else:
if not(port[-1] == "+" and port[:-1].isdigit()):
raise H2OValueError("`port` should be of the form 'DDDD+', where D is a digit. Got: %s" % port)
baseport = int(port[:-1])
port = 0
hs = H2OLocalServer()
hs._verbose = bool(verbose)
hs._jar_path = hs._find_jar(jar_path)
hs._ice_root = ice_root
if not ice_root:
hs._ice_root = tempfile.mkdtemp()
hs._tempdir = hs._ice_root
if verbose: print("Attempting to start a local H2O server...")
hs._launch_server(port=port, baseport=baseport, nthreads=int(nthreads), ea=enable_assertions,
mmax=max_mem_size, mmin=min_mem_size)
if verbose: print(" Server is running at %s://%s:%d" % (hs.scheme, hs.ip, hs.port))
atexit.register(lambda: hs.shutdown())
return hs
def is_running(self):
"""Return True if the server process is still running, False otherwise."""
return self._process is not None and self._process.poll() is None
def shutdown(self):
"""
Shut down the server by trying to terminate/kill its process.
First we attempt to terminate the server process gracefully (sending SIGTERM signal). However after
_TIME_TO_KILL seconds if the process didn't shutdown, we forcefully kill it with a SIGKILL signal.
"""
if not self._process: return
try:
kill_time = time.time() + self._TIME_TO_KILL
while self._process.poll() is None and time.time() < kill_time:
self._process.terminate()
time.sleep(0.2)
            if self._process.poll() is None:
self._process.kill()
time.sleep(0.2)
if self._verbose:
print("Local H2O server %s:%s stopped." % (self.ip, self.port))
except:
pass
self._process = None
@property
def scheme(self):
"""Connection scheme, 'http' or 'https'."""
return self._scheme
@property
def ip(self):
"""IP address of the server."""
return self._ip
@property
def port(self):
"""Port that the server is listening to."""
return self._port
#-------------------------------------------------------------------------------------------------------------------
# Private
#-------------------------------------------------------------------------------------------------------------------
def __init__(self):
"""[Internal] please use H2OLocalServer.start() to launch a new server."""
self._scheme = None # "http" or "https"
self._ip = None
self._port = None
self._process = None
self._verbose = None
self._jar_path = None
self._ice_root = None
self._stdout = None
self._stderr = None
self._tempdir = None
def _find_jar(self, path0=None):
"""
Return the location of an h2o.jar executable.
:param path0: Explicitly given h2o.jar path. If provided, then we will simply check whether the file is there,
otherwise we will search for an executable in locations returned by ._jar_paths().
:raises H2OStartupError: if no h2o.jar executable can be found.
"""
jar_paths = [path0] if path0 else self._jar_paths()
searched_paths = []
for jp in jar_paths:
searched_paths.append(jp)
if os.path.exists(jp):
return jp
raise H2OStartupError("Cannot start local server: h2o.jar not found. Paths searched:\n" +
"".join(" %s\n" % s for s in searched_paths))
@staticmethod
def _jar_paths():
"""Produce potential paths for an h2o.jar executable."""
# Check if running from an h2o-3 src folder (or any subfolder), in which case use the freshly-built h2o.jar
cwd_chunks = os.path.abspath(".").split(os.path.sep)
for i in range(len(cwd_chunks), 0, -1):
if cwd_chunks[i - 1] == "h2o-3":
yield os.path.sep.join(cwd_chunks[:i] + ["build", "h2o.jar"])
# Then check the backend/bin folder:
# (the following works assuming this code is located in h2o/backend/server.py file)
backend_dir = os.path.split(os.path.realpath(__file__))[0]
yield os.path.join(backend_dir, "bin", "h2o.jar")
# Then try several old locations where h2o.jar might have been installed
prefix1 = prefix2 = sys.prefix
# On Unix-like systems Python typically gets installed into /Library/... or /System/Library/... If one of
# those paths is sys.prefix, then we also build its counterpart.
        if prefix1.startswith(os.path.sep + "Library"):
            prefix2 = os.path.join(os.path.sep + "System", prefix1.lstrip(os.path.sep))
        elif prefix1.startswith(os.path.sep + "System"):
            prefix2 = prefix1[len(os.path.sep + "System"):]
yield os.path.join(prefix1, "h2o_jar", "h2o.jar")
yield os.path.join(os.path.abspath(os.sep), "usr", "local", "h2o_jar", "h2o.jar")
yield os.path.join(prefix1, "local", "h2o_jar", "h2o.jar")
yield os.path.join(get_config_var("userbase"), "h2o_jar", "h2o.jar")
yield os.path.join(prefix2, "h2o_jar", "h2o.jar")
def _launch_server(self, port, baseport, mmax, mmin, ea, nthreads):
"""Actually start the h2o.jar executable (helper method for `.start()`)."""
self._ip = "127.0.0.1"
# Find Java and check version. (Note that subprocess.check_output returns the output as a bytes object)
java = self._find_java()
jver_bytes = subprocess.check_output([java, "-version"], stderr=subprocess.STDOUT)
jver = jver_bytes.decode(encoding="utf-8", errors="ignore")
if self._verbose:
print(" Java Version: " + jver.strip().replace("\n", "; "))
if "GNU libgcj" in jver:
raise H2OStartupError("Sorry, GNU Java is not supported for H2O.\n"
"Please download the latest 64-bit Java SE JDK from Oracle.")
if "Client VM" in jver:
warn(" You have a 32-bit version of Java. H2O works best with 64-bit Java.\n"
" Please download the latest 64-bit Java SE JDK from Oracle.\n")
if self._verbose:
print(" Starting server from " + self._jar_path)
print(" Ice root: " + self._ice_root)
# Construct java command to launch the process
cmd = [java]
# ...add JVM options
cmd += ["-ea"] if ea else []
for (mq, num) in [("-Xms", mmin), ("-Xmx", mmax)]:
if num is None: continue
numstr = "%dG" % (num >> 30) if num == (num >> 30) << 30 else \
"%dM" % (num >> 20) if num == (num >> 20) << 20 else \
str(num)
cmd += [mq + numstr]
cmd += ["-verbose:gc", "-XX:+PrintGCDetails", "-XX:+PrintGCTimeStamps"]
cmd += ["-jar", self._jar_path] # This should be the last JVM option
# ...add H2O options
cmd += ["-ip", self._ip]
cmd += ["-port", str(port)] if port else []
cmd += ["-baseport", str(baseport)] if baseport else []
cmd += ["-ice_root", self._ice_root]
cmd += ["-nthreads", str(nthreads)] if nthreads > 0 else []
cmd += ["-name", "H2O_from_python_%s" % self._tmp_file("salt")]
# Warning: do not change to any higher log-level, otherwise we won't be able to know which port the
# server is listening to.
cmd += ["-log_level", "INFO"]
# Create stdout and stderr files
self._stdout = self._tmp_file("stdout")
self._stderr = self._tmp_file("stderr")
cwd = os.path.abspath(os.getcwd())
out = open(self._stdout, "w")
err = open(self._stderr, "w")
if self._verbose:
print(" JVM stdout: " + out.name)
print(" JVM stderr: " + err.name)
# Launch the process
win32 = sys.platform == "win32"
flags = getattr(subprocess, "CREATE_NEW_PROCESS_GROUP", 0) if win32 else 0
prex = os.setsid if not win32 else None
try:
proc = subprocess.Popen(args=cmd, stdout=out, stderr=err, cwd=cwd, creationflags=flags, preexec_fn=prex)
except OSError as e:
traceback = getattr(e, "child_traceback", None)
raise H2OServerError("Unable to start server: %s" % e, traceback)
# Wait until the server is up-and-running
giveup_time = time.time() + self._TIME_TO_START
while True:
if proc.poll() is not None:
raise H2OServerError("Server process terminated with error code %d" % proc.returncode)
ret = self._get_server_info_from_logs()
if ret:
self._scheme = ret[0]
self._ip = ret[1]
self._port = ret[2]
self._process = proc
break
if time.time() > giveup_time:
elapsed_time = time.time() - (giveup_time - self._TIME_TO_START)
raise H2OServerError("Server wasn't able to start in %f seconds." % elapsed_time)
time.sleep(0.2)
@staticmethod
def _find_java():
"""
Find location of the java executable (helper for `._launch_server()`).
This method is not particularly robust, and may require additional tweaking for different platforms...
:return: Path to the java executable.
:raises H2OStartupError: if java cannot be found.
"""
# is java callable directly (doesn't work on windows it seems)?
java = "java.exe" if sys.platform == "win32" else "java"
if os.access(java, os.X_OK):
return java
# Can Java be found on the PATH?
for path in os.getenv("PATH").split(os.pathsep): # not same as os.path.sep!
full_path = os.path.join(path, java)
if os.access(full_path, os.X_OK):
return full_path
# check if JAVA_HOME is set (for Windows)
if os.getenv("JAVA_HOME"):
full_path = os.path.join(os.getenv("JAVA_HOME"), "bin", java)
if os.path.exists(full_path):
return full_path
# check "/Program Files" and "/Program Files (x86)" on Windows
if sys.platform == "win32":
# On Windows, backslash on the drive letter is necessary, otherwise os.path.join produces an invalid path
program_folders = [os.path.join("C:\\", "Program Files", "Java"),
os.path.join("C:\\", "Program Files (x86)", "Java"),
os.path.join("C:\\", "ProgramData", "Oracle", "Java")]
for folder in program_folders:
for dirpath, dirnames, filenames in os.walk(folder):
if java in filenames:
return os.path.join(dirpath, java)
# not found...
raise H2OStartupError("Cannot find Java. Please install the latest JRE from\n"
"http://www.oracle.com/technetwork/java/javase/downloads/index.html")
def _tmp_file(self, kind):
"""
Generate names for temporary files (helper method for `._launch_server()`).
:param kind: one of "stdout", "stderr" or "salt". The "salt" kind is used for process name, not for a
file, so it doesn't contain a path. All generated names are based on the user name of the currently
logged-in user.
"""
if sys.platform == "win32":
username = os.getenv("USERNAME")
else:
username = os.getenv("USER")
if not username:
username = "unknownUser"
usr = "".join(ch if ch.isalnum() else "_" for ch in username)
if kind == "salt":
return usr + "_" + "".join(choice("0123456789abcdefghijklmnopqrstuvwxyz") for _ in range(6))
else:
if not self._tempdir:
self._tempdir = tempfile.mkdtemp()
return os.path.join(self._tempdir, "h2o_%s_started_from_python.%s" % (usr, kind[3:]))
def _get_server_info_from_logs(self):
"""
Check server's output log, and determine its scheme / IP / port (helper method for `._launch_server()`).
This method is polled during process startup. It looks at the server output log and checks for a presence of
a particular string ("INFO: Open H2O Flow in your web browser:") which indicates that the server is
up-and-running. If the method detects this string, it extracts the server's scheme, ip and port and returns
them; otherwise it returns None.
:returns: (scheme, ip, port) tuple if the server has already started, None otherwise.
"""
searchstr = "INFO: Open H2O Flow in your web browser:"
with open(self._stdout, "rt") as f:
for line in f:
if searchstr in line:
url = line[line.index(searchstr) + len(searchstr):].strip().rstrip("/")
parts = url.split(":")
                    assert len(parts) == 3 and (parts[0] == "http" or parts[0] == "https") and parts[2].isdigit(), \
"Unexpected URL: %s" % url
return parts[0], parts[1][2:], int(parts[2])
return None
def __enter__(self):
return self
def __exit__(self, *args):
self.shutdown()
assert len(args) == 3 # Avoid warning about unused args...
return False # ensure that any exception will be re-raised
# Do not stop child process when the object is garbage collected!
# This ensures that simple code such as
# for _ in range(5):
# h2o.H2OConnection.start()
# will launch 5 servers, and they will not be closed down immediately (only when the program exits).
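# Hedged usage sketch (illustration only, not part of the original module; nothing calls
# this helper): the context-manager form described in the class docstring, assuming an
# h2o.jar can be located by `._jar_paths()`.
def _example_start_local_server():
    with H2OLocalServer.start(nthreads=2, verbose=False) as hs:
        print("Server is listening at %s://%s:%d" % (hs.scheme, hs.ip, hs.port))
        # ...connect a client here; the server is shut down when the 'with' block exits.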
|
|
# coding: utf-8
import serial
import struct
import logging
import glob
import request
class SpheroError(Exception):
pass
class Sphero(object):
def __init__(self, path=None):
self.sp = None
self.dev = 0x00
self.seq = 0x00
self.set_sphero(path)
def set_sphero(self, path=None):
if not path:
spheros = self.paired_spheros()
if not spheros:
raise SpheroError('you need to pair with a Sphero first')
path = spheros[-1]
self.path = path
def paired_spheros(self):
return glob.glob('/dev/tty.Sphero*')
def connect(self, retry=100):
tries=retry
logging.info('connecting to %s' % self.path)
while True:
try:
self.sp = serial.Serial(self.path, 115200)
return
except serial.serialutil.SerialException:
logging.info('retrying')
if not retry:
raise SpheroError('failed to connect after %d tries' % (tries-retry))
retry -= 1
def write(self, packet):
self.sp.write(str(packet))
self.seq += 1
if self.seq == 0xFF:
self.seq = 0x00
header = struct.unpack('5B', self.sp.read(5))
body = self.sp.read(header[-1])
response = packet.response(header, body)
if response.success:
return response
else:
raise SpheroError('request failed (request: %s:%s, response: %s:%s)' % (header, repr(body), response.header, repr(response.body)))
def prep_str(self, s):
""" Helper method to take a string and give a array of "bytes" """
return [ord(c) for c in s]
# CORE COMMANDS
def ping(self):
return self.write(request.Ping(self.seq))
def set_rgb(self, r, g, b, persistant=False):
return self.write(request.SetRGB(self.seq, r, g, b, 0x01 if persistant else 0x00))
def get_rgb(self):
return self.write(request.GetRGB(self.seq))
def get_version(self):
raise NotImplementedError
def get_device_name(self):
# GET_DEVICE_NAME is not really part of the api,
# it has changed to GET_BLUETOOTH_INFO.
# Which returns both name and Bluetooth mac address.
return self.get_bluetooth_info().name
def set_device_name(self, newname):
""" Sets internal device name. (not announced bluetooth name).
requires utf-8 encoded string. """
return self.write(request.SetDeviceName(self.seq, *self.prep_str(newname)))
def get_bluetooth_info(self):
return self.write(request.GetBluetoothInfo(self.seq))
def set_auto_reconnect(self):
raise NotImplementedError
def get_auto_reconnect(self):
raise NotImplementedError
def get_power_state(self):
raise NotImplementedError
def set_power_notification(self):
raise NotImplementedError
def sleep(self, wakeup=0, macro=0, orbbasic=0):
return self.write(request.Sleep(self.seq, wakeup, macro, orbbasic))
def get_voltage_trip_points(self):
raise NotImplementedError
def set_voltage_trip_points(self):
raise NotImplementedError
def set_inactivity_timeout(self):
raise NotImplementedError
def jump_to_bootloader(self):
raise NotImplementedError
def perform_level_1_diagnostics(self):
raise NotImplementedError
def perform_level_2_diagnostics(self):
raise NotImplementedError
def clear_counters(self):
raise NotImplementedError
def set_time_value(self):
raise NotImplementedError
def poll_packet_times(self):
raise NotImplementedError
# SPHERO COMMANDS
def set_heading(self, value):
"""value can be between 0 and 359"""
return self.write(request.SetHeading(self.seq, value))
def set_stabilization(self, state):
return self.write(request.SetStabilization(self.seq, state))
def set_rotation_rate(self, val):
"""value ca be between 0x00 and 0xFF:
value is a multiplied with 0.784 degrees/s except for:
0 --> 1 degrees/s
255 --> jumps to 400 degrees/s
"""
return self.write(request.SetRotationRate(self.seq, val))
def set_application_configuration_block(self):
raise NotImplementedError
def get_application_configuration_block(self):
raise NotImplementedError
def reenable_demo_mode(self):
raise NotImplementedError
def get_chassis_id(self):
raise NotImplementedError
def set_chassis_id(self):
raise NotImplementedError
def self_level(self):
raise NotImplementedError
def set_data_streaming(self):
raise NotImplementedError
def configure_collision_detection(self):
raise NotImplementedError
def set_back_led_output(self, value):
"""value can be between 0x00 and 0xFF"""
return self.write(request.SetBackLEDOutput(self.seq, value))
def roll(self, speed, heading, state=1):
"""
speed can have value between 0x00 and 0xFF
heading can have value between 0 and 359
"""
return self.write(request.Roll(self.seq, speed, heading, state ))
def set_boost_with_time(self):
raise NotImplementedError
def set_raw_motor_values(self):
raise NotImplementedError
def set_motion_timeout(self):
raise NotImplementedError
def set_option_flags(self):
raise NotImplementedError
def get_option_flags(self):
raise NotImplementedError
def get_configuration_block(self):
raise NotImplementedError
def set_device_mode(self):
raise NotImplementedError
def run_macro(self):
raise NotImplementedError
def save_temporary_macro(self):
raise NotImplementedError
def reinit_macro(self):
raise NotImplementedError
def abort_macro(self):
raise NotImplementedError
def get_macro_status(self):
raise NotImplementedError
def set_macro_parameter(self):
raise NotImplementedError
def append_macro_chunk(self):
raise NotImplementedError
def erase_orbbasic_storage(self):
raise NotImplementedError
def append_orbbasic_fragment(self):
raise NotImplementedError
def run_orbbasic_program(self):
raise NotImplementedError
def abort_orbbasic_program(self):
raise NotImplementedError
# BOOTLOADER COMMANDS (still looking for actual docs on these)
def begin_bootloader_reflash(self):
raise NotImplementedError
def set_bootloader_page(self):
raise NotImplementedError
def leave_bootloader(self):
raise NotImplementedError
def is_bootloader_page_blank(self):
raise NotImplementedError
def erase_user_config(self):
raise NotImplementedError
# Additional "higher-level" commands
def stop(self):
return self.roll(0,0)
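# Hedged usage sketch (illustration only; the helper below is an assumption, not part of
# the original driver, and assumes a paired Sphero reachable via /dev/tty.Sphero*):
def example_square_drive(sphero, speed=0x60, pause=1.0):
    """Drive roughly in a square by stepping the heading 90 degrees at a time."""
    import time
    for heading in (0, 90, 180, 270):
        sphero.roll(speed, heading)  # speed: 0x00-0xFF, heading: 0-359
        time.sleep(pause)
    sphero.stop()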
if __name__ == '__main__':
import time
logging.getLogger().setLevel(logging.DEBUG)
s = Sphero()
s.connect()
#print ( s.set_device_name("Sphero-Salmon") )
print( """Bluetooth info:
name: %s
bta: %s
"""
% ( s.get_bluetooth_info().name,
s.get_bluetooth_info().bta
)
)
s.set_rotation_rate(0x00)
s.set_heading(0)
time.sleep(1)
s.roll(0x80, 270)
time.sleep(2)
s.set_heading(45)
time.sleep(3)
s.stop()
# handy for debugging calls
def raw(did, cid, *data, **kwargs):
req = request.Request(s.seq, *data)
req.did = did
req.cid = cid
if 'fmt' in kwargs:
req.fmt = kwargs['fmt']
res = s.write(req)
logging.debug('request: %s', repr(req.bytes))
logging.debug('response: %s', repr(res.data))
return res
|
|
# Name: Maud Ottenheijm
# Student nr: 10641785
#
# This file contains code for preprocessing the dataset from UNESCO on women in science,
# combining it with birth rate data and government expenses data (World Bank). The output file 'data_all.js'
# separates the data per year, then per country, with all values in one object. Data for a bar chart is
# formatted as an array within each country object.
#
# Part of final programming project 2016.
# set of country codes and respective country name
country_codes = [
["af", "AFG", "Afghanistan"],
["ax", "ALA", "Aland Islands"],
["al", "ALB", "Albania"],
["dz", "DZA", "Algeria"],
["as", "ASM", "American Samoa"],
["ad", "AND", "Andorra"],
["ao", "AGO", "Angola"],
["ai", "AIA", "Anguilla"],
["aq", "ATA", "Antarctica"],
["ag", "ATG", "Antigua and Barbuda"],
["ar", "ARG", "Argentina"],
["am", "ARM", "Armenia"],
["aw", "ABW", "Aruba"],
["au", "AUS", "Australia"],
["at", "AUT", "Austria"],
["az", "AZE", "Azerbaijan"],
["bs", "BHS", "Bahamas"],
["bh", "BHR", "Bahrain"],
["bd", "BGD", "Bangladesh"],
["bb", "BRB", "Barbados"],
["by", "BLR", "Belarus"],
["be", "BEL", "Belgium"],
["bz", "BLZ", "Belize"],
["bj", "BEN", "Benin"],
["bm", "BMU", "Bermuda"],
["bt", "BTN", "Bhutan"],
["bo", "BOL", "Bolivia Plurinational State of"],
["bq", "BES", "Bonaire, Sint Eustatius and Saba"],
["ba", "BIH", "Bosnia and Herzegovina"],
["bw", "BWA", "Botswana"],
["bv", "BVT", "Bouvet Island"],
["br", "BRA", "Brazil"],
["io", "IOT", "British Indian Ocean Territory"],
["bn", "BRN", "Brunei Darussalam"],
["bg", "BGR", "Bulgaria"],
["bf", "BFA", "Burkina Faso"],
["bi", "BDI", "Burundi"],
["kh", "KHM", "Cambodia"],
["cm", "CMR", "Cameroon"],
["ca", "CAN", "Canada"],
["cv", "CPV", "Cape Verde"],
["ky", "CYM", "Cayman Islands"],
["cf", "CAF", "Central African Republic"],
["td", "TCD", "Chad"],
["cl", "CHL", "Chile"],
["cn", "CHN", "China"],
["cx", "CXR", "Christmas Island"],
["cc", "CCK", "Cocos (Keeling) Islands"],
["co", "COL", "Colombia"],
["km", "COM", "Comoros"],
["cg", "COG", "Congo"],
["cd", "COD", "Congo Democratic Republic of the"],
["ck", "COK", "Cook Islands"],
["cr", "CRI", "Costa Rica"],
["ci", "CIV", "Cote d'Ivoire"],
["hr", "HRV", "Croatia"],
["cu", "CUB", "Cuba"],
["cw", "CUW", "Curacao"],
["cy", "CYP", "Cyprus"],
["cz", "CZE", "Czech Republic"],
["dk", "DNK", "Denmark"],
["dj", "DJI", "Djibouti"],
["dm", "DMA", "Dominica"],
["do", "DOM", "Dominican Republic"],
["ec", "ECU", "Ecuador"],
["eg", "EGY", "Egypt"],
["sv", "SLV", "El Salvador"],
["gq", "GNQ", "Equatorial Guinea"],
["er", "ERI", "Eritrea"],
["ee", "EST", "Estonia"],
["et", "ETH", "Ethiopia"],
["fk", "FLK", "Falkland Islands (Malvinas)"],
["fo", "FRO", "Faroe Islands"],
["fj", "FJI", "Fiji"],
["fi", "FIN", "Finland"],
["fr", "FRA", "France"],
["gf", "GUF", "French Guiana"],
["pf", "PYF", "French Polynesia"],
["tf", "ATF", "French Southern Territories"],
["ga", "GAB", "Gabon"],
["gm", "GMB", "Gambia"],
["ge", "GEO", "Georgia"],
["de", "DEU", "Germany"],
["gh", "GHA", "Ghana"],
["gi", "GIB", "Gibraltar"],
["gr", "GRC", "Greece"],
["gl", "GRL", "Greenland"],
["gd", "GRD", "Grenada"],
["gp", "GLP", "Guadeloupe"],
["gu", "GUM", "Guam"],
["gt", "GTM", "Guatemala"],
["gg", "GGY", "Guernsey"],
["gn", "GIN", "Guinea"],
["gw", "GNB", "Guinea-Bissau"],
["gy", "GUY", "Guyana"],
["ht", "HTI", "Haiti"],
["hm", "HMD", "Heard Island and McDonald Islands"],
["va", "VAT", "Holy See (Vatican City State)"],
["hn", "HND", "Honduras"],
["hk", "HKG", "Hong Kong"],
["hu", "HUN", "Hungary"],
["is", "ISL", "Iceland"],
["in", "IND", "India"],
["id", "IDN", "Indonesia"],
["ir", "IRN", "Iran (Islamic Republic of)"],
["iq", "IRQ", "Iraq"],
["ie", "IRL", "Ireland"],
["im", "IMN", "Isle of Man"],
["il", "ISR", "Israel"],
["it", "ITA", "Italy"],
["jm", "JAM", "Jamaica"],
["jp", "JPN", "Japan"],
["je", "JEY", "Jersey"],
["jo", "JOR", "Jordan"],
["kz", "KAZ", "Kazakhstan"],
["ke", "KEN", "Kenya"],
["ki", "KIR", "Kiribati"],
["kp", "PRK", "Korea, Democratic People's Republic of"],
["kr", "KOR", "Republic of Korea"],
["kw", "KWT", "Kuwait"],
["kg", "KGZ", "Kyrgyzstan"],
["la", "LAO", "Lao People's Democratic Republic"],
["lv", "LVA", "Latvia"],
["lb", "LBN", "Lebanon"],
["ls", "LSO", "Lesotho"],
["lr", "LBR", "Liberia"],
["ly", "LBY", "Libya"],
["li", "LIE", "Liechtenstein"],
["lt", "LTU", "Lithuania"],
["lu", "LUX", "Luxembourg"],
["mo", "MAC", "Macao"],
["mk", "MKD", "The former Yugoslav Republic of Macedonia"],
["mg", "MDG", "Madagascar"],
["mw", "MWI", "Malawi"],
["my", "MYS", "Malaysia"],
["mv", "MDV", "Maldives"],
["ml", "MLI", "Mali"],
["mt", "MLT", "Malta"],
["mh", "MHL", "Marshall Islands"],
["mq", "MTQ", "Martinique"],
["mr", "MRT", "Mauritania"],
["mu", "MUS", "Mauritius"],
["yt", "MYT", "Mayotte"],
["mx", "MEX", "Mexico"],
["fm", "FSM", "Micronesia, Federated States of"],
["md", "MDA", "Republic of Moldova"],
["mc", "MCO", "Monaco"],
["mn", "MNG", "Mongolia"],
["me", "MNE", "Montenegro"],
["ms", "MSR", "Montserrat"],
["ma", "MAR", "Morocco"],
["mz", "MOZ", "Mozambique"],
["mm", "MMR", "Myanmar"],
["na", "NAM", "Namibia"],
["nr", "NRU", "Nauru"],
["np", "NPL", "Nepal"],
["nl", "NLD", "Netherlands"],
["nc", "NCL", "New Caledonia"],
["nz", "NZL", "New Zealand"],
["ni", "NIC", "Nicaragua"],
["ne", "NER", "Niger"],
["ng", "NGA", "Nigeria"],
["nu", "NIU", "Niue"],
["nf", "NFK", "Norfolk Island"],
["mp", "MNP", "Northern Mariana Islands"],
["no", "NOR", "Norway"],
["om", "OMN", "Oman"],
["pk", "PAK", "Pakistan"],
["pw", "PLW", "Palau"],
["ps", "PSE", "Palestine"],
["pa", "PAN", "Panama"],
["pg", "PNG", "Papua New Guinea"],
["py", "PRY", "Paraguay"],
["pe", "PER", "Peru"],
["ph", "PHL", "Philippines"],
["pn", "PCN", "Pitcairn"],
["pl", "POL", "Poland"],
["pt", "PRT", "Portugal"],
["pr", "PRI", "Puerto Rico"],
["qa", "QAT", "Qatar"],
["re", "REU", "Reunion"],
["ro", "ROU", "Romania"],
["ru", "RUS", "Russian Federation"],
["rw", "RWA", "Rwanda"],
["bl", "BLM", "Saint Barthelemy"],
["sh", "SHN", "Saint Helena"],
["kn", "KNA", "Saint Kitts and Nevis"],
["lc", "LCA", "Saint Lucia"],
["mf", "MAF", "Saint Martin (French part)"],
["pm", "SPM", "Saint Pierre and Miquelon"],
["vc", "VCT", "Saint Vincent and the Grenadines"],
["ws", "WSM", "Samoa"],
["sm", "SMR", "San Marino"],
["st", "STP", "Sao Tome and Principe"],
["sa", "SAU", "Saudi Arabia"],
["sn", "SEN", "Senegal"],
["rs", "SRB", "Serbia"],
["sc", "SYC", "Seychelles"],
["sl", "SLE", "Sierra Leone"],
["sg", "SGP", "Singapore"],
["sx", "SXM", "Sint Maarten (Dutch part)"],
["sk", "SVK", "Slovakia"],
["si", "SVN", "Slovenia"],
["sb", "SLB", "Solomon Islands"],
["so", "SOM", "Somalia"],
["za", "ZAF", "South Africa"],
["gs", "SGS", "South Georgia and the South Sandwich Islands"],
["ss", "SSD", "South Sudan"],
["es", "ESP", "Spain"],
["lk", "LKA", "Sri Lanka"],
["sd", "SDN", "Sudan"],
["sr", "SUR", "Suriname"],
["sj", "SJM", "Svalbard and Jan Mayen"],
["sz", "SWZ", "Swaziland"],
["se", "SWE", "Sweden"],
["ch", "CHE", "Switzerland"],
["sy", "SYR", "Syrian Arab Republic"],
["tw", "TWN", "Taiwan, Province of China"],
["tj", "TJK", "Tajikistan"],
["tz", "TZA", "United Republic of Tanzania"],
["th", "THA", "Thailand"],
["tl", "TLS", "Timor-Leste"],
["tg", "TGO", "Togo"],
["tk", "TKL", "Tokelau"],
["to", "TON", "Tonga"],
["tt", "TTO", "Trinidad and Tobago"],
["tn", "TUN", "Tunisia"],
["tr", "TUR", "Turkey"],
["tm", "TKM", "Turkmenistan"],
["tc", "TCA", "Turks and Caicos Islands"],
["tv", "TUV", "Tuvalu"],
["ug", "UGA", "Uganda"],
["ua", "UKR", "Ukraine"],
["ae", "ARE", "United Arab Emirates"],
["gb", "GBR", "United Kingdom"],
["us", "USA", "United States"],
["um", "UMI", "United States Minor Outlying Islands"],
["uy", "URY", "Uruguay"],
["uz", "UZB", "Uzbekistan"],
["vu", "VUT", "Vanuatu"],
["ve", "VEN", "Venezuela (Bolivarian Republic of)"],
["vn", "VNM", "Viet Nam"],
["vg", "VGB", "Virgin Islands, British"],
["vi", "VIR", "Virgin Islands, U.S."],
["wf", "WLF", "Wallis and Futuna"],
["eh", "ESH", "Western Sahara"],
["ye", "YEM", "Yemen"],
["zm", "ZMB", "Zambia"],
["zw", "ZWE", "Zimbabwe"] ]
# set of data values to be extracted, set to 0
female_in_research = 0
birth_rate = 0
expences_education = 0
total_nature = 0
total_engineering = 0
total_agriculture = 0
total_medicine = 0
total_social = 0
total_humanities = 0
total_notspecified = 0
female_nature = 0
female_engineering = 0
female_agriculture = 0
female_medicine = 0
female_social = 0
female_humanities = 0
female_notspecified = 0
# array of years in dataset
years = [2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012]
# open first datafile with 'Women in Science' data
import csv
dataFile = open('dataset_3.csv', 'rb')
Reader = csv.reader(dataFile)
data = list(Reader)
# open second datafile for birth rate data
dataFile2 = open('birth_rate_childperwoman.csv', 'rb')
Reader2 = csv.reader(dataFile2)
data2 = list(Reader2)
# open third datafile for education expenses data
dataFile3 = open('expenditure_education_percentage.csv', 'rb')
Reader3 = csv.reader(dataFile3)
data3 = list(Reader3)
# create json-string for output
json = ""
# create outputfile
outputFile = open('data_all.js', 'w')
outputFile.write("var dataset = [ ")
# this function calculates fillKeys for worldmap based on amount of females in research
def define_fillKey( value ):
fillKey = "defaultFill"
if value > 0.0:
fillKey = "< 5%"
if value > 5.0:
fillKey = "< 10%"
if value > 10.0:
fillKey = "< 15%"
if value > 15.0:
fillKey = "< 20%"
if value > 20.0:
fillKey = "< 25%"
if value > 25.0:
fillKey = "< 30%"
if value > 30.0:
fillKey = "< 35%"
if value > 35.0:
fillKey = "< 40%"
if value > 40.0:
fillKey = "< 45%"
if value > 45.0:
fillKey = "< 50%"
if value > 50.0:
fillKey = "> 50%"
return fillKey
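# Hedged sanity check (illustration only; relies solely on the function above): the
# cascading thresholds bucket a percentage into 5%-wide fill keys.
assert define_fillKey(3.2) == "< 5%"
assert define_fillKey(27.0) == "< 30%"
assert define_fillKey(63.5) == "> 50%"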
# iterate over input datasets per year in data
for year in years:
indx = years.index(year)
json = "\n{"
# preset first country name, counter for resetting data to 0
country_name = "Albania"
counter = 0
# iterate over dataset per row
for row in data[1:]:
# if new country is reached
if str(row[0]) != country_name:
# get country code
countryCode = "Unknown";
for code in country_codes:
if country_name == country_codes[country_codes.index(code)][2]:
countryCode = country_codes[country_codes.index(code)][1]
break
# define fillKey with define_fillKey function
fillKey = define_fillKey(float(female_in_research))
# iterate over dataset 2 for birth rate data
for row2 in data2[1:]:
if row2[2] == countryCode and row2[4] == str(year):
birth_rate = row2[6]
            # iterate over dataset 3 for education expenses data
for row3 in data3[1:]:
if row3[2] == countryCode and row3[4] == str(year):
expences_education = row3[6]
# create bar chart array of data in string
barchart = "barchart: [[\"female_nature\", " + str(female_nature) + "], [\"total_nature\", " + str(total_nature) +"], [\"female_engineering\", " + str(female_engineering) + "], [\"total_engineering\", " + str(total_engineering) + "], [\"female_agriculture\", " + str(female_agriculture) + "], [\"total_agriculture\", " + str(total_agriculture) + "], [\"female_medicine\", " + str(female_medicine) + "], [\"total_medicine\", " + str(total_medicine) + "], [\"female_social\", " + str(female_social) + "], [\"total_social\", " + str(total_social) + "], [\"female_humanities\", " + str(female_humanities) + "], [\"total_humanities\", " + str(total_humanities) + "], [\"female_notspecified\", " + str(female_notspecified) +"], [\"total_notspecified\", " + str(total_notspecified) + "]]"
# create country object of data in json string
json += "\"" + str(countryCode) + "\" : { country_name: \"" + str(country_name) + "\", fillKey: \"" + fillKey + "\", female_in_research: " + str(float(female_in_research)) + ", birth_rate: " + str(birth_rate) + ", expences_education: " + str(expences_education) + ", " + barchart + "}, "
# if iteration is past the first country
            if counter != 0:
# set data to null
female_in_research = 0
birth_rate = 0
female_work = 0
expences_education = 0
total_nature = 0
total_engineering = 0
total_agriculture = 0
total_medicine = 0
total_social = 0
total_humanities = 0
total_notspecified = 0
female_nature = 0
female_engineering = 0
female_agriculture = 0
female_medicine = 0
female_social = 0
female_humanities = 0
female_notspecified = 0
# get current country name
country_name = str(row[0])
# Get value of current row's indicator
if row[1] == "Researchers (FTE) - % Female":
female_in_research = row[indx + 2]
if row[1] == "Researchers (HC) - % Female" and female_in_research < row[indx + 2]:
female_in_research = row[indx + 2]
if row[1] == "Researchers (FTE) - % Female":
female_in_research = row[indx + 2]
if row[1] == "Researchers (FTE) - Natural sciences %":
total_nature = row[indx + 2]
if row[1] == "Researchers (FTE) - Engineering and technology %":
total_engineering = row[indx + 2]
if row[1] == "Researchers (FTE) - Medical and health sciences %":
total_medicine = row[indx + 2]
if row[1] == "Researchers (FTE) - Agricultural sciences %":
total_agriculture = row[indx + 2]
if row[1] == "Researchers (FTE) - Social sciences %":
total_social = row[indx + 2]
if row[1] == "Researchers (FTE) - Humanities %":
total_humanities = row[indx + 2]
if row[1] == "Researchers (FTE) - Not specified fields %":
total_notspecified = row[indx + 2]
if row[1] == "Female researchers as a percentage of total researchers (FTE) - Natural sciences":
female_nature = row[indx + 2]
if row[1] == "Female researchers as a percentage of total researchers (FTE) - Engineering and technology":
female_engineering = row[indx + 2]
if row[1] == "Female researchers as a percentage of total researchers (FTE) - Medical and health sciences":
female_medicine = row[indx + 2]
if row[1] == "Female researchers as a percentage of total researchers (FTE) - Agricultural sciences":
female_medicine = row[indx + 2]
if row[1] == "Female researchers as a percentage of total researchers (FTE) - Social sciences":
female_social = row[indx + 2]
if row[1] == "Female researchers as a percentage of total researchers (FTE) - Humanities":
female_humanities = row[indx + 2]
if row[1] == "Female researchers as a percentage of total researchers (FTE) - Not specified fields":
female_notspecified = row[indx + 2]
counter += 1
    # reset country code for last country
    countryCode = "Unknown"
    # get last country code
    for code in country_codes:
        if country_name == code[2]:
            countryCode = code[1]
            break
# define fillKey with define_fillKey function
fillKey = define_fillKey(float(female_in_research))
# iterate over dataset 2 for birth rate data
for row2 in data2[1:]:
if row2[2] == countryCode and row2[4] == str(year):
birth_rate = row2[6]
    # iterate over dataset 3 for education expenses data
for row3 in data3[1:]:
if row3[2] == countryCode and row3[4] == str(year):
expences_education = row3[6]
# create bar chart array of data in string
barchart = "barchart: [[\"female_nature\", " + str(female_nature) + "], [\"total_nature\", " + str(total_nature) +"], [\"female_engineering\", " + str(female_engineering) + "], [\"total_engineering\", " + str(total_engineering) + "], [\"female_agriculture\", " + str(female_agriculture) + "], [\"total_agriculture\", " + str(total_agriculture) + "], [\"female_medicine\", " + str(female_medicine) + "], [\"total_medicine\", " + str(total_medicine) + "], [\"female_social\", " + str(female_social) + "], [\"total_social\", " + str(total_social) + "], [\"female_humanities\", " + str(female_humanities) + "], [\"total_humanities\", " + str(total_humanities) + "], [\"female_notspecified\", " + str(female_notspecified) +"], [\"total_notspecified\", " + str(total_notspecified) + "]]"
# create country object of data in json string
json += "\"" + str(countryCode) + "\" : { country_name: \"" + str(country_name) + "\", fillKey: \"" + fillKey + "\", female_in_research: " + str(float(female_in_research)) + ", birth_rate: " + str(birth_rate) + ", expences_education: " + str(expences_education) + ", " + barchart + "}, "
# end final object without comma and close year object
json = json[0:-2] + "}"
# if not final year, put comma after object
if indx < 12:
json += ","
# write output string in output file
outputFile.write(json)
# end, write and close file
end_of_file = "\n];"
outputFile.write(end_of_file)
outputFile.close()
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 12 15:11:17 2018
@author: tih
"""
def main(input_nc, output_nc, input_JRC, Inflow_Text_Files, include_reservoirs = 1):
import time
import wa.General.raster_conversions as RC
import wa.General.data_conversions as DC
import numpy as np
import netCDF4
####################### add inflow text files #################################
if len(Inflow_Text_Files) > 0:
import wa.Models.SurfWAT.Part0_Add_Inlets as Part0_Add_Inlets
        # Calculate the runoff that will be routed by including the inlets.
        # The imported module object itself is not callable; its entry point is
        # assumed to be Run(), mirroring the other SurfWAT parts used below.
        Runoff = Part0_Add_Inlets.Run(input_nc, Inflow_Text_Files)
else:
# Extract runoff data from NetCDF file
Runoff = RC.Open_nc_array(input_nc, Var = 'Runoff_M')
###############################################################################
# Extract flow direction data from NetCDF file
flow_directions = RC.Open_nc_array(input_nc, Var = 'demdir')
# Extract basin data from NetCDF file
Basin = RC.Open_nc_array(input_nc, Var = 'basin')
Areas_in_m2 = RC.Open_nc_array(input_nc, Var = 'area')
Runoff_in_m3_month = ((Runoff/1000) * Areas_in_m2)
###############################################################################
############################### Run Part 1 ####################################
###############################################################################
import wa.Models.SurfWAT.Part1_Channel_Routing as Part1_Channel_Routing
Routed_Array, Accumulated_Pixels, Rivers = Part1_Channel_Routing.Run(Runoff_in_m3_month, flow_directions, Basin)
###############################################################################
################## Create NetCDF Part 1 results ###############################
###############################################################################
################### Get Example parameters for NetCDF #########################
# Create NetCDF
geo_out_example, epsg_example, size_X_example, size_Y_example, size_Z_example, Time_example = RC.Open_nc_info(input_nc)
geo_out_example = np.array(geo_out_example)
time_or = RC.Open_nc_array(input_nc, Var = 'time')
# Latitude and longitude
lon_ls = np.arange(size_X_example)*geo_out_example[1]+geo_out_example[0] + 0.5 * geo_out_example[1]
lat_ls = np.arange(size_Y_example)*geo_out_example[5]+geo_out_example[3] - 0.5 * geo_out_example[5]
lat_n = len(lat_ls)
lon_n = len(lon_ls)
################################ Save NetCDF ##################################
# Create NetCDF file
nc_file = netCDF4.Dataset(output_nc, 'w', format = 'NETCDF4')
nc_file.set_fill_on()
# Create dimensions
lat_dim = nc_file.createDimension('latitude', lat_n)
lon_dim = nc_file.createDimension('longitude', lon_n)
# Create NetCDF variables
crso = nc_file.createVariable('crs', 'i4')
crso.long_name = 'Lon/Lat Coords in WGS84'
crso.standard_name = 'crs'
crso.grid_mapping_name = 'latitude_longitude'
crso.projection = epsg_example
crso.longitude_of_prime_meridian = 0.0
crso.semi_major_axis = 6378137.0
crso.inverse_flattening = 298.257223563
crso.geo_reference = geo_out_example
######################### Save Rasters in NetCDF ##############################
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude',))
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lat_var.pixel_size = geo_out_example[5]
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude',))
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
lon_var.pixel_size = geo_out_example[1]
nc_file.createDimension('time', None)
timeo = nc_file.createVariable('time', 'f4', ('time',))
timeo.units = 'Monthly'
timeo.standard_name = 'time'
# Variables
rivers_var = nc_file.createVariable('rivers', 'i',
('latitude', 'longitude'),
fill_value=-9999)
rivers_var.long_name = 'Rivers'
rivers_var.grid_mapping = 'crs'
accpix_var = nc_file.createVariable('accpix', 'f8',
('latitude', 'longitude'),
fill_value=-9999)
accpix_var.long_name = 'Accumulated Pixels'
accpix_var.units = 'AmountPixels'
accpix_var.grid_mapping = 'crs'
discharge_nat_var = nc_file.createVariable('discharge_natural', 'f8',
('time', 'latitude', 'longitude'),
fill_value=-9999)
discharge_nat_var.long_name = 'Natural Discharge'
discharge_nat_var.units = 'm3/month'
discharge_nat_var.grid_mapping = 'crs'
# Load data
lat_var[:] = lat_ls
lon_var[:] = lon_ls
timeo[:] = time_or
# Static variables
rivers_var[:, :] = Rivers[:, :]
accpix_var[:, :] = Accumulated_Pixels[:, :]
for i in range(len(time_or)):
discharge_nat_var[i,:,:] = Routed_Array[i,:,:]
time.sleep(1)
nc_file.close()
del Routed_Array, Accumulated_Pixels
###############################################################################
############################### Run Part 2 ####################################
###############################################################################
import wa.Models.SurfWAT.Part2_Create_Dictionaries as Part2_Create_Dictionaries
DEM_dict, River_dict, Distance_dict, Discharge_dict = Part2_Create_Dictionaries.Run(input_nc, output_nc)
###############################################################################
################## Create NetCDF Part 2 results ###############################
###############################################################################
# Create NetCDF file
nc_file = netCDF4.Dataset(output_nc, 'r+', format = 'NETCDF4')
nc_file.set_fill_on()
###################### Save Dictionaries in NetCDF ############################
parmsdem = nc_file.createGroup('demdict_static')
for k,v in DEM_dict.items():
setattr(parmsdem, str(k), str(v.tolist()))
parmsriver = nc_file.createGroup('riverdict_static')
for k,v in River_dict.items():
setattr(parmsriver, str(k), str(v.tolist()))
parmsdist = nc_file.createGroup('distancedict_static')
for k,v in Distance_dict.items():
setattr(parmsdist, str(k), str(v.tolist()))
parmsdis = nc_file.createGroup('dischargedict_dynamic')
for k,v in Discharge_dict.items():
setattr(parmsdis, str(k), str(v.tolist()))
# Close file
time.sleep(1)
nc_file.close()
###############################################################################
############################### Run Part 3 ####################################
###############################################################################
if include_reservoirs == 1:
import wa.Models.SurfWAT.Part3_Reservoirs as Part3_Reservoirs
Discharge_dict_2, River_dict_2, DEM_dict_2, Distance_dict_2 = Part3_Reservoirs.Run(input_nc, output_nc, input_JRC)
else:
import copy
Discharge_dict_2 = copy.deepcopy(Discharge_dict)
River_dict_2 = copy.deepcopy(River_dict)
DEM_dict_2= copy.deepcopy(DEM_dict)
Distance_dict_2 = copy.deepcopy(Distance_dict)
###############################################################################
################## Create NetCDF Part 3 results ###############################
###############################################################################
# Create NetCDF file
nc_file = netCDF4.Dataset(output_nc, 'r+', format = 'NETCDF4')
nc_file.set_fill_on()
###################### Save Dictionaries in NetCDF ############################
parmsdisres = nc_file.createGroup('dischargedictreservoirs_dynamic')
for k,v in Discharge_dict_2.items():
setattr(parmsdisres, str(k), str(v.tolist()))
parmsrivresend = nc_file.createGroup('riverdictres_static')
for k,v in River_dict_2.items():
setattr(parmsrivresend, str(k), str(v.tolist()))
parmsdemres = nc_file.createGroup('demdictres_static')
for k,v in DEM_dict_2.items():
setattr(parmsdemres, str(k), str(v.tolist()))
parmsdistres = nc_file.createGroup('distancedictres_static')
for k,v in Distance_dict_2.items():
setattr(parmsdistres, str(k), str(v.tolist()))
# Close file
time.sleep(1)
nc_file.close()
del DEM_dict, River_dict, Distance_dict, Discharge_dict
###############################################################################
############################### Run Part 4 ####################################
###############################################################################
import wa.Models.SurfWAT.Part4_Withdrawals as Part4_Withdrawals
Discharge_dict_end = Part4_Withdrawals.Run(input_nc, output_nc)
###############################################################################
################## Create NetCDF Part 4 results ###############################
###############################################################################
# Create NetCDF file
nc_file = netCDF4.Dataset(output_nc, 'r+', format = 'NETCDF4')
nc_file.set_fill_on()
###################### Save Dictionaries in NetCDF ############################
parmsdisend = nc_file.createGroup('dischargedictend_dynamic')
for k,v in Discharge_dict_end.items():
setattr(parmsdisend, str(k), str(v.tolist()))
# Close file
time.sleep(1)
nc_file.close()
del Discharge_dict_end
###############################################################################
############### Part 5 Convert dictionaries to rasters ########################
###############################################################################
River_dict = RC.Open_nc_dict(output_nc, 'riverdict_static')
# End discharge dictionary to raster
Discharge_dict_end = RC.Open_nc_dict(output_nc, 'dischargedictend_dynamic')
DataCube_Discharge_end = DC.Convert_dict_to_array(River_dict, Discharge_dict_end, input_nc)
###################### Save Dictionaries in NetCDF ############################
# Create NetCDF file
nc_file = netCDF4.Dataset(output_nc, 'r+', format = 'NETCDF4')
nc_file.set_fill_on()
discharge_end_var = nc_file.createVariable('discharge_end', 'f8',
('time', 'latitude', 'longitude'),
fill_value=-9999)
discharge_end_var.long_name = 'End Discharge'
discharge_end_var.units = 'm3/month'
discharge_end_var.grid_mapping = 'crs'
for i in range(len(time_or)):
discharge_end_var[i,:,:] = DataCube_Discharge_end[i,:,:]
# Close file
nc_file.close()
del DataCube_Discharge_end
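# Rough usage sketch (file paths are hypothetical and not part of this module):
# the routing inputs are expected in input_nc and every intermediate and final
# product is appended to output_nc.
#   main(r"D:\SurfWAT\input_runoff.nc",
#        r"D:\SurfWAT\output_routing.nc",
#        r"D:\SurfWAT\JRC_occurrence.tif",
#        Inflow_Text_Files=[],
#        include_reservoirs=1)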
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for building I3D network models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# Originally, add_arg_scope = slim.add_arg_scope and layers = slim; now switch to
# the more up-to-date tf.contrib.* API.
add_arg_scope = tf.contrib.framework.add_arg_scope
layers = tf.contrib.layers
def center_initializer():
"""Centering Initializer for I3D.
This initializer allows identity mapping for temporal convolution at the
initialization, which is critical for a desired convergence behavior
  for training a separable I3D model.
The centering behavior of this initializer requires an odd-sized kernel,
typically set to 3.
Returns:
A weight initializer op used in temporal convolutional layers.
Raises:
ValueError: Input tensor data type has to be tf.float32.
ValueError: If input tensor is not a 5-D tensor.
ValueError: If input and output channel dimensions are different.
ValueError: If spatial kernel sizes are not 1.
ValueError: If temporal kernel size is even.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None): # pylint: disable=unused-argument
"""Initializer op."""
if dtype != tf.float32 and dtype != tf.bfloat16:
raise ValueError(
'Input tensor data type has to be tf.float32 or tf.bfloat16.')
if len(shape) != 5:
raise ValueError('Input tensor has to be 5-D.')
if shape[3] != shape[4]:
raise ValueError('Input and output channel dimensions must be the same.')
if shape[1] != 1 or shape[2] != 1:
raise ValueError('Spatial kernel sizes must be 1 (pointwise conv).')
if shape[0] % 2 == 0:
raise ValueError('Temporal kernel size has to be odd.')
center_pos = int(shape[0] / 2)
init_mat = np.zeros(
[shape[0], shape[1], shape[2], shape[3], shape[4]], dtype=np.float32)
for i in range(0, shape[3]):
init_mat[center_pos, 0, 0, i, i] = 1.0
init_op = tf.constant(init_mat, dtype=dtype)
return init_op
return _initializer
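# Minimal sketch of what the initializer produces (shape values are illustrative):
# for a temporal kernel of shape [3, 1, 1, 64, 64], the returned op is zero
# everywhere except weights[1, 0, 0, i, i] = 1.0, so the temporal convolution
# starts out as an identity mapping over time.
#   init_fn = center_initializer()
#   weights = init_fn([3, 1, 1, 64, 64], dtype=tf.float32)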
@add_arg_scope
def conv3d_spatiotemporal(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=None,
normalizer_fn=None,
normalizer_params=None,
weights_regularizer=None,
separable=False,
data_format='NDHWC',
scope=''):
"""A wrapper for conv3d to model spatiotemporal representations.
This allows switching between original 3D convolution and separable 3D
convolutions for spatial and temporal features respectively. On Kinetics,
  separable 3D convolutions yield better classification performance.
Args:
inputs: a 5-D tensor `[batch_size, depth, height, width, channels]`.
num_outputs: integer, the number of output filters.
kernel_size: a list of length 3
`[kernel_depth, kernel_height, kernel_width]` of the filters. Can be an
int if all values are the same.
stride: a list of length 3 `[stride_depth, stride_height, stride_width]`.
Can be an int if all strides are the same.
padding: one of `VALID` or `SAME`.
activation_fn: activation function.
normalizer_fn: normalization function to use instead of `biases`.
normalizer_params: dictionary of normalization function parameters.
weights_regularizer: Optional regularizer for the weights.
separable: If `True`, use separable spatiotemporal convolutions.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
scope: scope for `variable_scope`.
Returns:
A tensor representing the output of the (separable) conv3d operation.
"""
assert len(kernel_size) == 3
if separable and kernel_size[0] != 1:
spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
temporal_kernel_size = [kernel_size[0], 1, 1]
if isinstance(stride, list) and len(stride) == 3:
spatial_stride = [1, stride[1], stride[2]]
temporal_stride = [stride[0], 1, 1]
else:
spatial_stride = [1, stride, stride]
temporal_stride = [stride, 1, 1]
net = layers.conv3d(
inputs,
num_outputs,
spatial_kernel_size,
stride=spatial_stride,
padding=padding,
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_regularizer=weights_regularizer,
data_format=data_format,
scope=scope)
net = layers.conv3d(
net,
num_outputs,
temporal_kernel_size,
stride=temporal_stride,
padding=padding,
scope=scope + '/temporal',
activation_fn=activation_fn,
normalizer_fn=None,
data_format=data_format,
weights_initializer=center_initializer())
return net
else:
return layers.conv3d(
inputs,
num_outputs,
kernel_size,
stride=stride,
padding=padding,
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_regularizer=weights_regularizer,
data_format=data_format,
scope=scope)
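# Usage sketch (tensor and scope names are illustrative): with separable=True a
# [3, 3, 3] kernel is factored into a [1, 3, 3] spatial conv followed by a
# [3, 1, 1] temporal conv whose weights start out as an identity in time.
#   net = conv3d_spatiotemporal(video, 64, [3, 3, 3], separable=True,
#                               scope='Conv3d_2b_3x3')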
@add_arg_scope
def inception_block_v1_3d(inputs,
num_outputs_0_0a,
num_outputs_1_0a,
num_outputs_1_0b,
num_outputs_2_0a,
num_outputs_2_0b,
num_outputs_3_0b,
temporal_kernel_size=3,
self_gating_fn=None,
data_format='NDHWC',
scope=''):
"""A 3D Inception v1 block.
This allows use of separable 3D convolutions and self-gating, as
described in:
Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu and Kevin Murphy,
Rethinking Spatiotemporal Feature Learning For Video Understanding.
https://arxiv.org/abs/1712.04851.
Args:
inputs: a 5-D tensor `[batch_size, depth, height, width, channels]`.
num_outputs_0_0a: integer, the number of output filters for Branch 0,
operation Conv2d_0a_1x1.
num_outputs_1_0a: integer, the number of output filters for Branch 1,
operation Conv2d_0a_1x1.
num_outputs_1_0b: integer, the number of output filters for Branch 1,
operation Conv2d_0b_3x3.
num_outputs_2_0a: integer, the number of output filters for Branch 2,
operation Conv2d_0a_1x1.
num_outputs_2_0b: integer, the number of output filters for Branch 2,
operation Conv2d_0b_3x3.
num_outputs_3_0b: integer, the number of output filters for Branch 3,
operation Conv2d_0b_1x1.
temporal_kernel_size: integer, the size of the temporal convolutional
filters in the conv3d_spatiotemporal blocks.
self_gating_fn: function which optionally performs self-gating.
Must have two arguments, `inputs` and `scope`, and return one output
tensor the same size as `inputs`. If `None`, no self-gating is
applied.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
scope: scope for `variable_scope`.
Returns:
A 5-D tensor `[batch_size, depth, height, width, out_channels]`, where
`out_channels = num_outputs_0_0a + num_outputs_1_0b + num_outputs_2_0b
+ num_outputs_3_0b`.
"""
use_gating = self_gating_fn is not None
with tf.variable_scope(scope):
with tf.variable_scope('Branch_0'):
branch_0 = layers.conv3d(
inputs, num_outputs_0_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
if use_gating:
branch_0 = self_gating_fn(branch_0, scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = layers.conv3d(
inputs, num_outputs_1_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
branch_1 = conv3d_spatiotemporal(
branch_1, num_outputs_1_0b, [temporal_kernel_size, 3, 3],
scope='Conv2d_0b_3x3')
if use_gating:
branch_1 = self_gating_fn(branch_1, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = layers.conv3d(
inputs, num_outputs_2_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
branch_2 = conv3d_spatiotemporal(
branch_2, num_outputs_2_0b, [temporal_kernel_size, 3, 3],
scope='Conv2d_0b_3x3')
if use_gating:
branch_2 = self_gating_fn(branch_2, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = layers.max_pool3d(inputs, [3, 3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv3d(
branch_3, num_outputs_3_0b, [1, 1, 1], scope='Conv2d_0b_1x1')
if use_gating:
branch_3 = self_gating_fn(branch_3, scope='Conv2d_0b_1x1')
index_c = data_format.index('C')
assert 1 <= index_c <= 4, 'Cannot identify channel dimension.'
output = tf.concat([branch_0, branch_1, branch_2, branch_3], index_c)
return output
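# Example of the output width (filter counts are made up): with
# num_outputs_0_0a=64, num_outputs_1_0b=128, num_outputs_2_0b=32 and
# num_outputs_3_0b=32, the concatenation yields 64 + 128 + 32 + 32 = 256
# channels along the 'C' axis of data_format.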
def reduced_kernel_size_3d(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size
[batch_size, time, height, width, channels].
kernel_size: desired kernel size of length 3, corresponding to time,
height and width.
Returns:
a tensor with the kernel size.
"""
assert len(kernel_size) == 3
shape = input_tensor.get_shape().as_list()
assert len(shape) == 5
if None in shape[1:4]:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1]),
min(shape[3], kernel_size[2])]
return kernel_size_out
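# Example (shapes are illustrative): for an input of static shape
# [batch, 4, 7, 7, C] and a requested kernel of [3, 7, 7], the result is
# [3, 7, 7]; with only 2 frames in the time dimension it would shrink to
# [2, 7, 7]. Unknown spatial/temporal dims leave the kernel unchanged.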
|
|
from flask import Flask
from flask import request
from flask import render_template
from flask import make_response
from flask import jsonify
import numpy as np
import StringIO
import csv
import time
import logging
from logging.handlers import SysLogHandler
app = Flask(__name__)
# log Flask events
# address for OSX
handler = SysLogHandler(address='/var/run/syslog')
handler.setLevel(logging.DEBUG)
app.logger.addHandler(handler)
app.logger.setLevel(logging.DEBUG)
##############
# this doesn't log... why?
@app.after_request
def write_access_log(response):
app.logger.debug(u"%s %s --> %s" % (time.asctime(), request.path, response.status_code))
return response
##############
def get_coin_trial(n=100, p=0.5):
result = list(np.random.choice([0,1],n, p=[p, 1.0-p]))
return result
def get_coin_ensemble(k=10, n=100, p=0.5):
result = [get_coin_trial(n, p) for i in range(k)]
return result
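# Note on the helpers above: p is the probability of drawing 0 and 1.0 - p the
# probability of drawing 1. Illustrative shapes (actual values are random):
#   get_coin_trial(4)        -> e.g. [0, 1, 1, 0]
#   get_coin_ensemble(2, 3)  -> e.g. [[1, 0, 0], [0, 1, 1]]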
##############
@app.route('/info.html')
def info():
return render_template( "info.html")
@app.route('/trial')
def trial(n=100, p=0.5):
n = request.args.get("n", n, type=int)
p = request.args.get("p", p, type=float)
result = {
"n" : n
, "result" : get_coin_trial(n, p)
, "p_0" : p
, "p_1" : 1.0-p
}
return jsonify(**result)
@app.route('/ensemble')
def ensemble(k=10, n=100, p=0.5):
k = request.args.get("k", k, type=int)
n = request.args.get("n", n, type=int)
p = request.args.get("p", p, type=float)
result = {
"n" : n
, "results" : get_coin_ensemble(k, n, p)
, "p_0" : p
, "p_1" : 1.0-p
}
return jsonify(**result)
@app.route('/ensemble/summary')
def ensemble_summary(k=10, n=100, p=0.5):
k = request.args.get("k", k, type=int)
n = request.args.get("n", n, type=int)
p = request.args.get("p", p, type=float)
res = get_coin_ensemble(k, n, p)
avgs = [np.average(i) for i in res]
cnts = [[i.count(0), i.count(1)] for i in res]
result = {
"summary": {
"counts" : cnts
, "averages" : avgs
},
"data": {
"n" : n
, "results" : res
, "p_0" : p
, "p_1" : 1.0-p
}
}
return jsonify(**result)
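# Example request against the route above (parameter values are illustrative):
#   GET /ensemble/summary?k=3&n=20&p=0.4
# returns JSON with per-trial [count_of_0s, count_of_1s] pairs, per-trial
# averages, and the raw trial data.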
@app.route("/plot/demo.png")
def plot_demo():
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
mu, sigma = 100, 15
x = mu + sigma*np.random.randn(10000)
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='green', alpha=0.75)
# add a 'best fit' line
y = mlab.normpdf( bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=1)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
# Write to the canvas
fig = plt.gcf()
fig.set_size_inches(6,5)
canvas = FigureCanvas(fig)
output = StringIO.StringIO()
canvas.print_png(output)
response = make_response(output.getvalue())
response.mimetype = 'image/png'
return response
@app.route("/plot/hist/<spec>.png")
def plot_hist(spec="10_100_500"):
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
spec = request.args.get("spec", spec, type=str).split("_")
assert(len(spec) == 3)
k = int(spec[0])
n = int(spec[1])
p = float(spec[2])/1000.
res = get_coin_ensemble(k, n, p)
avgs = [np.average(i) for i in res]
plt.clf()
fig = plt.figure()
l = plt.hist(avgs)
fig.set_size_inches(5,4)
canvas = FigureCanvas(fig)
output = StringIO.StringIO()
canvas.print_png(output)
response = make_response(output.getvalue())
response.mimetype = 'image/png'
return response
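# The <spec> path segment packs k, n and p*1000 into one string, e.g.
#   GET /plot/hist/10_100_500.png
# renders a histogram of the per-trial averages for 10 trials of 100 flips
# at p = 0.5 (these example numbers match the route's defaults).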
@app.route('/ensemble/table.html')
def table_data(k=5, n=10, p=0.5):
k = request.args.get("k", k, type=int)
n = request.args.get("n", n, type=int)
p = request.args.get("p", p, type=float)
res = get_coin_ensemble(k, n, p)
rows = []
# convert to rows
for row in range(len(res[0])):
r = []
for col in res:
r.append(col[row])
rows.append(r)
# cell background colors
color={
0 : "#AA4040"
, 1 : "#00AA00"
}
return render_template("table.html", rows=rows, color=color)
@app.route('/ensemble/csv')
def csv_data(k=5, n=10, p=0.5):
k = request.args.get("k", k, type=int)
n = request.args.get("n", n, type=int)
p = request.args.get("p", p, type=float)
res = get_coin_ensemble(k, n, p)
rows = []
# convert to rows
for row in range(len(res[0])):
r = []
for col in res:
r.append(col[row])
rows.append(r)
si = StringIO.StringIO()
cw = csv.writer(si)
cw.writerows(rows)
output = make_response(si.getvalue())
output.headers["Content-Disposition"] = "attachment; filename=export.csv"
output.headers["Content-type"] = "text/csv"
return output
if __name__ == '__main__':
app.run(debug=True)
|
|
import unittest
import numpy as np
import pysal
import pysal.spreg.diagnostics as D
from pysal.spreg.twosls_sp import BaseGM_Lag, GM_Lag
from pysal.common import RTOL
class TestBaseGMLag(unittest.TestCase):
def setUp(self):
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
self.db = pysal.open(pysal.examples.get_path("columbus.dbf"), 'r')
y = np.array(self.db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
def test___init__(self):
w_lags = 2
X = []
X.append(self.db.by_col("INC"))
X.append(self.db.by_col("CRIME"))
self.X = np.array(X).T
yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, w_lags, True)
self.X = np.hstack((np.ones(self.y.shape),self.X))
reg = BaseGM_Lag(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse, w_lags=w_lags)
betas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
h_0 = np.array([ 1. , 19.531 , 15.72598 , 18.594 ,
24.7142675 , 13.72216667, 27.82929567])
np.testing.assert_allclose(reg.h[0], h_0)
        hth = np.array([ 49. , 704.371999 , 1721.312371 , 724.7435916 ,
1707.35412945, 711.31248483, 1729.63201243])
np.testing.assert_allclose(reg.hth[0], hth,RTOL)
hthi = np.array([ 7.33701328e+00, 2.27764882e-02, 2.18153588e-02,
-5.11035447e-02, 1.22515181e-03, -2.38079378e-01,
-1.20149133e-01])
np.testing.assert_allclose(reg.hthi[0], hthi,RTOL)
self.assertEqual(reg.k, 4)
self.assertEqual(reg.kstar, 1)
np.testing.assert_allclose(reg.mean_y, 38.436224469387746,RTOL)
self.assertEqual(reg.n, 49)
pfora1a2 = np.array([ 80.5588479 , -1.06625281, -0.61703759, -1.10071931])
np.testing.assert_allclose(reg.pfora1a2[0], pfora1a2,RTOL)
predy_5 = np.array([[ 50.87411532],[ 50.76969931],[ 41.77223722],[ 33.44262382],[ 28.77418036]])
np.testing.assert_allclose(reg.predy[0:5], predy_5,RTOL)
q_5 = np.array([ 18.594 , 24.7142675 , 13.72216667, 27.82929567])
np.testing.assert_allclose(reg.q[0], q_5)
np.testing.assert_allclose(reg.sig2n_k, 234.54258763039289,RTOL)
np.testing.assert_allclose(reg.sig2n, 215.39625394627919,RTOL)
np.testing.assert_allclose(reg.sig2, 215.39625394627919,RTOL)
np.testing.assert_allclose(reg.std_y, 18.466069465206047,RTOL)
u_5 = np.array( [[ 29.59288768], [ -6.20269831], [-15.42223722], [ -0.24262282], [ -5.54918036]])
np.testing.assert_allclose(reg.u[0:5], u_5,RTOL)
np.testing.assert_allclose(reg.utu, 10554.41644336768,RTOL)
varb = np.array( [[ 1.48966377e+00, -2.28698061e-02, -1.20217386e-02, -1.85763498e-02],
[ -2.28698061e-02, 1.27893998e-03, 2.74600023e-04, -1.33497705e-04],
[ -1.20217386e-02, 2.74600023e-04, 1.54257766e-04, 6.86851184e-05],
[ -1.85763498e-02, -1.33497705e-04, 6.86851184e-05, 4.67711582e-04]])
np.testing.assert_allclose(reg.varb, varb,RTOL)
vm = np.array([[ 3.20867996e+02, -4.92607057e+00, -2.58943746e+00, -4.00127615e+00],
[ -4.92607057e+00, 2.75478880e-01, 5.91478163e-02, -2.87549056e-02],
[ -2.58943746e+00, 5.91478163e-02, 3.32265449e-02, 1.47945172e-02],
[ -4.00127615e+00, -2.87549056e-02, 1.47945172e-02, 1.00743323e-01]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
x_0 = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0], x_0,RTOL)
y_5 = np.array( [[ 80.467003], [ 44.567001], [ 26.35 ], [ 33.200001], [ 23.225 ]])
np.testing.assert_allclose(reg.y[0:5], y_5,RTOL)
yend_5 = np.array( [[ 35.4585005 ], [ 46.67233467], [ 45.36475125], [ 32.81675025], [ 30.81785714]])
np.testing.assert_allclose(reg.yend[0:5], yend_5,RTOL)
z_0 = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
np.testing.assert_allclose(reg.z[0], z_0,RTOL)
zthhthi = np.array( [[ 1.00000000e+00, -2.22044605e-16, -2.22044605e-16 , 2.22044605e-16,
4.44089210e-16, 0.00000000e+00, -8.88178420e-16],
[ 0.00000000e+00, 1.00000000e+00, -3.55271368e-15 , 3.55271368e-15,
-7.10542736e-15, 7.10542736e-14, 0.00000000e+00],
[ 1.81898940e-12, 2.84217094e-14, 1.00000000e+00 , 0.00000000e+00,
-2.84217094e-14, 5.68434189e-14, 5.68434189e-14],
[ -8.31133940e+00, -3.76104678e-01, -2.07028208e-01 , 1.32618931e+00,
-8.04284562e-01, 1.30527047e+00, 1.39136816e+00]])
        # assert_allclose with RTOL is too strict here because of the near-zero
        # entries, so compare with a fixed number of decimals instead.
        np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi, 7)
def test_init_white_(self):
w_lags = 2
X = []
X.append(self.db.by_col("INC"))
X.append(self.db.by_col("CRIME"))
self.X = np.array(X).T
yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, w_lags, True)
self.X = np.hstack((np.ones(self.y.shape),self.X))
base_gm_lag = BaseGM_Lag(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse, w_lags=w_lags, robust='white')
tbetas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
np.testing.assert_allclose(base_gm_lag.betas, tbetas)
dbetas = D.se_betas(base_gm_lag)
se_betas = np.array([ 20.47077481, 0.50613931, 0.20138425, 0.38028295 ])
np.testing.assert_allclose(dbetas, se_betas)
def test_init_hac_(self):
w_lags = 2
X = []
X.append(self.db.by_col("INC"))
X.append(self.db.by_col("CRIME"))
self.X = np.array(X).T
yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, w_lags, True)
self.X = np.hstack((np.ones(self.y.shape),self.X))
gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=15,function='triangular', fixed=False)
base_gm_lag = BaseGM_Lag(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse, w_lags=w_lags, robust='hac', gwk=gwk)
tbetas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
np.testing.assert_allclose(base_gm_lag.betas, tbetas)
dbetas = D.se_betas(base_gm_lag)
se_betas = np.array([ 19.08513569, 0.51769543, 0.18244862, 0.35460553])
np.testing.assert_allclose(dbetas, se_betas)
def test_init_discbd(self):
w_lags = 2
X = np.array(self.db.by_col("INC"))
self.X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("CRIME"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, yd, q, w_lags, True)
self.X = np.hstack((np.ones(self.y.shape),self.X))
reg = BaseGM_Lag(self.y, self.X, w=self.w.sparse, yend=yd2, q=q2, w_lags=w_lags)
tbetas = np.array([[ 100.79359082], [ -0.50215501], [ -1.14881711], [ -0.38235022]])
np.testing.assert_allclose(tbetas, reg.betas)
dbetas = D.se_betas(reg)
se_betas = np.array([ 53.0829123 , 1.02511494, 0.57589064, 0.59891744 ])
np.testing.assert_allclose(dbetas, se_betas)
def test_n_k(self):
w_lags = 2
X = []
X.append(self.db.by_col("INC"))
X.append(self.db.by_col("CRIME"))
self.X = np.array(X).T
yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, w_lags, True)
self.X = np.hstack((np.ones(self.y.shape),self.X))
reg = BaseGM_Lag(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse, w_lags=w_lags, sig2n_k=True)
        betas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array( [[ 3.49389596e+02, -5.36394351e+00, -2.81960968e+00, -4.35694515e+00],
[ -5.36394351e+00, 2.99965892e-01, 6.44054000e-02, -3.13108972e-02],
[ -2.81960968e+00, 6.44054000e-02, 3.61800155e-02, 1.61095854e-02],
[ -4.35694515e+00, -3.13108972e-02, 1.61095854e-02, 1.09698285e-01]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
def test_lag_q(self):
w_lags = 2
X = np.array(self.db.by_col("INC"))
self.X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("CRIME"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, yd, q, w_lags, False)
self.X = np.hstack((np.ones(self.y.shape),self.X))
reg = BaseGM_Lag(self.y, self.X, w=self.w.sparse, yend=yd2, q=q2, w_lags=w_lags, lag_q=False)
tbetas = np.array( [[ 108.83261383], [ -0.48041099], [ -1.18950006], [ -0.56140186]])
np.testing.assert_allclose(tbetas, reg.betas)
dbetas = D.se_betas(reg)
se_betas = np.array([ 58.33203837, 1.09100446, 0.62315167, 0.68088777])
np.testing.assert_allclose(dbetas, se_betas)
class TestGMLag(unittest.TestCase):
def setUp(self):
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
self.db = pysal.open(pysal.examples.get_path("columbus.dbf"), 'r')
y = np.array(self.db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
def test___init__(self):
X = []
X.append(self.db.by_col("INC"))
X.append(self.db.by_col("CRIME"))
self.X = np.array(X).T
reg = GM_Lag(self.y, self.X, w=self.w, w_lags=2)
betas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
e_5 = np.array( [[ 29.28976367], [ -6.07439501], [-15.30080685], [ -0.41773375], [ -5.67197968]])
np.testing.assert_allclose(reg.e_pred[0:5], e_5,RTOL)
h_0 = np.array([ 1. , 19.531 , 15.72598 , 18.594 ,
24.7142675 , 13.72216667, 27.82929567])
np.testing.assert_allclose(reg.h[0], h_0)
        hth = np.array([ 49. , 704.371999 , 1721.312371 , 724.7435916 ,
1707.35412945, 711.31248483, 1729.63201243])
np.testing.assert_allclose(reg.hth[0], hth,RTOL)
hthi = np.array([ 7.33701328e+00, 2.27764882e-02, 2.18153588e-02,
-5.11035447e-02, 1.22515181e-03, -2.38079378e-01,
-1.20149133e-01])
np.testing.assert_allclose(reg.hthi[0], hthi,RTOL)
self.assertEqual(reg.k, 4)
self.assertEqual(reg.kstar, 1)
np.testing.assert_allclose(reg.mean_y, 38.436224469387746,RTOL)
self.assertEqual(reg.n, 49)
pfora1a2 = np.array([ 80.5588479 , -1.06625281, -0.61703759, -1.10071931])
np.testing.assert_allclose(reg.pr2, 0.3551928222612527,RTOL)
np.testing.assert_allclose(reg.pr2_e, 0.34763857386174174,RTOL)
np.testing.assert_allclose(reg.pfora1a2[0], pfora1a2,RTOL)
predy_5 = np.array([[ 50.87411532],[ 50.76969931],[ 41.77223722],[ 33.44262382],[ 28.77418036]])
np.testing.assert_allclose(reg.predy[0:5], predy_5,RTOL)
predy_e_5 = np.array( [[ 51.17723933], [ 50.64139601], [ 41.65080685], [ 33.61773475], [ 28.89697968]])
np.testing.assert_allclose(reg.predy_e[0:5], predy_e_5,RTOL)
q_5 = np.array([ 18.594 , 24.7142675 , 13.72216667, 27.82929567])
np.testing.assert_allclose(reg.q[0], q_5)
self.assertEqual(reg.robust, 'unadjusted')
np.testing.assert_allclose(reg.sig2n_k, 234.54258763039289,RTOL)
np.testing.assert_allclose(reg.sig2n, 215.39625394627919,RTOL)
np.testing.assert_allclose(reg.sig2, 215.39625394627919,RTOL)
np.testing.assert_allclose(reg.std_y, 18.466069465206047,RTOL)
u_5 = np.array( [[ 29.59288768], [ -6.20269831], [-15.42223722], [ -0.24262282], [ -5.54918036]])
np.testing.assert_allclose(reg.u[0:5], u_5,RTOL)
np.testing.assert_allclose(reg.utu, 10554.41644336768,RTOL)
varb = np.array( [[ 1.48966377e+00, -2.28698061e-02, -1.20217386e-02, -1.85763498e-02],
[ -2.28698061e-02, 1.27893998e-03, 2.74600023e-04, -1.33497705e-04],
[ -1.20217386e-02, 2.74600023e-04, 1.54257766e-04, 6.86851184e-05],
[ -1.85763498e-02, -1.33497705e-04, 6.86851184e-05, 4.67711582e-04]])
np.testing.assert_allclose(reg.varb, varb,RTOL)
vm = np.array([[ 3.20867996e+02, -4.92607057e+00, -2.58943746e+00, -4.00127615e+00],
[ -4.92607057e+00, 2.75478880e-01, 5.91478163e-02, -2.87549056e-02],
[ -2.58943746e+00, 5.91478163e-02, 3.32265449e-02, 1.47945172e-02],
[ -4.00127615e+00, -2.87549056e-02, 1.47945172e-02, 1.00743323e-01]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
x_0 = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0], x_0,RTOL)
y_5 = np.array( [[ 80.467003], [ 44.567001], [ 26.35 ], [ 33.200001], [ 23.225 ]])
np.testing.assert_allclose(reg.y[0:5], y_5,RTOL)
yend_5 = np.array( [[ 35.4585005 ], [ 46.67233467], [ 45.36475125], [ 32.81675025], [ 30.81785714]])
np.testing.assert_allclose(reg.yend[0:5], yend_5,RTOL)
z_0 = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
np.testing.assert_allclose(reg.z[0], z_0,RTOL)
zthhthi = np.array( [[ 1.00000000e+00, -2.22044605e-16, -2.22044605e-16 , 2.22044605e-16,
4.44089210e-16, 0.00000000e+00, -8.88178420e-16],
[ 0.00000000e+00, 1.00000000e+00, -3.55271368e-15 , 3.55271368e-15,
-7.10542736e-15, 7.10542736e-14, 0.00000000e+00],
[ 1.81898940e-12, 2.84217094e-14, 1.00000000e+00 , 0.00000000e+00,
-2.84217094e-14, 5.68434189e-14, 5.68434189e-14],
[ -8.31133940e+00, -3.76104678e-01, -2.07028208e-01 , 1.32618931e+00,
-8.04284562e-01, 1.30527047e+00, 1.39136816e+00]])
        # assert_allclose with RTOL is too strict here because of the near-zero
        # entries, so compare with a fixed number of decimals instead.
        np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi, 7)
def test_init_white_(self):
X = []
X.append(self.db.by_col("INC"))
X.append(self.db.by_col("CRIME"))
self.X = np.array(X).T
base_gm_lag = GM_Lag(self.y, self.X, w=self.w, w_lags=2, robust='white')
tbetas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
np.testing.assert_allclose(base_gm_lag.betas, tbetas)
dbetas = D.se_betas(base_gm_lag)
se_betas = np.array([ 20.47077481, 0.50613931, 0.20138425, 0.38028295 ])
np.testing.assert_allclose(dbetas, se_betas)
def test_init_hac_(self):
X = []
X.append(self.db.by_col("INC"))
X.append(self.db.by_col("CRIME"))
self.X = np.array(X).T
gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=15,function='triangular', fixed=False)
base_gm_lag = GM_Lag(self.y, self.X, w=self.w, w_lags=2, robust='hac', gwk=gwk)
tbetas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
np.testing.assert_allclose(base_gm_lag.betas, tbetas)
dbetas = D.se_betas(base_gm_lag)
se_betas = np.array([ 19.08513569, 0.51769543, 0.18244862, 0.35460553])
np.testing.assert_allclose(dbetas, se_betas)
def test_init_discbd(self):
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("CRIME"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag(self.y, X, w=self.w, yend=yd, q=q, w_lags=2)
tbetas = np.array([[ 100.79359082], [ -0.50215501], [ -1.14881711], [ -0.38235022]])
np.testing.assert_allclose(tbetas, reg.betas)
dbetas = D.se_betas(reg)
se_betas = np.array([ 53.0829123 , 1.02511494, 0.57589064, 0.59891744 ])
np.testing.assert_allclose(dbetas, se_betas)
def test_n_k(self):
X = []
X.append(self.db.by_col("INC"))
X.append(self.db.by_col("CRIME"))
self.X = np.array(X).T
reg = GM_Lag(self.y, self.X, w=self.w, w_lags=2, sig2n_k=True)
        betas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array( [[ 3.49389596e+02, -5.36394351e+00, -2.81960968e+00, -4.35694515e+00],
[ -5.36394351e+00, 2.99965892e-01, 6.44054000e-02, -3.13108972e-02],
[ -2.81960968e+00, 6.44054000e-02, 3.61800155e-02, 1.61095854e-02],
[ -4.35694515e+00, -3.13108972e-02, 1.61095854e-02, 1.09698285e-01]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
def test_lag_q(self):
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("CRIME"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag(self.y, X, w=self.w, yend=yd, q=q, w_lags=2, lag_q=False)
tbetas = np.array( [[ 108.83261383], [ -0.48041099], [ -1.18950006], [ -0.56140186]])
np.testing.assert_allclose(tbetas, reg.betas)
dbetas = D.se_betas(reg)
se_betas = np.array([ 58.33203837, 1.09100446, 0.62315167, 0.68088777])
np.testing.assert_allclose(dbetas, se_betas)
def test_spatial(self):
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("CRIME"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
w = pysal.queen_from_shapefile(pysal.examples.get_path('columbus.shp'))
reg = GM_Lag(self.y, X, yd, q, spat_diag=True, w=w)
betas = np.array([[ 5.46344924e+01], [ 4.13301682e-01], [ -5.92637442e-01], [ -7.40490883e-03]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array( [[ 4.45202654e+02, -1.50290275e+01, -6.36557072e+00, -5.71403440e-03],
[ -1.50290275e+01, 5.93124683e-01, 2.19169508e-01, -6.70675916e-03],
[ -6.36557072e+00, 2.19169508e-01, 1.06577542e-01, -2.96533875e-03],
[ -5.71403440e-03, -6.70675916e-03, -2.96533875e-03, 1.15655425e-03]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
ak_test = np.array([ 2.52597326, 0.11198567])
np.testing.assert_allclose(reg.ak_test, ak_test,RTOL)
def test_names(self):
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("CRIME"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
w = pysal.queen_from_shapefile(pysal.examples.get_path('columbus.shp'))
gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=5,function='triangular', fixed=False)
name_x = ['inc']
name_y = 'crime'
name_yend = ['crime']
name_q = ['discbd']
name_w = 'queen'
name_gwk = 'k=5'
name_ds = 'columbus'
reg = GM_Lag(self.y, X, yd, q,
spat_diag=True, w=w, robust='hac', gwk=gwk,
name_x=name_x, name_y=name_y, name_q=name_q, name_w=name_w,
name_yend=name_yend, name_gwk=name_gwk, name_ds=name_ds)
betas = np.array([[ 5.46344924e+01], [ 4.13301682e-01], [ -5.92637442e-01], [ -7.40490883e-03]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array( [[ 5.70817052e+02, -1.83655385e+01, -8.36602575e+00, 2.37538877e-02],
[ -1.85224661e+01, 6.53311383e-01, 2.84209566e-01, -6.47694160e-03],
[ -8.31105622e+00, 2.78772694e-01, 1.38144928e-01, -3.98175246e-03],
[ 2.66662466e-02, -6.23783104e-03, -4.11092891e-03, 1.10936528e-03]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
self.assertListEqual(reg.name_x, ['CONSTANT']+name_x)
name_yend.append('W_crime')
self.assertListEqual(reg.name_yend, name_yend)
name_q.extend(['W_inc', 'W_discbd'])
self.assertListEqual(reg.name_q, name_q)
self.assertEqual(reg.name_y, name_y)
self.assertEqual(reg.name_w, name_w)
self.assertEqual(reg.name_gwk, name_gwk)
self.assertEqual(reg.name_ds, name_ds)
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
import unittest
import warnings
import mock
from influxdb.influxdb08 import SeriesHelper, InfluxDBClient
from requests.exceptions import ConnectionError
class TestSeriesHelper(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestSeriesHelper, cls).setUpClass()
TestSeriesHelper.client = InfluxDBClient(
'host',
8086,
'username',
'password',
'database'
)
class MySeriesHelper(SeriesHelper):
class Meta:
client = TestSeriesHelper.client
series_name = 'events.stats.{server_name}'
fields = ['time', 'server_name']
bulk_size = 5
autocommit = True
TestSeriesHelper.MySeriesHelper = MySeriesHelper
def test_auto_commit(self):
"""
Tests that write_points is called after the right number of events
"""
class AutoCommitTest(SeriesHelper):
class Meta:
series_name = 'events.stats.{server_name}'
fields = ['time', 'server_name']
bulk_size = 5
client = InfluxDBClient()
autocommit = True
fake_write_points = mock.MagicMock()
AutoCommitTest(server_name='us.east-1', time=159)
AutoCommitTest._client.write_points = fake_write_points
AutoCommitTest(server_name='us.east-1', time=158)
AutoCommitTest(server_name='us.east-1', time=157)
AutoCommitTest(server_name='us.east-1', time=156)
self.assertFalse(fake_write_points.called)
AutoCommitTest(server_name='us.east-1', time=3443)
self.assertTrue(fake_write_points.called)
def testSingleSeriesName(self):
"""
Tests JSON conversion when there is only one series name.
"""
TestSeriesHelper.MySeriesHelper(server_name='us.east-1', time=159)
TestSeriesHelper.MySeriesHelper(server_name='us.east-1', time=158)
TestSeriesHelper.MySeriesHelper(server_name='us.east-1', time=157)
TestSeriesHelper.MySeriesHelper(server_name='us.east-1', time=156)
expectation = [{'points': [[159, 'us.east-1'],
[158, 'us.east-1'],
[157, 'us.east-1'],
[156, 'us.east-1']],
'name': 'events.stats.us.east-1',
'columns': ['time', 'server_name']}]
rcvd = TestSeriesHelper.MySeriesHelper._json_body_()
self.assertTrue(all([el in expectation for el in rcvd]) and
all([el in rcvd for el in expectation]),
'Invalid JSON body of time series returned from '
'_json_body_ for one series name: {}.'.format(rcvd))
TestSeriesHelper.MySeriesHelper._reset_()
self.assertEqual(
TestSeriesHelper.MySeriesHelper._json_body_(),
[],
'Resetting helper did not empty datapoints.')
def testSeveralSeriesNames(self):
        '''
        Tests JSON conversion when there are several series names.
        '''
TestSeriesHelper.MySeriesHelper(server_name='us.east-1', time=159)
TestSeriesHelper.MySeriesHelper(server_name='fr.paris-10', time=158)
TestSeriesHelper.MySeriesHelper(server_name='lu.lux', time=157)
TestSeriesHelper.MySeriesHelper(server_name='uk.london', time=156)
expectation = [{'points': [[157, 'lu.lux']],
'name': 'events.stats.lu.lux',
'columns': ['time', 'server_name']},
{'points': [[156, 'uk.london']],
'name': 'events.stats.uk.london',
'columns': ['time', 'server_name']},
{'points': [[158, 'fr.paris-10']],
'name': 'events.stats.fr.paris-10',
'columns': ['time', 'server_name']},
{'points': [[159, 'us.east-1']],
'name': 'events.stats.us.east-1',
'columns': ['time', 'server_name']}]
rcvd = TestSeriesHelper.MySeriesHelper._json_body_()
self.assertTrue(all([el in expectation for el in rcvd]) and
all([el in rcvd for el in expectation]),
'Invalid JSON body of time series returned from '
'_json_body_ for several series names: {}.'
.format(rcvd))
TestSeriesHelper.MySeriesHelper._reset_()
self.assertEqual(
TestSeriesHelper.MySeriesHelper._json_body_(),
[],
'Resetting helper did not empty datapoints.')
def testInvalidHelpers(self):
'''
Tests errors in invalid helpers.
'''
class MissingMeta(SeriesHelper):
pass
class MissingClient(SeriesHelper):
class Meta:
series_name = 'events.stats.{server_name}'
fields = ['time', 'server_name']
autocommit = True
class MissingSeriesName(SeriesHelper):
class Meta:
fields = ['time', 'server_name']
class MissingFields(SeriesHelper):
class Meta:
series_name = 'events.stats.{server_name}'
for cls in [MissingMeta, MissingClient, MissingFields,
MissingSeriesName]:
self.assertRaises(
AttributeError, cls, **{'time': 159,
'server_name': 'us.east-1'})
def testWarnBulkSizeZero(self):
"""
Tests warning for an invalid bulk size.
"""
class WarnBulkSizeZero(SeriesHelper):
class Meta:
client = TestSeriesHelper.client
series_name = 'events.stats.{server_name}'
fields = ['time', 'server_name']
bulk_size = 0
autocommit = True
with warnings.catch_warnings(record=True) as rec_warnings:
warnings.simplefilter("always")
# Server defined in the client is invalid, we're testing
# the warning only.
with self.assertRaises(ConnectionError):
WarnBulkSizeZero(time=159, server_name='us.east-1')
self.assertGreaterEqual(
len(rec_warnings), 1,
'{} call should have generated one warning.'
'Actual generated warnings: {}'.format(
WarnBulkSizeZero, '\n'.join(map(str, rec_warnings))))
expected_msg = (
'Definition of bulk_size in WarnBulkSizeZero forced to 1, '
'was less than 1.')
self.assertIn(expected_msg, list(w.message.args[0]
for w in rec_warnings),
'Warning message did not contain "forced to 1".')
def testWarnBulkSizeNoEffect(self):
"""
Tests warning for a set bulk size but autocommit False.
"""
class WarnBulkSizeNoEffect(SeriesHelper):
class Meta:
series_name = 'events.stats.{server_name}'
fields = ['time', 'server_name']
bulk_size = 5
autocommit = False
with warnings.catch_warnings(record=True) as rec_warnings:
warnings.simplefilter("always")
WarnBulkSizeNoEffect(time=159, server_name='us.east-1')
self.assertGreaterEqual(
len(rec_warnings), 1,
'{} call should have generated one warning.'
'Actual generated warnings: {}'.format(
WarnBulkSizeNoEffect, '\n'.join(map(str, rec_warnings))))
expected_msg = (
'Definition of bulk_size in WarnBulkSizeNoEffect has no affect '
'because autocommit is false.')
self.assertIn(expected_msg, list(w.message.args[0]
for w in rec_warnings),
'Warning message did not contain the expected_msg.')
if __name__ == '__main__':
unittest.main()
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbroute6(base_resource) :
""" Configuration for LB route6 resource. """
def __init__(self) :
self._network = ""
self._gatewayname = ""
self._td = 0
self._flags = ""
self.___count = 0
@property
def network(self) :
"""The destination network.
"""
try :
return self._network
except Exception as e:
raise e
@network.setter
def network(self, network) :
"""The destination network.
"""
try :
self._network = network
except Exception as e:
raise e
@property
def gatewayname(self) :
"""The name of the route.<br/>Minimum length = 1.
"""
try :
return self._gatewayname
except Exception as e:
raise e
@gatewayname.setter
def gatewayname(self, gatewayname) :
"""The name of the route.<br/>Minimum length = 1
"""
try :
self._gatewayname = gatewayname
except Exception as e:
raise e
@property
def td(self) :
"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Default value: 0<br/>Maximum length = 4094.
"""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Default value: 0<br/>Maximum length = 4094
"""
try :
self._td = td
except Exception as e:
raise e
@property
def flags(self) :
"""State of the configured gateway.<br/>Possible values = UP, DOWN, UNKNOWN, BUSY, OUT OF SERVICE, GOING OUT OF SERVICE, DOWN WHEN GOING OUT OF SERVICE, NS_EMPTY_STR, Unknown, DISABLED.
"""
try :
return self._flags
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbroute6_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbroute6
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.network) :
return str(self.network)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add lbroute6.
"""
try :
if type(resource) is not list :
addresource = lbroute6()
addresource.network = resource.network
addresource.gatewayname = resource.gatewayname
addresource.td = resource.td
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ lbroute6() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].network = resource[i].network
addresources[i].gatewayname = resource[i].gatewayname
addresources[i].td = resource[i].td
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete lbroute6.
"""
try :
if type(resource) is not list :
deleteresource = lbroute6()
if type(resource) != type(deleteresource):
deleteresource.network = resource
else :
deleteresource.network = resource.network
deleteresource.td = resource.td
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ lbroute6() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].network = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ lbroute6() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].network = resource[i].network
deleteresources[i].td = resource[i].td
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the lbroute6 resources that are configured on netscaler.
"""
try :
if not name :
obj = lbroute6()
response = obj.get_resources(client, option_)
else :
if type(name) == cls :
if type(name) is not list :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name)
response = name.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [lbroute6() for _ in range(len(name))]
for i in range(len(name)) :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name[i])
response[i] = name[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of lbroute6 resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lbroute6()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the lbroute6 resources configured on NetScaler.
"""
try :
obj = lbroute6()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of lbroute6 resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lbroute6()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Flags:
UP = "UP"
DOWN = "DOWN"
UNKNOWN = "UNKNOWN"
BUSY = "BUSY"
OUT_OF_SERVICE = "OUT OF SERVICE"
GOING_OUT_OF_SERVICE = "GOING OUT OF SERVICE"
DOWN_WHEN_GOING_OUT_OF_SERVICE = "DOWN WHEN GOING OUT OF SERVICE"
NS_EMPTY_STR = "NS_EMPTY_STR"
Unknown = "Unknown"
DISABLED = "DISABLED"
class lbroute6_response(base_response) :
def __init__(self, length=1) :
self.lbroute6 = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbroute6 = [lbroute6() for _ in range(length)]
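# Usage sketch (illustrative only, not part of the generated SDK). It assumes
# an already constructed and logged-in NITRO client object, passed in here as
# ``client``; the network and gateway values below are placeholders.
def _lbroute6_usage_sketch(client) :
	""" Illustrative helper exercising the lbroute6 classmethods above. """
	# Fetch every configured LB route6 entry and count them.
	routes = lbroute6.get(client)
	total = lbroute6.count(client)
	# Add a new route; attribute values are placeholders.
	new_route = lbroute6()
	new_route.network = "2001:db8::/64"
	new_route.gatewayname = "v6-gw"
	lbroute6.add(client, new_route)
	return (routes, total)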
|
|
#!/usr/bin/env python
# Copyright (c) 2017-2021 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5_cccl import bigip
from f5.bigip import ManagementRoot
import json
from mock import Mock, patch
import pytest
class MockNode():
"""A mock BIG-IP node."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, partition=None, name=None, **kwargs):
"""Create the node object."""
pass
def delete(self):
"""Delete the node object."""
pass
def load(self, name=None, partition=None):
"""Load the node object."""
return MockNode(name)
class Pool():
"""A mock BIG-IP Pool."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, partition=None, name=None, **kwargs):
"""Create the pool object."""
pass
def delete(self):
"""Delete the pool object."""
pass
def load(self, name=None, partition=None):
"""Load the pool object."""
return Pool(name)
class Policy():
"""A mock BIG-IP Policy."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, partition=None, name=None, **kwargs):
"""Create the policy object."""
pass
def delete(self):
"""Delete the policy object."""
pass
def load(self, name=None, partition=None):
"""Load the policy object."""
return Policy(name)
class IRule():
"""A mock BIG-IP iRule."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, partition=None, name=None, **kwargs):
"""Create the iRule object."""
pass
def delete(self):
"""Delete the iRule object."""
pass
def load(self, name=None, partition=None):
"""Load the iRule object."""
return IRule(name)
class VirtualAddress():
"""A mock BIG-IP VirtualAddress."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, partition=None, name=None, **kwargs):
"""Create the virtual address object."""
pass
def delete(self):
"""Delete the virtual address object."""
pass
def load(self, name=None, partition=None):
"""Load the virtual address object."""
return VirtualAddress(name)
class Member():
"""A mock BIG-IP Pool Member."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
self.session = kwargs.get('session', None)
if kwargs.get('state', None) == 'user-up':
self.state = 'up'
else:
self.state = 'user-down'
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
class Profiles():
"""A container of Virtual Server Profiles."""
def __init__(self, **kwargs):
"""Initialize the object."""
self.profiles = kwargs.get('profiles', [])
def exists(self, name, partition):
"""Check for the existance of a profile."""
for p in self.profiles:
if p['name'] == name and p['partition'] == partition:
return True
return False
def create(self, name, partition):
"""Placeholder: This will be mocked."""
pass
class ProfileSet():
"""A set of Virtual Server Profiles."""
def __init__(self, **kwargs):
"""Initialize the object."""
self.profiles = Profiles(**kwargs)
class Policies():
"""A container of Virtual Server Policies."""
def __init__(self, **kwargs):
"""Initialize the object."""
self.policies = kwargs.get('policies', [])
def exists(self, name, partition):
"""Check for the existance of a policy."""
for p in self.policies:
if p['name'] == name and p['partition'] == partition:
return True
return False
def create(self, name, partition):
"""Placeholder: This will be mocked."""
pass
class PolicySet():
"""A set of Virtual Server Policies."""
def __init__(self, **kwargs):
"""Initialize the object."""
self.policies = Policies(**kwargs)
class Virtual():
"""A mock BIG-IP Virtual Server."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.profiles_s = ProfileSet(**kwargs)
self.policies_s = PolicySet(**kwargs)
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, name=None, partition=None, **kwargs):
"""Create the virtual object."""
pass
def delete(self):
"""Delete the virtual object."""
pass
def load(self, name=None, partition=None):
"""Load the virtual object."""
return Virtual(name)
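# Minimal sketch (illustrative only, not used by the tests or the fixture
# below): shows how the Virtual/ProfileSet/Profiles mocks compose. The names
# are placeholders.
def _virtual_profiles_sketch():
    """Build a mock Virtual and query its profile container."""
    virtual = Virtual('vs-sketch', partition='test',
                      profiles=[{'name': 'http', 'partition': 'Common'}])
    assert virtual.profiles_s.profiles.exists(name='http', partition='Common')
    assert not virtual.profiles_s.profiles.exists(name='tcp',
                                                  partition='Common')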
class HealthCheck():
"""A mock BIG-IP Health Monitor."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
self.interval = kwargs.get('interval', None)
self.timeout = kwargs.get('timeout', None)
self.send = kwargs.get('send', None)
self.partition = kwargs.get('partition', None)
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def delete(self):
"""Delete the healthcheck object."""
pass
class Arp():
"""A mock BIG-IP Arp entry."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, name=None, partition=None, **kwargs):
"""Create the ARP object."""
pass
def delete(self):
"""Delete the ARP object."""
pass
def load(self, name=None, partition=None):
"""Load the ARP object."""
return Arp(name)
class FDBTunnel():
"""A mock BIG-IP FDB tunnel entry."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, name=None, partition=None, **kwargs):
"""Create the FDB tunnel object."""
pass
def delete(self):
"""Delete the FDB tunnel object."""
pass
def load(self, name=None, partition=None):
"""Load the FDB tunnel object."""
return FDBTunnel(name)
class Partition():
"""A mock BIG-IP Partition."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
if kwargs.get('default-route-domain') is not None:
self.defaultRouteDomain = kwargs.get('default-route-domain')
else:
self.defaultRouteDomain = 0
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, name=None, **kwargs):
"""Create the partition object."""
pass
def delete(self):
"""Delete the partition object."""
pass
def load(self, name=None):
"""Load the partition object."""
return Partition(name)
class MockPartitions():
"""A mock Auth partitions object."""
def __init__(self):
"""Initialize the object."""
self.partition = Partition('test')
def get_collection(self):
"""Get collection of partitions."""
pass
class MockService():
"""A mock Services service object."""
def __init__(self):
"""Initialize the object."""
pass
def load(self, name, partition):
"""Load a mock iapp."""
return MockService()
def create(self, name=None, template=None, partition=None, variables=None,
tables=None, trafficGroup=None, description=None):
"""Create a mock iapp."""
pass
def update(self, **properties):
"""Update a mock iapp."""
pass
def delete(self):
"""Delete the iapp object."""
pass
class MockServices():
"""A mock Application services object."""
def __init__(self):
"""Initialize the object."""
self.service = MockService()
def get_collection(self):
"""Get collection of iapps."""
return []
class MockApplication():
"""A mock Sys application object."""
def __init__(self):
"""Initialize the object."""
self.services = MockServices()
class MockFolders():
"""A mock Sys folders object."""
def __init__(self):
"""Initialize the object."""
def get_collection(self):
"""Get collection of partitions."""
pass
class MockSys():
"""A mock BIG-IP sys object."""
def __init__(self):
"""Initialize the object."""
self.application = MockApplication()
self.folders = MockFolders()
class Iapp():
"""A mock BIG-IP iapp object."""
def __init__(self, name=None, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def delete(self):
"""Mock delete method."""
pass
def update(self, executeAction=None, name=None, partition=None,
variables=None, tables=None, **kwargs):
"""Mock update method."""
pass
class InternalDataGroup():
"""A mock BIG-IP data_group internal."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
    def create(self, partition=None, name=None, **kwargs):
        """Create the internal data group object."""
        pass
    def delete(self):
        """Delete the internal data group object."""
        pass
    def load(self, name=None, partition=None):
        """Load the internal data group object."""
        return InternalDataGroup(name, partition=partition)
class MockFolder():
"""A mock BIG-IP folder object."""
def __init__(self, name):
"""Initialize the object."""
self.name = name
class MockHttp():
"""A mock Http http object."""
def __init__(self, name=None, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def create(self, partition=None, **kwargs):
"""Create a http healthcheck object."""
pass
def delete(self):
"""Delete the monitor object."""
pass
def load(self, name=None, partition=None):
"""Load an http healthcheck object."""
return MockHttp(name)
class MockHttps():
"""A mock Monitor https object."""
def __init__(self):
"""Initialize the object."""
self.http = MockHttp()
def get_collection(self):
"""Get collection of http healthchecks."""
return []
class MockTcp():
"""A mock Tcps tcp object."""
def __init__(self):
"""Initialize the object."""
pass
def create(self, partition=None, **kwargs):
"""Create a tcp healthcheck object."""
pass
def delete(self):
"""Delete the monitor object."""
pass
def load(self, name=None, partition=None):
"""Load a tcp healthcheck object."""
return MockTcp()
class MockTcps():
"""A mock Monitor tcps object."""
def __init__(self):
"""Initialize the object."""
self.tcp = MockTcp()
def get_collection(self):
"""Get collection of tcp healthchecks."""
return []
class MockUdp():
"""A mock Udps udp object."""
def __init__(self):
"""Initialize the object."""
pass
def create(self, partition=None, **kwargs):
"""Create a udp healthcheck object."""
pass
def delete(self):
"""Delete the monitor object."""
pass
def load(self, name=None, partition=None):
"""Load a udp healthcheck object."""
return MockUdp()
class MockUdps():
"""A mock Monitor udps object."""
def __init__(self):
"""Initialize the object."""
self.udp = MockUdp()
def get_collection(self):
"""Get collection of udp healthchecks."""
return []
class MockIcmp():
    """A mock Icmps gateway_icmp object."""
    def __init__(self):
        """Initialize the object."""
        pass
    def create(self, partition=None, **kwargs):
        """Create a gateway-icmp healthcheck object."""
        pass
    def delete(self):
        """Delete the monitor object."""
        pass
    def load(self, name=None, partition=None):
        """Load a gateway-icmp healthcheck object."""
        return MockIcmp()
class MockIcmps():
    """A mock Monitor gateway_icmps object."""
    def __init__(self):
        """Initialize the object."""
        self.gateway_icmp = MockIcmp()
    def get_collection(self):
        """Get collection of gateway-icmp healthchecks."""
        return []
class MockHttpS():
    """A mock HttpSs https object."""
    def __init__(self):
        """Initialize the object."""
        pass
    def create(self, partition=None, **kwargs):
        """Create an https healthcheck object."""
        pass
    def delete(self):
        """Delete the monitor object."""
        pass
    def load(self, name=None, partition=None):
        """Load an https healthcheck object."""
        return MockHttpS()
class MockHttpSs():
    """A mock Monitor https_s object."""
    def __init__(self):
        """Initialize the object."""
        self.https = MockHttpS()
    def get_collection(self):
        """Get collection of https healthchecks."""
        pass
class MockMonitor():
"""A mock Ltm monitor object."""
def __init__(self):
"""Initialize the object."""
self.https = MockHttps()
self.tcps = MockTcps()
self.udps = MockUdps()
self.https_s = MockHttpSs()
self.gateway_icmps = MockIcmps()
class MockVirtuals():
"""A mock Ltm virtuals object."""
def __init__(self):
"""Initialize the object."""
self.virtual = Virtual('test')
def get_collection(self):
"""Get collection of virtuals."""
pass
class MockVirtualAddresses():
"""A mock Ltm virtual address object."""
def __init__(self):
"""Initialize the object."""
self.virtual_address = VirtualAddress('test')
def get_collection(self):
"""Get collection of virtual addresses."""
return []
class MockPools():
"""A mock Ltm pools object."""
def __init__(self):
"""Initialize the object."""
self.pool = Pool('test')
def get_collection(self):
"""Get collection of pools."""
pass
class MockPolicys():
"""A mock Ltm policy object."""
def __init__(self):
"""Initialize the object."""
self.policy = Policy('test')
def get_collection(self):
"""Get collection of policies."""
pass
class MockIRules():
"""A mock Ltm iRules object."""
def __init__(self):
"""Initialize the object."""
self.rule = IRule('test')
def get_collection(self):
"""Get collection of iRules."""
pass
class MockNodes():
"""A mock Ltm nodes object."""
def __init__(self):
"""Initialize the object."""
self.node = MockNode('test')
def get_collection(self):
"""Get collection of nodes."""
pass
class MockDataGroupInternals():
"""A mock Ltm data-group internals object."""
    def __init__(self):
        """Initialize the object."""
        self.internal = MockDataGroupInternal()
class MockDataGroupInternal():
"""A mock Ltm data-group internal object."""
def __init__(self):
"""Initialize the object."""
pass
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, partition=None, name=None, **kwargs):
"""Create the object."""
pass
def delete(self):
"""Delete the object."""
pass
def load(self, name=None, partition=None):
"""Load the object."""
return InternalDataGroup(name)
class MockDataGroup():
"""A mock Ltm data_group object."""
def __init__(self):
"""Initialize the object."""
self.internals = MockDataGroupInternals()
class MockAuth():
"""A mock BIG-IP auth object."""
def __init__(self):
"""Initialize the object."""
self.partitions = MockPartitions()
class MockArps():
"""A mock net ARP object."""
def __init__(self):
"""Initialize the object."""
self.arp = Arp('test')
def get_collection(self):
"""Get collection of ARPS."""
pass
class MockFDB():
"""A Mock net FDB object."""
def __init__(self):
"""Initialize the object."""
self.tunnels = MockFDBTunnels()
class MockFDBTunnels():
"""A mock net FDB tunnel object."""
def __init__(self):
"""Initialize the object."""
self.tunnel = FDBTunnel('test')
def get_collection(self):
"""Get collection of FDB tunnels."""
pass
class MockLtm():
"""A mock BIG-IP ltm object."""
def __init__(self):
"""Initialize the object."""
self.monitor = MockMonitor()
self.virtuals = MockVirtuals()
self.pools = MockPools()
self.nodes = MockNodes()
self.policys = MockPolicys()
self.rules = MockIRules()
self.virtual_address_s = MockVirtualAddresses()
self.data_group = MockDataGroup()
class MockNet():
"""A mock BIG-IP net object."""
def __init__(self):
"""Initialize the object."""
self.arps = MockArps()
self.fdb = MockFDB()
class MockTm():
def __init__(self):
self.ltm = MockLtm()
self.net = MockNet()
self.auth = MockAuth()
self.sys = MockSys()
class MockHealthMonitor():
"""A mock BIG-IP healthmonitor object."""
def __init__(self, name, partition):
"""Initialize the object."""
self.name = name
self.partition = partition
class MockBigIP(ManagementRoot):
"""BIG-IP configuration tests.
Test BIG-IP configuration given various cloud states and existing
BIG-IP states
"""
def partition_from_params(self, params):
"""Extract partition name from the request params"""
return params.split("partition+eq+")[1].split("&expand")[0]
def create_mock_pool(self, name, **kwargs):
"""Create a mock pool server object."""
pool = Pool(name, **kwargs)
self.pools[name] = pool
pool.modify = Mock()
return pool
def create_mock_pool_member(self, name, **kwargs):
"""Create a mock pool member object."""
member = Member(name, **kwargs)
self.members[name] = member
member.modify = Mock()
return member
def mock_virtuals_get_collection(self, requests_params=None):
"""Mock: Return a mocked collection of virtuals."""
partition = self.partition_from_params(requests_params['params'])
resources = self.bigip_data['virtuals']
virtuals = [
Virtual(**r)
for r in resources if partition == r['partition']
]
return virtuals
def mock_pools_get_collection(self, requests_params=None):
"""Mock: Return a mocked collection of pools."""
partition = self.partition_from_params(requests_params['params'])
resources = self.bigip_data['pools']
pools = [
Pool(**r)
for r in resources if partition == r['partition']
]
return pools
def mock_policys_get_collection(self, requests_params=None):
"""Mock: Return a mocked collection of policies."""
partition = self.partition_from_params(requests_params['params'])
policies = [
Policy(**r)
for r in self.bigip_data['policies'] if partition == r['partition']
]
return policies
def mock_irules_get_collection(self, requests_params=None):
"""Mock: Return a mocked collection of iRules."""
partition = self.partition_from_params(requests_params['params'])
resources = self.bigip_data['rules']
irules = [
IRule(**r)
for r in resources if partition == r['partition']
]
return irules
def mock_iapps_get_collection(self, requests_params=None):
"""Mock: Return a mocked collection of app svcs."""
partition = self.partition_from_params(requests_params['params'])
resources = self.bigip_data['iapps']
iapps = [
Iapp(**r)
for r in resources if partition == r['partition']
]
return iapps
def mock_monitors_get_collection(self, requests_params=None):
partition = self.partition_from_params(requests_params['params'])
resources = self.bigip_data['monitors']
monitors = [
MockHttp(**r)
for r in resources if partition == r['partition']
]
return monitors
def mock_nodes_get_collection(self, requests_params=None):
"""Mock: Return a mocked collection of nodes."""
partition = self.partition_from_params(requests_params['params'])
resources = self.bigip_data['nodes']
nodes = [
MockNode(**r)
for r in resources if partition == r['partition']
]
return nodes
def mock_vas_get_collection(self, requests_params=None):
"""Mock: Return a mocked collection of virtual addresses."""
partition = self.partition_from_params(requests_params['params'])
resources = self.bigip_data['virtual_addresses']
vas = [
MockVirtualAddresses(**r)
for r in resources if partition == r['partition']
]
return vas
def mock_data_group_internals_get_collection(self, requests_params=None):
"""Mock: Return a mocked collection of data_group internal."""
partition = self.partition_from_params(requests_params['params'])
resources = self.bigip_data['internaldatagroups']
int_dgs = [
InternalDataGroup(**r)
for r in resources if partition == r['partition']
]
return int_dgs
def mock_arps_get_collection(self, requests_params=None):
"""Mock: Return a mocked collection of arps."""
partition = self.partition_from_params(requests_params['params'])
resources = self.bigip_net_data['arps']
arps = [
Arp(**r)
for r in resources if partition == r['partition']
]
return arps
def mock_fdb_tunnels_get_collection(self):
"""Mock: Return a mocked collection of arps."""
resources = self.bigip_net_data['fdbTunnels']
tunnels = [
FDBTunnel(**r)
for r in resources
]
return tunnels
def read_test_data(self, bigip_ltm_state, bigip_net_state):
"""Read test data for the Big-IP state."""
# Read the BIG-IP state
with open(bigip_ltm_state) as json_data:
self.bigip_data = json.load(json_data)
with open(bigip_net_state) as json_data:
self.bigip_net_data = json.load(json_data)
@pytest.fixture
def bigip_proxy():
with patch.object(ManagementRoot, '_get_tmos_version'):
mgmt_root = MockBigIP('1.2.3.4', 'admin', 'admin')
mgmt_root.tm = MockTm()
mgmt_root.tm.ltm.pools.get_collection = \
Mock(side_effect=mgmt_root.mock_pools_get_collection)
mgmt_root.tm.ltm.policys.get_collection = \
Mock(side_effect=mgmt_root.mock_policys_get_collection)
mgmt_root.tm.ltm.rules.get_collection = \
Mock(side_effect=mgmt_root.mock_irules_get_collection)
mgmt_root.tm.ltm.virtuals.get_collection = \
Mock(side_effect=mgmt_root.mock_virtuals_get_collection)
mgmt_root.tm.ltm.monitor.https.get_collection = \
Mock(side_effect=mgmt_root.mock_monitors_get_collection)
mgmt_root.tm.ltm.monitor.https_s.get_collection = \
Mock(side_effect=mgmt_root.mock_monitors_get_collection)
mgmt_root.tm.ltm.monitor.tcps.get_collection = \
Mock(side_effect=mgmt_root.mock_monitors_get_collection)
mgmt_root.tm.ltm.monitor.udps.get_collection = \
Mock(side_effect=mgmt_root.mock_monitors_get_collection)
mgmt_root.tm.ltm.monitor.gateway_icmps.get_collection = \
Mock(side_effect=mgmt_root.mock_monitors_get_collection)
mgmt_root.tm.sys.application.services.get_collection = \
Mock(side_effect=mgmt_root.mock_iapps_get_collection)
mgmt_root.tm.ltm.nodes.get_collection = \
Mock(side_effect=mgmt_root.mock_nodes_get_collection)
mgmt_root.tm.ltm.virtual_address_s.get_collection = \
Mock(side_effect=mgmt_root.mock_vas_get_collection)
mgmt_root.tm.ltm.data_group.internals.get_collection = \
Mock(side_effect=mgmt_root.mock_data_group_internals_get_collection)
mgmt_root.tm.net.arps.get_collection = \
Mock(side_effect=mgmt_root.mock_arps_get_collection)
mgmt_root.tm.net.fdb.tunnels.get_collection = \
Mock(side_effect=mgmt_root.mock_fdb_tunnels_get_collection)
bigip_ltm_state='f5_cccl/test/bigip_data.json'
bigip_net_state='f5_cccl/test/bigip_net_data.json'
mgmt_root.read_test_data(bigip_ltm_state, bigip_net_state)
bigip_proxy = bigip.BigIPProxy(mgmt_root, 'test')
return bigip_proxy
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_split_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class StringSplitOpTest(test.TestCase):
def testStringSplit(self):
strings = ["pigs on the wing", "animals"]
with self.cached_session():
tokens = string_ops.string_split(strings)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [1, 0]])
self.assertAllEqual(values, [b"pigs", b"on", b"the", b"wing", b"animals"])
self.assertAllEqual(shape, [2, 4])
@test_util.run_deprecated_v1
def testStringSplitEmptyDelimiter(self):
strings = ["hello", "hola", b"\xF0\x9F\x98\x8E"] # Last string is U+1F60E
with self.cached_session():
tokens = string_ops.string_split(strings, delimiter="")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[1, 0], [1, 1], [1, 2], [1, 3], [2, 0],
[2, 1], [2, 2], [2, 3]])
expected = np.array(
[
"h", "e", "l", "l", "o", "h", "o", "l", "a", b"\xf0", b"\x9f",
b"\x98", b"\x8e"
],
dtype="|S1")
self.assertAllEqual(values.tolist(), expected)
self.assertAllEqual(shape, [3, 5])
def testStringSplitEmptyToken(self):
strings = ["", " a", "b ", " c", " ", " d ", " e", "f ", " g ", " "]
with self.cached_session():
tokens = string_ops.string_split(strings)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(
indices,
[[1, 0], [2, 0], [3, 0], [5, 0], [6, 0], [7, 0], [8, 0]])
self.assertAllEqual(values, [b"a", b"b", b"c", b"d", b"e", b"f", b"g"])
self.assertAllEqual(shape, [10, 1])
def testStringSplitOnSetEmptyToken(self):
strings = ["", " a", "b ", " c", " ", " d ", ". e", "f .", " .g. ", " ."]
with self.cached_session():
tokens = string_ops.string_split(strings, delimiter=" .")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(
indices,
[[1, 0], [2, 0], [3, 0], [5, 0], [6, 0], [7, 0], [8, 0]])
self.assertAllEqual(values, [b"a", b"b", b"c", b"d", b"e", b"f", b"g"])
self.assertAllEqual(shape, [10, 1])
@test_util.run_deprecated_v1
def testStringSplitWithDelimiter(self):
strings = ["hello|world", "hello world"]
with self.cached_session():
self.assertRaises(
ValueError, string_ops.string_split, strings, delimiter=["|", ""])
self.assertRaises(
ValueError, string_ops.string_split, strings, delimiter=["a"])
tokens = string_ops.string_split(strings, delimiter="|")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
self.assertAllEqual(values, [b"hello", b"world", b"hello world"])
self.assertAllEqual(shape, [2, 2])
tokens = string_ops.string_split(strings, delimiter="| ")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0], [1, 1]])
self.assertAllEqual(values, [b"hello", b"world", b"hello", b"world"])
self.assertAllEqual(shape, [2, 2])
@test_util.run_deprecated_v1
def testStringSplitWithDelimiterTensor(self):
strings = ["hello|world", "hello world"]
with self.cached_session() as sess:
delimiter = array_ops.placeholder(dtypes.string)
tokens = string_ops.string_split(strings, delimiter=delimiter)
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: "|"})
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
self.assertAllEqual(values, [b"hello", b"world", b"hello world"])
self.assertAllEqual(shape, [2, 2])
@test_util.run_deprecated_v1
def testStringSplitWithDelimitersTensor(self):
strings = ["hello.cruel,world", "hello cruel world"]
with self.cached_session() as sess:
delimiter = array_ops.placeholder(dtypes.string)
tokens = string_ops.string_split(strings, delimiter=delimiter)
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: ".,"})
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [1, 0]])
self.assertAllEqual(values,
[b"hello", b"cruel", b"world", b"hello cruel world"])
self.assertAllEqual(shape, [2, 3])
def testStringSplitWithNoSkipEmpty(self):
strings = ["#a", "b#", "#c#"]
with self.cached_session():
tokens = string_ops.string_split(strings, "#", skip_empty=False)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1],
[1, 0], [1, 1],
[2, 0], [2, 1], [2, 2]])
self.assertAllEqual(values, [b"", b"a", b"b", b"", b"", b"c", b""])
self.assertAllEqual(shape, [3, 3])
with self.cached_session():
tokens = string_ops.string_split(strings, "#")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(values, [b"a", b"b", b"c"])
self.assertAllEqual(indices, [[0, 0], [1, 0], [2, 0]])
self.assertAllEqual(shape, [3, 1])
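# Note (illustrative): string_ops.string_split returns a SparseTensor whose
# `indices` are [row, token_position] pairs, whose `values` are the tokens,
# and whose `dense_shape` is [num_strings, max_tokens_per_string]. For
# example, splitting ["pigs on the wing", "animals"] above yields dense_shape
# [2, 4] because the first row has four tokens and the second has one.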
class StringSplitV2OpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.named_parameters([
{"testcase_name": "Simple",
"input": [b"pigs on the wing", b"animals"],
"expected": [[b"pigs", b"on", b"the", b"wing"], [b"animals"]]},
{"testcase_name": "MultiCharSeparator",
"input": [b"1<>2<>3", b"<><>4<>5<><>6<>"],
"sep": b"<>",
"expected": [[b"1", b"2", b"3"],
[b"", b"", b"4", b"5", b"", b"6", b""]]},
{"testcase_name": "SimpleSeparator",
"input": [b"1,2,3", b"4,5,,6,"],
"sep": b",",
"expected": [[b"1", b"2", b"3"], [b"4", b"5", b"", b"6", b""]]},
{"testcase_name": "EmptySeparator",
"input": [b"1 2 3", b" 4 5 6 "],
"expected": [[b"1", b"2", b"3"], [b"4", b"5", b"6"]]},
{"testcase_name": "EmptySeparatorEmptyInputString",
"input": [b""],
"expected": [[]]},
{"testcase_name": "EmptyInputVector",
"input": [],
"expected": []},
{"testcase_name": "SimpleSeparatorMaxSplit",
"input": [b"1,2,3", b"4,5,,6,"],
"sep": b",",
"maxsplit": 1,
"expected": [[b"1", b"2,3"], [b"4", b"5,,6,"]]},
{"testcase_name": "EmptySeparatorMaxSplit",
"input": [b"1 2 3", b" 4 5 6 "],
"maxsplit": 1,
"expected": [[b"1", b"2 3"], [b"4", b"5 6 "]]},
{"testcase_name": "ScalarInput",
"input": b"1,2,3",
"sep": b",",
"expected": [b"1", b"2", b"3"]},
{"testcase_name": "Dense2DInput",
"input": [[b"1,2,3", b"4"], [b"5,6", b"7,8,9"]],
"sep": b",",
"expected": [[[b"1", b"2", b"3"], [b"4"]],
[[b"5", b"6"], [b"7", b"8", b"9"]]]},
{"testcase_name": "Ragged2DInput",
"input": [[b"1,2,3", b"4"], [b"5,6"]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]]},
{"testcase_name": "Ragged3DInput",
"input": [[[b"1,2,3", b"4"], [b"5,6"]], [[b"7,8,9"]]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]]},
{"testcase_name": "Ragged4DInput",
"input": [[[[b"1,2,3", b"4"], [b"5,6"]], [[b"7,8,9"]]], [[[b""]]]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]], [[[[b""]]]]]},
{"testcase_name": "Ragged4DInputEmptySeparator",
"input": [[[[b"1 2 3", b"4"], [b"5 6"]], [[b"7 8 9"]]], [[[b""]]]],
"input_is_ragged": True,
"expected": [[[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]], [[[[]]]]]},
]) # pyformat: disable
def testSplitV2(self,
input,
expected,
input_is_ragged=False,
**kwargs): # pylint: disable=redefined-builtin
# Check that we are matching the behavior of Python's str.split:
self.assertEqual(expected, self._py_split(input, **kwargs))
# Prepare the input tensor.
if input_is_ragged:
input = ragged_factory_ops.constant(input, dtype=dtypes.string)
else:
input = constant_op.constant(input, dtype=dtypes.string)
# Check that the public version (which returns a RaggedTensor) works
# correctly.
expected_ragged = ragged_factory_ops.constant(
expected, ragged_rank=input.shape.ndims)
actual_ragged_v1 = ragged_string_ops.strings_split_v1(
input, result_type="RaggedTensor", **kwargs)
actual_ragged_v1_input_kwarg = ragged_string_ops.strings_split_v1(
input=input, result_type="RaggedTensor", **kwargs)
actual_ragged_v1_source_kwarg = ragged_string_ops.strings_split_v1(
source=input, result_type="RaggedTensor", **kwargs)
actual_ragged_v2 = ragged_string_ops.string_split_v2(input, **kwargs)
actual_ragged_v2_input_kwarg = ragged_string_ops.string_split_v2(
input=input, **kwargs)
self.assertRaggedEqual(expected_ragged, actual_ragged_v1)
self.assertRaggedEqual(expected_ragged, actual_ragged_v1_input_kwarg)
self.assertRaggedEqual(expected_ragged, actual_ragged_v1_source_kwarg)
self.assertRaggedEqual(expected_ragged, actual_ragged_v2)
self.assertRaggedEqual(expected_ragged, actual_ragged_v2_input_kwarg)
# Check that the internal version (which returns a SparseTensor) works
    # correctly. Note: the internal version only supports vector inputs.
if input.shape.ndims == 1:
expected_sparse = self.evaluate(expected_ragged.to_sparse())
actual_sparse_v1 = ragged_string_ops.strings_split_v1(
input, result_type="SparseTensor", **kwargs)
actual_sparse_v2 = string_ops.string_split_v2(input, **kwargs)
for actual_sparse in [actual_sparse_v1, actual_sparse_v2]:
self.assertEqual(expected_sparse.indices.tolist(),
self.evaluate(actual_sparse.indices).tolist())
self.assertEqual(expected_sparse.values.tolist(),
self.evaluate(actual_sparse.values).tolist())
self.assertEqual(expected_sparse.dense_shape.tolist(),
self.evaluate(actual_sparse.dense_shape).tolist())
def _py_split(self, strings, **kwargs):
if isinstance(strings, compat.bytes_or_text_types):
# Note: str.split doesn't accept keyword args.
if "maxsplit" in kwargs:
return strings.split(kwargs.get("sep", None), kwargs["maxsplit"])
else:
return strings.split(kwargs.get("sep", None))
else:
return [self._py_split(s, **kwargs) for s in strings]
if __name__ == "__main__":
test.main()
|
|
import pytest
import crow2.test.setup # pylint: disable = W0611
from crow2.test.util import Counter
from crow2.events.hook import Hook, CancellableHook
from crow2.events import exceptions
def pytest_generate_tests(metafunc):
"""
Parameterize tests with different targets
"""
if metafunc.cls is None:
return
try:
test_targets = metafunc.cls.targets
except AttributeError: # pragma: no cover
return # it's okay to be target-specific
target_name = getattr(metafunc.cls, "target_name", "target")
metafunc.parametrize(target_name, test_targets)
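# Example (illustrative): a test class opts in to this parametrization by
# declaring a ``targets`` list (and, optionally, a ``target_name``); each test
# method then receives one entry at a time, e.g.:
#
#     class TestBothHooks(object):
#         targets = [Hook, CancellableHook]
#         def test_something(self, target):
#             hook = target()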
class TestSimpleHook(object):
targets = [Hook, CancellableHook]
def test_simple(self, target):
"""
Test that simple use of the Hook class works
"""
counter = Counter()
hook = target()
@hook
def testfunc(event):
"call check"
counter.tick()
hook.fire()
assert counter.count == 1
def test_error_checking(self, target):
hook = target()
with pytest.raises(exceptions.DuplicateRegistrationError):
@hook
@hook
def stub():
"registering to the same hook twice doesn't work"
should_never_run()
def test_calldicts(self, target):
hook = target()
counter = Counter()
@hook
def check(event):
assert event.foo
assert event.bar
event.baz = True
originalcontext = {"foo": True}
context = dict(originalcontext)
result = hook.fire(context, bar=True)
assert result.foo
assert result.bar
assert result.baz
assert context == originalcontext
class GetNameTarget(object):
pass
class TestOrderedHook(object):
targets = [Hook, CancellableHook]
def test_simple_dependency(self, target):
"""
Test that simple three-in-a-row dependencies work
"""
counter = Counter()
hook = target()
@hook
def second(event):
"handler with no dependencies"
assert event.first_was_called
event.second_was_called = True
assert counter.count == 1
counter.tick()
@hook(before=second)
def first(event):
"handler which reverse-depends only on the second handler"
assert counter.count == 0
counter.tick()
event.first_was_called = True
@hook(after=second)
def third(event):
"handler which depends on the second handler"
assert counter.count == 2
counter.tick()
assert event.first_was_called
assert event.second_was_called
hook.fire()
assert counter.count == 3
def test_unorderable_dependencies(self, target):
hook = target()
@hook
def firstfunc(event):
"a target function"
should_never_run()
with pytest.raises(exceptions.InvalidOrderRequirementsError):
@hook(tag="first", after=firstfunc)
def stub():
"function with nonsense order requirements"
should_never_run()
def test_missing_dependencies(self, target):
hook = target()
@hook(after="dependency missing")
def stub():
"handler which depends on something which doesn't exist"
should_never_run()
with pytest.raises(exceptions.DependencyMissingError):
hook.fire()
def test_tags(self, target):
counter = Counter()
hook = target(["early", "normal", "late"])
@hook(tag="normal")
def func_normal(event):
assert event.first_was_called
event.second_was_called = True
assert counter.count == 1
counter.tick()
@hook(tag="early")
def func_early(event):
assert counter.count == 0
counter.tick()
event.first_was_called = True
@hook(tag="late")
def func_late(event):
assert counter.count == 2
counter.tick()
assert event.first_was_called
assert event.second_was_called
assert event.somewhere_was_called
@hook(before=":late", after="early")
def func_somewhere(event):
assert event.first_was_called
event.somewhere_was_called = True
assert counter.count > 0
hook.fire()
assert counter.count == 3
def test_once(self, target):
hook = target(["tag", "tag2"])
counter = Counter()
def callonce(event):
counter.tick()
assert counter.count == 1
hook.register_once(callonce, tag="tag")
def callsecond(event):
counter.tick()
assert counter.count == 2
hook.register_once(callsecond, tag="tag2")
def forgetme(event):
pass # tests tag garbage collection
hook.register_once(forgetme, tag="temporary_tag")
hook.fire()
assert counter.incremented(2)
hook.fire()
assert counter.incremented(0)
def tag_stub(event):
should_never_run()
hook.register_once(tag_stub, tag="tag")
def tag2_stub(event):
should_never_run()
hook.register_once(tag2_stub, tag="tag2")
def impossible_link(event):
should_never_run()
hook.register_once(impossible_link, before="tag", after="tag2")
        # if this fails, the tags were lost when there was nothing
        # pointing to them
with pytest.raises(exceptions.CyclicDependencyError):
hook.fire()
with pytest.raises(exceptions.NotRegisteredError):
hook.unregister(callonce)
def test_dependency_lookup(self, target):
hook = target()
@hook
def local_target(event):
event.local_target_run = True
@hook(after="local_target", before="hook_reference_target.target")
def check_after(event):
assert event.local_target_run
event.check_from_remote_target = True
from .hook_reference_target import attach_to_hook
attach_to_hook(hook, after="test_hook.local_target")
hook.fire()
def test_get_name(self, target):
hook = target()
assert hook._get_name(GetNameTarget) == "crow2.events.test.test_hook.GetNameTarget"
from crow2.events.test import hook_reference_target
assert hook._get_name(hook_reference_target) == "crow2.events.test.hook_reference_target"
assert hook._get_name(self.test_dependency_lookup) == "crow2.events.test.test_hook.TestOrderedHook.test_dependency_lookup"
with pytest.raises(Exception):
hook._get_name(5)
class Immutable(object):
"test to ensure caching does not cause any unexpected behavior"
__slots__ = ()
immutable_instance = Immutable()
# NOTE: this is not the behavior people probably expect! will need documenting
assert hook._get_name(Immutable) == "crow2.events.test.test_hook.Immutable"
assert hook._get_name(Immutable) == "crow2.events.test.test_hook.Immutable"
def test_unresolvable_object(self, capsys, target):
hook = target()
hook.register(tuple())
out, err = capsys.readouterr()
assert "warning" in out.lower()
hook.unregister(tuple())
out, err = capsys.readouterr()
assert "warning" in out.lower()
def test_error_logging(self, capsys, target):
safe_hook = target(stop_exceptions=True)
safe_counter = Counter()
class TestErrorLoggingError(Exception):
pass
@safe_hook
def raising_handler(event):
event.before = True
raise TestErrorLoggingError("derp")
@safe_hook(after="raising_handler")
def check_success(event):
safe_counter.tick()
assert event.before
oldout, olderr = capsys.readouterr()
# check that it works
safe_hook.fire()
assert safe_counter.count == 1
out, err = capsys.readouterr() # test that the error was logged
assert "TestErrorLoggingError" in out
assert "derp" in out
assert "raising_handler" in out
unsafe_hook = target(stop_exceptions=False)
unsafe_counter = Counter()
@unsafe_hook
def raising_handler_2(event):
event.before = True
raise TestErrorLoggingError("herp")
@unsafe_hook(after="raising_handler_2")
def should_never_run(event): # pragma: no cover
assert event.before
unsafe_counter.tick()
with pytest.raises(TestErrorLoggingError):
unsafe_hook.fire()
assert unsafe_counter.count == 0
def test_tag_dependency(self, target):
hook = target()
hook.tag("tag", after=":derp")
@hook(tag="tag")
def derk(event):
assert event.derp_called
event.derk_called = True
@hook(tag="derp")
def derp(event):
event.derp_called = True
event = hook.fire()
assert event.derk_called
hook.unregister(derk)
hook.register(derk, tag="tag")
event = hook.fire()
assert event.derk_called
def test_tag_invalid_dependency(self, target):
hook = target()
hook.tag("tag", after="derp")
@hook(tag="tag")
def derk(event):
should_never_run()
@hook
def derp(event):
should_never_run()
with pytest.raises(exceptions.DependencyMissingError):
hook.fire()
def test_tag_dependencyjoin(self, target):
hook = target()
hook.tag("tag", after="herp", before=(":herk"))
hook.tag("tag", after=":derp", before=("derk"))
@hook(tag="tag")
def donk(event):
assert event.herp_called
assert event.derp_called
event.donk_called = True
@hook(tag="herp")
def herp(event):
event.herp_called = True
@hook(tag="derp")
def derp(event):
event.derp_called = True
@hook(tag="herk")
def herk_func(event):
assert event.donk_called
event.herk_called = True
@hook(tag="derk")
def derk_func(event):
assert event.donk_called
event.derk_called = True
event = hook.fire()
assert event.derk_called
assert event.herk_called
def test_cancellation():
hook = CancellableHook()
@hook(before="second")
def first(event):
event.first_called = True
event.cancel()
@hook
def second(event):
assert not "reached" # pragma: no cover
event = hook.fire()
assert event.first_called
assert event.cancelled
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Search Ads operators.
"""
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Optional
from airflow import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.marketing_platform.hooks.search_ads import GoogleSearchAdsHook
from airflow.utils.decorators import apply_defaults
class GoogleSearchAdsInsertReportOperator(BaseOperator):
"""
Inserts a report request into the reporting system.
    .. seealso::
For API documentation check:
https://developers.google.com/search-ads/v2/reference/reports/request
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsInsertReportOperator`
:param report: Report to be generated
:type report: Dict[str, Any]
    :param api_version: The version of the API that will be requested, for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
request must have domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = ("report",)
template_ext = (".json",)
@apply_defaults
def __init__(
self,
report: Dict[str, Any],
api_version: str = "v2",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.report = report
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
def execute(self, context: Dict):
hook = GoogleSearchAdsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
)
self.log.info("Generating Search Ads report")
response = hook.insert_report(report=self.report)
report_id = response.get("id")
self.xcom_push(context, key="report_id", value=report_id)
self.log.info("Report generated, id: %s", report_id)
return response
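# Usage sketch (illustrative only): how this operator might be wired into a
# DAG. The task id, report body and connection are assumptions, not part of
# this module.
#
#     insert_report = GoogleSearchAdsInsertReportOperator(
#         task_id="insert_search_ads_report",
#         report={
#             "reportScope": {"agencyId": "AGENCY_ID"},
#             "reportType": "account",
#             "columns": [{"columnName": "account"}],
#             "downloadFormat": "csv",
#             "statisticsCurrency": "usd",
#         },
#     )
#
# The generated report id is pushed to XCom under the ``report_id`` key (see
# ``execute`` above), so downstream tasks can pull or template it.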
class GoogleSearchAdsDownloadReportOperator(BaseOperator):
"""
Downloads a report to GCS bucket.
    .. seealso::
For API documentation check:
https://developers.google.com/search-ads/v2/reference/reports/getFile
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsGetfileReportOperator`
:param report_id: ID of the report.
:type report_id: str
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param report_name: The report name to set when uploading the local file. If not provided then
report_id is used.
:type report_name: str
:param gzip: Option to compress local file or file data for upload
:type gzip: bool
    :param api_version: The version of the API that will be requested, for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
request must have domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = ("report_name", "report_id", "bucket_name")
@apply_defaults
def __init__(
self,
report_id: str,
bucket_name: str,
report_name: Optional[str] = None,
gzip: bool = True,
chunk_size: int = 10 * 1024 * 1024,
api_version: str = "v2",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.report_id = report_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.chunk_size = chunk_size
self.gzip = gzip
self.bucket_name = self._set_bucket_name(bucket_name)
self.report_name = report_name
def _resolve_file_name(self, name: str) -> str:
csv = ".csv"
gzip = ".gz"
if not name.endswith(csv):
name += csv
if self.gzip:
name += gzip
return name
@staticmethod
def _set_bucket_name(name: str) -> str:
bucket = name if not name.startswith("gs://") else name[5:]
return bucket.strip("/")
@staticmethod
def _handle_report_fragment(fragment: bytes) -> bytes:
fragment_records = fragment.split(b"\n", 1)
if len(fragment_records) > 1:
return fragment_records[1]
return b""
def execute(self, context: Dict):
hook = GoogleSearchAdsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to
)
# Resolve file name of the report
report_name = self.report_name or self.report_id
report_name = self._resolve_file_name(report_name)
response = hook.get(report_id=self.report_id)
if not response['isReportReady']:
raise AirflowException('Report {} is not ready yet'.format(self.report_id))
# Resolve report fragments
fragments_count = len(response["files"])
# Download chunks of report's data
self.log.info("Downloading Search Ads report %s", self.report_id)
with NamedTemporaryFile() as temp_file:
for i in range(fragments_count):
byte_content = hook.get_file(
report_fragment=i, report_id=self.report_id
)
fragment = (
byte_content
if i == 0
else self._handle_report_fragment(byte_content)
)
temp_file.write(fragment)
temp_file.flush()
gcs_hook.upload(
bucket_name=self.bucket_name,
object_name=report_name,
gzip=self.gzip,
filename=temp_file.name,
)
self.xcom_push(context, key="file_name", value=report_name)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# Copyright/License Notice (BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2010-2012, Daniel Knaggs - 2E0DPK/M6DPK #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
from danlog import DanLog
from ddp import *
import os
import socket
import sys
from xml.dom import minidom
###########
# Globals #
###########
conn = None
log = DanLog("HTTPProxyServer")
#############
# Constants #
#############
ALLOW_UNSIGNED_PACKETS = False
BACKEND_DATAMODE = "PSK500R"
BACKEND_HOSTNAME = "localhost"
BACKEND_PORT = 7362
DEBUG_MODE = False
DISABLE_CRYPTO = False
PROXY_CALLSIGN = "CHANGEME"
SPECIFICATION = 0
UPSTREAM_PROXY_IP = ""
UPSTREAM_PROXY_PORT = 0
USE_TCP = 0
XML_SETTINGS_FILE = "httpproxyserver-settings.xml"
###############
# Subroutines #
###############
def cBool(value):
if str(value).lower() == "false" or str(value) == "0":
return False
elif str(value).lower() == "true" or str(value) == "1":
return True
else:
return False
def exitProgram():
sys.exit(0)
def ifNoneReturnZero(strinput):
if strinput is None:
return 0
else:
return strinput
def iif(testval, trueval, falseval):
if testval:
return trueval
else:
return falseval
def main():
log.info("""
#########################################################################
# Copyright/License Notice (BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2010-2012, Daniel Knaggs - 2E0DPK/M6DPK #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
""")
log.info("")
log.info("HTTP Proxy - Server")
log.info("===================")
log.info("Checking settings...")
if os.path.exists(XML_SETTINGS_FILE) == False:
log.warn("The XML settings file doesn't exist, create one...")
xmlProxySettingsWrite()
log.info("The XML settings file has been created using the default settings. Please edit it and restart the proxy server once you're happy with the settings.")
exitProgram()
else:
log.info("Reading XML settings...")
xmlProxySettingsRead()
		# Rewrite the settings file so it picks up any newly added settings
if os.path.exists(XML_SETTINGS_FILE + ".bak"):
os.unlink(XML_SETTINGS_FILE + ".bak")
os.rename(XML_SETTINGS_FILE, XML_SETTINGS_FILE + ".bak")
xmlProxySettingsWrite()
log.info("Checking the config...")
if PROXY_CALLSIGN == "" or PROXY_CALLSIGN == "CHANGEME":
log.error("The proxy server callsign is invalid. Please edit the config XML file.")
exitProgram()
log.info("Setting up DDP...")
ddp = DDP(hostname = BACKEND_HOSTNAME, port = BACKEND_PORT, data_mode = BACKEND_DATAMODE, timeout = 120., ack_timeout = 60., tx_hangtime = 1.25, data_length = 4096, specification = SPECIFICATION, disable_ec = False, disable_crypto = DISABLE_CRYPTO, allow_unsigned_packets = ALLOW_UNSIGNED_PACKETS, application = "DDP Example: HTTP Proxy", ignore_broadcast_packets = True, debug_mode = DEBUG_MODE)
ddp.setCallsign(PROXY_CALLSIGN)
log.info("Waiting for a packet...")
while True:
try:
data = ddp.receiveDataFromAny(PROXY_CALLSIGN)
if data is not None:
# Check the flags
d = data[0]
packet = data[1]
# Pass on the packet to the upstream proxy server
try:
log.info("Passing packet to upstream proxy on %s:%d..." % (UPSTREAM_PROXY_IP, UPSTREAM_PROXY_PORT))
skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
skt.connect((UPSTREAM_PROXY_IP, UPSTREAM_PROXY_PORT))
buf = ""
log.info("Buffering data...")
skt.settimeout(1)
skt.sendall(d + "\n\n")
while True:
try:
data = skt.recv(4096)
if data:
buf += data
else:
break
except Exception, ex:
log.fatal(ex)
break
skt.close()
# Send back the result
log.info("Sending the packet from the proxy back to client...")
ddp.transmitData(PROXY_CALLSIGN, "", packet[ddp.SECTION_SOURCE], buf, USE_TCP, 1)
except Exception, ex:
print ex
except KeyboardInterrupt:
break
except Exception, ex:
log.fatal(ex)
log.info("Cleaning up...")
ddp.dispose()
ddp = None
log.info("Exiting...")
exitProgram()
def xmlProxySettingsRead():
global ALLOW_UNSIGNED_PACKETS, BACKEND_DATAMODE, BACKEND_HOSTNAME, BACKEND_PORT, DEBUG_MODE, DISABLE_CRYPTO, PROXY_CALLSIGN, SPECIFICATION, UPSTREAM_PROXY_IP, UPSTREAM_PROXY_PORT, USE_TCP
if os.path.exists(XML_SETTINGS_FILE):
xmldoc = minidom.parse(XML_SETTINGS_FILE)
myvars = xmldoc.getElementsByTagName("Setting")
for var in myvars:
for key in var.attributes.keys():
val = str(var.attributes[key].value)
# Now put the correct values to correct key
if key == "ServerCallsign":
PROXY_CALLSIGN = val
elif key == "BackendDataMode":
BACKEND_DATAMODE = val.upper()
elif key == "BackendHostname":
BACKEND_HOSTNAME = val
elif key == "BackendPort":
					BACKEND_PORT = int(val)
elif key == "Specification":
SPECIFICATION = int(val)
elif key == "UseTCP":
USE_TCP = int(val)
elif key == "AllowUnsignedPackets":
ALLOW_UNSIGNED_PACKETS = cBool(val)
elif key == "DisableCrypto":
DISABLE_CRYPTO = cBool(val)
elif key == "DebugMode":
DEBUG_MODE = cBool(val)
elif key == "UpstreamProxyIP":
UPSTREAM_PROXY_IP = val
elif key == "UpstreamProxyPort":
UPSTREAM_PROXY_PORT = int(val)
else:
log.warn("XML setting attribute \"%s\" isn't known. Ignoring..." % key)
def xmlProxySettingsWrite():
if os.path.exists(XML_SETTINGS_FILE) == False:
xmloutput = file(XML_SETTINGS_FILE, "w")
xmldoc = minidom.Document()
# Create header
settings = xmldoc.createElement("HTTPProxyServer")
xmldoc.appendChild(settings)
# Write each of the details one at a time, makes it easier for someone to alter the file using a text editor
var = xmldoc.createElement("Setting")
var.setAttribute("BackendDataMode", str(BACKEND_DATAMODE))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("BackendHostname", str(BACKEND_HOSTNAME))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("BackendPort", str(BACKEND_PORT))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("ServerCallsign", str(PROXY_CALLSIGN))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("Specification", str(SPECIFICATION))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("UseTCP", str(USE_TCP))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("AllowUnsignedPackets", str(ALLOW_UNSIGNED_PACKETS))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("DisableCrypto", str(DISABLE_CRYPTO))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("DebugMode", str(DEBUG_MODE))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("UpstreamProxyIP", str(UPSTREAM_PROXY_IP))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("UpstreamProxyPort", str(UPSTREAM_PROXY_PORT))
settings.appendChild(var)
# Finally, save to the file
xmloutput.write(xmldoc.toprettyxml())
xmloutput.close()
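# For reference, the settings file written above looks roughly like this
# (illustrative sketch only; the attribute values shown are the defaults
# defined at the top of this script):
#
#	<?xml version="1.0" ?>
#	<HTTPProxyServer>
#		<Setting BackendDataMode="PSK500R"/>
#		<Setting BackendHostname="localhost"/>
#		<Setting BackendPort="7362"/>
#		<Setting ServerCallsign="CHANGEME"/>
#		...
#	</HTTPProxyServer>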
##########################
# Main
##########################
if __name__ == "__main__":
main()
|
|
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Story resources for version 1 of the Timesketch API."""
from flask import jsonify
from flask import request
from flask import abort
from flask_restful import Resource
from flask_login import login_required
from flask_login import current_user
from sqlalchemy import desc
from timesketch.api.v1 import resources
from timesketch.api.v1 import utils
from timesketch.lib import forms
from timesketch.lib.definitions import HTTP_STATUS_CODE_CREATED
from timesketch.lib.definitions import HTTP_STATUS_CODE_OK
from timesketch.lib.definitions import HTTP_STATUS_CODE_BAD_REQUEST
from timesketch.lib.definitions import HTTP_STATUS_CODE_FORBIDDEN
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.stories import api_fetcher as story_api_fetcher
from timesketch.lib.stories import manager as story_export_manager
from timesketch.models import db_session
from timesketch.models.sketch import Sketch
from timesketch.models.sketch import Story
class StoryListResource(resources.ResourceMixin, Resource):
"""Resource to get all stories for a sketch or to create a new story."""
@login_required
def get(self, sketch_id):
"""Handles GET request to the resource.
Args:
sketch_id: Integer primary key for a sketch database model
Returns:
Stories in JSON (instance of flask.wrappers.Response)
"""
sketch = Sketch.query.get_with_acl(sketch_id)
if not sketch:
abort(
HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
if not sketch.has_permission(current_user, 'read'):
abort(HTTP_STATUS_CODE_FORBIDDEN,
'User does not have read access controls on sketch.')
stories = []
for story in Story.query.filter_by(
sketch=sketch).order_by(desc(Story.created_at)):
stories.append(story)
return self.to_json(stories)
@login_required
def post(self, sketch_id):
"""Handles POST request to the resource.
Args:
sketch_id: Integer primary key for a sketch database model
Returns:
A view in JSON (instance of flask.wrappers.Response)
"""
form = forms.StoryForm.build(request)
if not form.validate_on_submit():
abort(
HTTP_STATUS_CODE_BAD_REQUEST, 'Unable to validate form data.')
sketch = Sketch.query.get_with_acl(sketch_id)
if not sketch:
abort(
HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
if not sketch.has_permission(current_user, 'write'):
abort(HTTP_STATUS_CODE_FORBIDDEN,
'User does not have write access controls on sketch.')
title = ''
if form.title.data:
title = form.title.data
story = Story(
title=title, content='[]', sketch=sketch, user=current_user)
db_session.add(story)
db_session.commit()
# Update the last activity of a sketch.
utils.update_sketch_last_activity(sketch)
return self.to_json(story, status_code=HTTP_STATUS_CODE_CREATED)
class StoryResource(resources.ResourceMixin, Resource):
"""Resource to get a story."""
@staticmethod
def _export_story(story, sketch_id, export_format='markdown'):
"""Returns a story in a format as requested in export_format.
Args:
story: a story object (instance of Story) that is to be exported.
sketch_id: integer with the sketch ID.
export_format: string with the name of the format to export the
story to. Defaults to "markdown".
Returns:
The exported story in the format described. This could be a text
or a binary, depending on the output format.
"""
exporter_class = story_export_manager.StoryExportManager.get_exporter(
export_format)
if not exporter_class:
return b''
with exporter_class() as exporter:
data_fetcher = story_api_fetcher.ApiDataFetcher()
data_fetcher.set_sketch_id(sketch_id)
exporter.set_data_fetcher(data_fetcher)
exporter.set_title(story.title)
exporter.set_creation_date(story.created_at.isoformat())
if story.user:
exporter.set_author(story.user.username)
exporter.set_exporter(current_user.username)
exporter.from_string(story.content)
return exporter.export_story()
@login_required
def get(self, sketch_id, story_id):
"""Handles GET request to the resource.
Args:
sketch_id: Integer primary key for a sketch database model
story_id: Integer primary key for a story database model
Returns:
A story in JSON (instance of flask.wrappers.Response)
"""
sketch = Sketch.query.get_with_acl(sketch_id)
story = Story.query.get(story_id)
if not story:
msg = 'No Story found with this ID.'
abort(HTTP_STATUS_CODE_NOT_FOUND, msg)
if not sketch:
msg = 'No sketch found with this ID.'
abort(HTTP_STATUS_CODE_NOT_FOUND, msg)
if not sketch.has_permission(current_user, 'read'):
abort(HTTP_STATUS_CODE_FORBIDDEN,
'User does not have read access controls on sketch.')
# Check that this story belongs to the sketch
if story.sketch_id != sketch.id:
abort(
HTTP_STATUS_CODE_NOT_FOUND,
'Sketch ID ({0:d}) does not match with the ID in '
'the story ({1:d})'.format(sketch.id, story.sketch_id))
# Only allow editing if the current user is the author.
# This is needed until we have proper collaborative editing and
# locking implemented.
meta = dict(is_editable=False)
if current_user == story.user:
meta['is_editable'] = True
return self.to_json(story, meta=meta)
@login_required
def post(self, sketch_id, story_id):
"""Handles POST request to the resource.
Args:
sketch_id: Integer primary key for a sketch database model
story_id: Integer primary key for a story database model
Returns:
A view in JSON (instance of flask.wrappers.Response)
"""
sketch = Sketch.query.get_with_acl(sketch_id)
story = Story.query.get(story_id)
if not story:
msg = 'No Story found with this ID.'
abort(HTTP_STATUS_CODE_NOT_FOUND, msg)
if not sketch:
msg = 'No sketch found with this ID.'
abort(HTTP_STATUS_CODE_NOT_FOUND, msg)
if story.sketch_id != sketch.id:
abort(
HTTP_STATUS_CODE_NOT_FOUND,
'Sketch ID ({0:d}) does not match with the ID in '
'the story ({1:d})'.format(sketch.id, story.sketch_id))
if not sketch.has_permission(current_user, 'write'):
abort(HTTP_STATUS_CODE_FORBIDDEN,
'User does not have write access controls on sketch.')
form = request.json
if not form:
form = request.data
if form and form.get('export_format'):
export_format = form.get('export_format')
return jsonify(story=self._export_story(
story=story, sketch_id=sketch_id, export_format=export_format))
story.title = form.get('title', '')
story.content = form.get('content', '[]')
db_session.add(story)
db_session.commit()
# Update the last activity of a sketch.
utils.update_sketch_last_activity(sketch)
return self.to_json(story, status_code=HTTP_STATUS_CODE_CREATED)
@login_required
def delete(self, sketch_id, story_id):
"""Handles DELETE request to the resource.
Args:
sketch_id: Integer primary key for a sketch database model
story_id: Integer primary key for a story database model
"""
sketch = Sketch.query.get_with_acl(sketch_id)
story = Story.query.get(story_id)
if not story:
msg = 'No Story found with this ID.'
abort(HTTP_STATUS_CODE_NOT_FOUND, msg)
if not sketch:
msg = 'No sketch found with this ID.'
abort(HTTP_STATUS_CODE_NOT_FOUND, msg)
# Check that this timeline belongs to the sketch
if story.sketch_id != sketch.id:
msg = (
                'The sketch ID ({0:d}) does not match with the story '
'sketch ID ({1:d})'.format(sketch.id, story.sketch_id))
abort(HTTP_STATUS_CODE_FORBIDDEN, msg)
if not sketch.has_permission(user=current_user, permission='write'):
abort(
HTTP_STATUS_CODE_FORBIDDEN,
'The user does not have write permission on the sketch.')
sketch.stories.remove(story)
db_session.commit()
# Update the last activity of a sketch.
utils.update_sketch_last_activity(sketch)
return HTTP_STATUS_CODE_OK
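# Illustrative client-side usage of the story resources above. This is only a
# sketch: the endpoint path and the use of the `requests` library are
# assumptions about how the resources are typically registered and called,
# not part of this module.
#
#   import requests
#
#   session = requests.Session()  # assume an already-authenticated session
#   # Ask StoryResource.post() to export story 5 of sketch 1 as Markdown.
#   response = session.post(
#       'https://<timesketch-host>/api/v1/sketches/1/stories/5/',
#       json={'export_format': 'markdown'})
#   exported_markdown = response.json()['story']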
|
|
#
#----------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#----------------------------------------------------------------
#authors :
#---------
# Piumi Francois ([email protected]) software conception and development (engineer in bioinformatics)
# Jouneau Luc ([email protected]) software conception and development (engineer in bioinformatics)
# Gasselin Maxime ([email protected]) software user and data analysis (PhD student in Epigenetics)
# Perrier Jean-Philippe ([email protected]) software user and data analysis (PhD student in Epigenetics)
# Al Adhami Hala ([email protected]) software user and data analysis (postdoctoral researcher in Epigenetics)
# Jammes Helene ([email protected]) software user and data analysis (research group leader in Epigenetics)
# Kiefer Helene ([email protected]) software user and data analysis (principal investigator in Epigenetics)
#
#
import os
import re
import sys
from sys import argv
### Sort CpG 'chr.position' by chromosome then position
def CpG_compare(x, y):
CpG1=x.split(".")
CpG2=y.split(".")
if (CpG1[0] == CpG2[0]):
		#Same chromosome: compare the coordinates numerically
return int(float(CpG1[1])) - int(float(CpG2[1]))
else:
		#Different chromosomes: compare them
chr1_is_num=re.match("^[0-9]+$",CpG1[0])
chr2_is_num=re.match("^[0-9]+$",CpG2[0])
if chr1_is_num!=None and chr2_is_num!=None:
			#Both chromosomes are numeric: compare them numerically
return int(float(CpG1[0])) - int(float(CpG2[0]))
elif chr1_is_num!=None:
			#Only chromosome 1 is numeric
return -1
elif chr2_is_num!=None:
			#Only chromosome 2 is numeric
return +1
else:
			#Neither chromosome is numeric: compare them as strings
if CpG1[0].__lt__(CpG2[0]):
return -1
else:
return +1
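# Note: CpG_compare is an old-style cmp() comparator and is passed to
# sorted(..., cmp=...) further down, which only exists in Python 2. A possible
# Python 3 equivalent (untested sketch) would be:
#
#	from functools import cmp_to_key
#	sorted(CpGs.keys(), key=cmp_to_key(CpG_compare))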
###################
debug=0
###################
config_file=argv[1]
log_file=argv[2]
step2file={}
try :
in_file=open(log_file,"rt")
for line in in_file.readlines():
line=line.rstrip("\n")
me=re.match("^CMD (.*)$",line)
if me is not None:
step=me.group(1)
continue
me=re.match("^OUT (.*)$",line)
if me is not None:
file_in=me.group(1)
step2file[step]=file_in
in_file.close()
except IOError as exc:
sys.exit("Cannot open log file '{0}' : {1}".format(log_file,exc))
#####################
## Default values
#####################
stat_value="pvalue";
stat_threshold1=0.01;
stat_threshold2=0.05;
output_dir=".";
try :
in_file=open(config_file,"rt")
for line in in_file.readlines():
line=line.rstrip("\n")
me=re.match("^#stat_method\t([^#]*)(#.*)?$",line)
if me is not None:
stat_method=me.group(1)
continue
me=re.match("^#stat_value\t([^#]*)(#.*)?$",line)
if me is not None:
stat_value=me.group(1)
continue
me=re.match("^#stat_threshold1\t([^#]*)(#.*)?$",line)
if me is not None:
stat_threshold1=float(me.group(1))
continue
me=re.match("^#stat_threshold2\t([^#]*)(#.*)?$",line)
if me is not None:
stat_threshold2=float(me.group(1))
continue
me=re.match("^#output_dir\t([^#]*)(#.*)?$",line)
if me is not None:
output_dir=me.group(1)
continue
in_file.close()
except IOError as exc:
sys.exit("Cannot open config file '{0}' : {1}".format(config_file,exc))
try:
out_log=open(log_file,"at")
out_log.write("CMD merge_DMCs.py\n")
out_log.write("\tConfiguration file :\n")
out_log.write("\t--------------------\n")
out_log.write("\t\tstat.value="+stat_value+"\n")
out_log.write("\t\tstat1.threshold="+str(stat_threshold1)+"\n")
out_log.write("\t\tstat2.threshold="+str(stat_threshold2)+"\n")
out_log.write("\t\toutput.dir="+output_dir+"\n")
CpGs={}
#Read statistical results
if "get_diff_methyl.R" not in step2file:
sys.exit("No output file defined fo statistical step. Exiting.")
stat_file=step2file["get_diff_methyl.R"];
in_stat=open(stat_file,"rt")
no_line=0
field2pos={}
for line in in_stat.readlines():
no_line+=1
line=line.rstrip("\n")
elmts=line.split("\t")
if no_line==1:
header_stat=line
pos=0
for field in elmts:
field2pos[field]=pos
pos+=1
if stat_value not in field2pos:
sys.exit("No '"+stat_value+"' field found in header of '"+stat_file+"'.")
continue
pq_value=float(elmts[field2pos[stat_value]])
if pq_value > stat_threshold1:
continue
id=elmts[0]+"."+elmts[1]
CpGs[id]=line
in_stat.close()
#Read obvious results
nb_obvious_added=0
if "get_obvious_DMC.py" not in step2file:
sys.exit("No output file defined for obvious DMCs discovery step. Exiting.")
obvious_file=step2file["get_obvious_DMC.py"];
in_obvious=open(obvious_file,"rt")
no_line=0
field2pos={}
for line in in_obvious.readlines():
no_line+=1
line=line.rstrip("\n")
elmts=line.split("\t")
if no_line==1:
#Add pValue/qValue field before last field
idx=len(elmts)-1
elmts.append(elmts[idx])
elmts[idx]=elmts[idx-1]
elmts[idx-1]=stat_value
header_obvious="\t".join(elmts)
if header_obvious != header_stat:
print "header stat:\n'"+header_stat+"'\n"
print "header obvious:\n'"+header_obvious+"'\n"
sys.exit("Order of samples in '"+stat_file+"' and '"+obvious_file+"' differs. Exiting.")
continue
id=elmts[0]+"."+elmts[1]
if id not in CpGs:
#Add pValue/qValue field before last field
idx=len(elmts)-1
elmts.append(elmts[idx])
elmts[idx]=elmts[idx-1]
elmts[idx-1]=""
line="\t".join(elmts)
CpGs[id]=line
nb_obvious_added+=1
	in_obvious.close()
#Output
txt_out=step2file["get_diff_methyl.R"].replace(".txt"," - with obvious DMCs.txt")
txt_out=txt_out.replace(stat_value+str(stat_threshold2),stat_value+str(stat_threshold1))
out_txt=open(txt_out,"wt")
out_txt.write(header_stat+"\n")
bed_out=txt_out.replace(".txt",".bed")
out_bed=open(bed_out,"wt")
for pos in sorted(CpGs.keys(), cmp=CpG_compare):
out_txt.write(CpGs[pos]+"\n")
me=re.match("^(.*)[.]([0-9]+)$",pos)
if me is None:
sys.exit("Cannot interpret CpG position '"+pos+"'. Exiting.")
chr=me.group(1)
pos=int(float(me.group(2)))
out_bed.write(chr+"\t"+str(pos)+"\t"+str(pos+1)+"\n")
out_txt.close()
out_bed.close()
out_log.write("INFO number of obvious CpGs added to "+stat_method+"="+str(nb_obvious_added)+"\n")
out_log.write("OUT "+txt_out+"\n")
out_log.close()
except IOError as exc:
sys.exit("Cannot append to log file '{0}' : {1}".format(log_file,exc))
|
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic proxy to access any AdWords web service."""
__author__ = '[email protected] (Joseph DiLallo)'
import time
from adspygoogle import SOAPpy
from adspygoogle.adwords import AdWordsUtils
from adspygoogle.adwords import AUTH_TOKEN_EXPIRE
from adspygoogle.adwords import AUTH_TOKEN_SERVICE
from adspygoogle.adwords import LIB_SIG
from adspygoogle.adwords import LIB_URL
from adspygoogle.adwords.AdWordsErrors import AdWordsApiError
from adspygoogle.adwords.AdWordsErrors import AdWordsError
from adspygoogle.adwords.AdWordsErrors import ERRORS
from adspygoogle.adwords.AdWordsSoapBuffer import AdWordsSoapBuffer
from adspygoogle.common import Utils
from adspygoogle.common.Errors import Error
from adspygoogle.common.Errors import ValidationError
from adspygoogle.common.GenericApiService import GenericApiService
from adspygoogle.common.GenericApiService import MethodInfoKeys
class GenericAdWordsService(GenericApiService):
"""Wrapper for any AdWords web service."""
# The _POSSIBLE_ADWORDS_REQUEST_HEADERS are both the SOAP element names and
# the self._headers dictionary keys for all elements that may be in an AdWords
# header.
_POSSIBLE_ADWORDS_REQUEST_HEADERS = (
'authToken', 'developerToken', 'userAgent', 'clientCustomerId',
'validateOnly', 'partialFailure')
# The _OAUTH_IGNORE_HEADERS are the header elements that should not be
# included when the client is using OAuth.
  _OAUTH_IGNORE_HEADERS = ('authToken',)
# The _WRAP_LISTS constant indicates that AdWords services do not need to wrap
# lists in an extra layer of XML element tags.
_WRAP_LISTS = False
# The _BUFFER_CLASS is the subclass of SoapBuffer that should be used to track
# all SOAP interactions
_BUFFER_CLASS = AdWordsSoapBuffer
# List of fields we should convert to string.
_STR_CONVERT = ['clientCustomerId']
def __init__(self, headers, config, op_config, lock, logger, service_name):
"""Inits GenericAdWordsService.
Args:
headers: dict Dictionary object with populated authentication
credentials.
config: dict Dictionary object with populated configuration values.
op_config: dict Dictionary object with additional configuration values for
this operation.
lock: thread.lock Thread lock to use to synchronize requests.
logger: Logger Instance of Logger to use for logging.
service_name: string The name of this service.
"""
group = op_config['group']
if service_name == 'BulkMutateJobService': group = 'job'
service_url = [op_config['server'], 'api/adwords', group,
op_config['version'], service_name]
if config['access']:
service_url.insert(len(service_url) - 1, config['access'])
service_url = '/'.join(service_url)
namespace = '/'.join(['https://adwords.google.com/api/adwords',
op_config['group'], op_config['version']])
namespace_extractor = _DetermineNamespacePrefix
super(GenericAdWordsService, self).__init__(
headers, config, op_config, lock, logger, service_name, service_url,
GenericAdWordsService._WRAP_LISTS, GenericAdWordsService._BUFFER_CLASS,
namespace, namespace_extractor)
# AdWords-specific changes to the SOAPpy.WSDL.Proxy
methodattrs = {}
for namespace in self._soappyservice.wsdl.types.keys():
group_name = AdWordsUtils.ExtractGroupNameFromUrl(namespace)
methodattrs['xmlns:' + group_name] = namespace
methodattrs['xmlns'] = self._namespace
self._soappyservice.soapproxy.methodattrs = methodattrs
def _SetHeaders(self):
"""Sets the SOAP headers for this service's requests."""
now = time.time()
if ((('authToken' not in self._headers and
'auth_token_epoch' not in self._config) or
int(now - self._config['auth_token_epoch']) >= AUTH_TOKEN_EXPIRE) and
not self._headers.get('oauth2credentials')):
if ('email' not in self._headers or not self._headers['email'] or
'password' not in self._headers or not self._headers['password']):
raise ValidationError('Required authentication headers, \'email\' and '
'\'password\', are missing. Unable to regenerate '
'authentication token.')
self._headers['authToken'] = Utils.GetAuthToken(
self._headers['email'], self._headers['password'], AUTH_TOKEN_SERVICE,
LIB_SIG, self._config['proxy'])
self._config['auth_token_epoch'] = time.time()
# Apply headers to the SOAPpy service.
header_attrs = {
'xmlns': self._namespace,
'xmlns:cm': ('https://adwords.google.com/api/adwords/cm/' +
self._op_config['version'])
}
soap_headers = SOAPpy.Types.headerType(attrs=header_attrs)
request_header_data = {}
for key in GenericAdWordsService._POSSIBLE_ADWORDS_REQUEST_HEADERS:
if (key in GenericAdWordsService._OAUTH_IGNORE_HEADERS
and self._headers.get('oauth2credentials')):
continue
if key in self._headers and self._headers[key]:
value = self._headers[key]
if key in GenericAdWordsService._STR_CONVERT:
value = str(value)
elif key == 'userAgent':
value = ''.join([value, LIB_SIG])
request_header_data['cm:' + key] = SOAPpy.Types.stringType(
value)
request_header = SOAPpy.Types.structType(
data=request_header_data, name='RequestHeader', typed=0)
soap_headers.RequestHeader = request_header
self._soappyservice.soapproxy.header = soap_headers
def _GetMethodInfo(self, method_name):
"""Pulls all of the relevant data about a method from a SOAPpy service.
The return dictionary has two keys, MethodInfoKeys.INPUTS and
MethodInfoKeys.OUTPUTS. Each of these keys has a list value. These lists
contain a dictionary of information on the input/output parameter list, in
order.
Args:
method_name: string The name of the method to pull information for.
Returns:
dict A dictionary containing information about a SOAP method.
"""
rval = {}
rval[MethodInfoKeys.INPUTS] = []
for i in range(len(self._soappyservice.wsdl.types[
self._namespace].elements[method_name].content.content.content)):
param_attributes = self._soappyservice.wsdl.types[
self._namespace].elements[method_name].content.content.content[
i].attributes
inparam = {
MethodInfoKeys.ELEMENT_NAME: param_attributes['name'],
MethodInfoKeys.NS: param_attributes['type'].getTargetNamespace(),
MethodInfoKeys.TYPE: param_attributes['type'].getName(),
MethodInfoKeys.MAX_OCCURS: param_attributes['maxOccurs']
}
rval[MethodInfoKeys.INPUTS].append(inparam)
rval[MethodInfoKeys.OUTPUTS] = []
for i in range(len(self._soappyservice.wsdl.types[
self._namespace].elements[
method_name + 'Response'].content.content.content)):
param_attributes = self._soappyservice.wsdl.types[
self._namespace].elements[
method_name + 'Response'].content.content.content[i].attributes
outparam = {
MethodInfoKeys.ELEMENT_NAME: param_attributes['name'],
MethodInfoKeys.NS: param_attributes['type'].getTargetNamespace(),
MethodInfoKeys.TYPE: param_attributes['type'].getName(),
MethodInfoKeys.MAX_OCCURS: param_attributes['maxOccurs']
}
rval[MethodInfoKeys.OUTPUTS].append(outparam)
return rval
def _TakeActionOnSoapCall(self, method_name, args):
"""Gives the service a chance to take product-specific action on raw inputs.
AdWords will support legacy xsi_typing for the BulkMutateJobService.
Args:
method_name: string The name of the SOAP operation being called.
args: tuple The arguments passed into the SOAP operation.
Returns:
tuple The method arguments, possibly modified.
"""
if (self._service_name == 'BulkMutateJobService' and
method_name.lower() == 'mutate'):
AdWordsUtils.TransformJobOperationXsi(args[0])
elif (self._service_name == 'UserListService' and
method_name.lower() == 'mutate'):
if isinstance(args[0], (list, tuple)):
for operation in args[0]:
if isinstance(operation, dict) and 'operand' in operation:
AdWordsUtils.TransformUserListRuleOperands(operation['operand'])
return args
def _HandleLogsAndErrors(self, buf, start_time, stop_time, error=None):
"""Manage SOAP XML message.
Args:
buf: SoapBuffer SOAP buffer.
start_time: str Time before service call was invoked.
stop_time: str Time after service call was invoked.
[optional]
error: dict Error, if any.
"""
if error is None:
error = {}
try:
# Update the number of units and operations consumed by API call.
if buf.GetCallUnits() and buf.GetCallOperations():
self._config['units'][0] += int(buf.GetCallUnits())
self._config['operations'][0] += int(buf.GetCallOperations())
self._config['last_units'][0] = int(buf.GetCallUnits())
self._config['last_operations'][0] = int(buf.GetCallOperations())
handlers = self.__GetLogHandlers(buf)
fault = super(GenericAdWordsService, self)._ManageSoap(
buf, handlers, LIB_URL, start_time, stop_time, error)
if fault:
# Raise a specific error, subclass of AdWordsApiError.
if 'detail' in fault and fault['detail']:
if 'errors' in fault['detail']:
error_type = fault['detail']['errors'][0]['type']
if error_type in ERRORS: raise ERRORS[str(error_type)](fault)
if isinstance(fault, basestring):
raise AdWordsError(fault)
elif isinstance(fault, dict):
raise AdWordsApiError(fault)
except AdWordsApiError, e:
raise e
except AdWordsError, e:
raise e
except Error, e:
if error: e = error
raise Error(e)
def __GetLogHandlers(self, buf):
"""Gets a list of log handlers for the AdWords library.
Args:
buf: SoapBuffer SOAP buffer from which calls are retrieved for logging.
Returns:
list Log handlers for the AdWords library.
"""
return [
{
'tag': 'xml_log',
'name': 'soap_xml',
'data': ''
},
{
'tag': 'request_log',
'name': 'request_info',
'data': str('host=%s service=%s method=%s operator=%s '
'responseTime=%s operations=%s units=%s requestId=%s'
% (Utils.GetNetLocFromUrl(self._service_url),
self._service_name, buf.GetCallName(),
buf.GetOperatorName(), buf.GetCallResponseTime(),
buf.GetCallOperations(), buf.GetCallUnits(),
buf.GetCallRequestId()))
},
{
'tag': '',
'name': 'adwords_api_lib',
'data': ''
}
]
def _DetermineNamespacePrefix(url):
"""Returns the SOAP prefix to use for definitions within the given namespace.
Args:
url: string The URL of the namespace.
Returns:
string The SOAP namespace prefix to use for the given namespace.
"""
return AdWordsUtils.ExtractGroupNameFromUrl(url) + ':'
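# Illustrative example of the helper above, assuming
# AdWordsUtils.ExtractGroupNameFromUrl() returns the group segment of the
# namespace URL (e.g. 'cm'):
#
#   _DetermineNamespacePrefix(
#       'https://adwords.google.com/api/adwords/cm/v201109')
#   # -> 'cm:'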
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class BatchNormalizationTest(test.TestCase):
def _npBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) / np.sqrt(v + epsilon)
y = y * gamma if scale_after_normalization else y
return y + beta if shift_after_normalization else y
def _opsBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) * math_ops.rsqrt(v + epsilon)
if scale_after_normalization:
y = gamma * y
return y + beta if shift_after_normalization else y
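  # Both reference implementations above compute
  #   y = (x - m) / sqrt(v + epsilon) * gamma + beta
  # where gamma is applied only when scale_after_normalization is True and
  # beta only when shift_after_normalization is True.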
def _tfBatchNormV1(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Original implementation."""
# _batch_norm_with_global_normalization is deprecated in v9
ops.get_default_graph().graph_def_versions.producer = 8
# pylint: disable=protected-access
return gen_nn_ops._batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
# pylint: enable=protected-access
def _tfBatchNormV1BW(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Re-implementation of the original kernel for backward compatibility."""
return nn_impl.batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
def _tfBatchNormV2(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
"""New implementation."""
return nn_impl.batch_normalization(x, m, v, beta if
shift_after_normalization else None,
gamma if scale_after_normalization else
None, epsilon)
def testBatchNorm(self):
x_shape = [3, 5, 4, 2]
param_shape = [2]
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn2 = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
bn1bw = self._tfBatchNormV1BW(x, m, v, beta, gamma, epsilon,
scale_after_normalization)
bn1 = self._tfBatchNormV1(x, m, v, beta, gamma, epsilon,
scale_after_normalization)
on = self._opsBatchNorm(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
np_bn = self._npBatchNorm(x_val, m_val, v_val, beta_val, gamma_val,
epsilon, scale_after_normalization,
shift_after_normalization)
tf_bn_v2, tf_bn_v1bw, tf_bn_v1, ops_bn = sess.run(
[bn2, bn1bw, bn1, on])
self.assertAllClose(np_bn, ops_bn, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v2, atol=0.00001)
self.assertAllClose(tf_bn_v2, ops_bn, atol=0.00001)
# shift_after_normalization=False is not supported in v1.
if shift_after_normalization:
self.assertAllClose(np_bn, tf_bn_v1bw, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v1, atol=0.00001)
self.assertAllClose(tf_bn_v1, ops_bn, atol=0.00001)
self.assertAllClose(tf_bn_v1bw, ops_bn, atol=0.00001)
def _testBatchNormGradient(self,
param_index,
tag,
scale_after_normalization,
shift_after_normalization,
version,
err_tolerance=1e-11):
x_shape = [3, 5, 4, 5]
param_shape = [5]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
m_val = np.random.random_sample(param_shape).astype(np.float64)
v_val = np.random.random_sample(param_shape).astype(np.float64)
beta_val = np.random.random_sample(param_shape).astype(np.float64)
gamma_val = np.random.random_sample(param_shape).astype(np.float64)
with self.test_session():
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
epsilon = 0.001
if version == 1:
output = self._tfBatchNormV1(x, m, v, beta, gamma, epsilon,
scale_after_normalization)
elif version == 2:
output = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
else:
print("Invalid version", version)
raise ValueError()
all_params = [x, m, v, beta, gamma]
all_shapes = [x_shape, param_shape, param_shape, param_shape, param_shape]
err = gradient_checker.compute_gradient_error(all_params[param_index],
all_shapes[param_index],
output, x_shape)
print("Batch normalization v%d %s gradient %s scale and %s shift err = " %
(version, tag, "with" if scale_after_normalization else "without",
"with" if shift_after_normalization else "without"), err)
self.assertLess(err, err_tolerance)
def _testBatchNormGradientInAllNeedConfigs(self,
param_index,
tag,
err_tolerance=1e-11):
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
# shift_after_normalization=False is not supported in version 1.
for v in ([1, 2] if shift_after_normalization else [2]):
self._testBatchNormGradient(param_index, tag,
scale_after_normalization,
shift_after_normalization, v,
err_tolerance)
def testBatchNormInputGradient(self):
self._testBatchNormGradientInAllNeedConfigs(0, "x")
def testBatchNormMeanGradient(self):
self._testBatchNormGradientInAllNeedConfigs(1, "mean")
def testBatchNormVarianceGradient(self):
self._testBatchNormGradientInAllNeedConfigs(
2, "variance", err_tolerance=1e-03)
def testBatchNormBetaGradient(self):
    # Since beta does not exist when shift_after_normalization=False, we only
    # test with shift_after_normalization=True here.
for scale_after_normalization in [True, False]:
for v in [1, 2]:
self._testBatchNormGradient(3, "beta", scale_after_normalization, True,
v)
def testBatchNormGammaGradient(self):
# If scale_after_normalization is False, backprop for gamma in v1
# will be 0. In version 2 of the API, if scale_after_normalization is False,
# gamma is not used at all, and the gradient is None, which displeases the
# gradient checker.
for scale_after_normalization in [True, False]:
self._testBatchNormGradient(4, "gamma", scale_after_normalization, True,
1)
for shift_after_normalization in [True, False]:
self._testBatchNormGradient(4, "gamma", True, shift_after_normalization,
2)
def testBatchNormGradImpl(self):
x_shape = [7, 5, 4, 6]
param_shape = [6]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
backprop_val = np.random.random_sample(x_shape).astype(np.float32)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
backprop = constant_op.constant(backprop_val, name="backprop")
epsilon = 0.001
for scale_after_normalization in [True, False]:
# _batch_norm_with_global_normalization_grad is deprecated in v9
ops.get_default_graph().graph_def_versions.producer = 8
grad = gen_nn_ops._batch_norm_with_global_normalization_grad(
x, m, v, gamma, backprop, epsilon, scale_after_normalization)
dx, dm, dv, db, dg = grad
self.assertEqual(grad.dx, dx)
self.assertEqual(grad.dm, dm)
self.assertEqual(grad.dv, dv)
self.assertEqual(grad.db, db)
self.assertEqual(grad.dg, dg)
on = self._opsBatchNorm(x, m, v, beta, gamma, epsilon,
scale_after_normalization, True)
odx, odm, odv, odb, odg = gradients_impl.gradients(
[on], [x, m, v, beta, gamma], [backprop])
if scale_after_normalization:
all_grads = sess.run([dx, dm, dv, db, dg, odx, odm, odv, odb, odg])
to_check = ["dx", "dm", "dv", "db", "dg"]
else:
all_grads = sess.run([dx, dm, dv, db, odx, odm, odv, odb])
to_check = ["dx", "dm", "dv", "db"]
for i, _ in enumerate(to_check):
self.assertAllClose(
all_grads[i + len(to_check)], all_grads[i], atol=0.000001)
def testBatchNormKeepDims(self):
"""Test for tf.nn.moments(..., keep_dims=True / False).
Make sure that parameters with shape (1, 1, 1, depth) yield the same
result as parameters with shape (depth)
"""
x_shape = (3, 5, 4, 2)
    param_shape = (2,)
keep_dims_param_shape = (1, 1, 1, 2)
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
keep_dims_m = array_ops.reshape(
m, keep_dims_param_shape, name="keep_dims_m")
keep_dims_v = array_ops.reshape(
v, keep_dims_param_shape, name="keep_dims_v")
keep_dims_beta = array_ops.reshape(
beta, keep_dims_param_shape, name="keep_dims_beta")
keep_dims_gamma = array_ops.reshape(
gamma, keep_dims_param_shape, name="keep_dims_gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
keep_dims_bn = self._tfBatchNormV2(x, keep_dims_m, keep_dims_v,
keep_dims_beta, keep_dims_gamma,
epsilon,
scale_after_normalization,
shift_after_normalization)
tf_batch_norm, keep_dims_tf_batch_norm = sess.run(
[bn, keep_dims_bn])
self.assertEquals(x_shape, tf_batch_norm.shape)
self.assertEquals(x_shape, keep_dims_tf_batch_norm.shape)
self.assertAllClose(
tf_batch_norm, keep_dims_tf_batch_norm, atol=0.000001)
def _testBatchNormArbitraryShapes(self, x_shape, param_shape, atol=0.0001):
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
np_batch_norm = self._npBatchNorm(x_val, m_val, v_val, beta_val,
gamma_val, epsilon,
scale_after_normalization,
shift_after_normalization)
[tf_batch_norm] = sess.run([bn])
self.assertEquals(x_shape, np_batch_norm.shape)
self.assertEquals(x_shape, tf_batch_norm.shape)
self.assertAllClose(np_batch_norm, tf_batch_norm, atol=atol)
def testBatchNormArbitraryShapes(self):
"""Test for a variety of shapes and moments.
Batch normalization is expected to work regardless of the position and
dimensionality of the 'depth' axis/axes.
"""
self._testBatchNormArbitraryShapes((3, 3), (1, 3))
self._testBatchNormArbitraryShapes((3, 3), (3, 1))
self._testBatchNormArbitraryShapes((3, 2, 4, 5), (1, 2, 1, 1))
self._testBatchNormArbitraryShapes(
(2, 3, 2, 4, 5), (1, 1, 1, 4, 5), atol=0.005)
class SufficientStatisticsTest(test.TestCase):
def _npSuffStats(self, x, axes, shift, keep_dims):
axis = tuple(axes)
if shift is not None:
m_ss = np.sum(x - shift, axis=axis, keepdims=keep_dims)
v_ss = np.sum((x - shift) * (x - shift), axis=axis, keepdims=keep_dims)
else:
m_ss = np.sum(x, axis=axis, keepdims=keep_dims)
v_ss = np.sum(x * x, axis=axis, keepdims=keep_dims)
count = 1.0
for d in xrange(x.ndim):
if d in set(axes):
count *= x.shape[d]
if not keep_dims:
shift = np.squeeze(shift, axis=axis)
return count, m_ss, v_ss, shift
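  # The sufficient statistics returned above relate to the moments as
  #   mean     = m_ss / count (+ shift, if a shift was used)
  #   variance = v_ss / count - (m_ss / count) ** 2
  # which is the relationship exercised by NormalizeMomentsTest further down.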
def _opSuffStats(self, x, axes, shift, keep_dims):
return nn_impl.sufficient_statistics(x, axes, shift, keep_dims)
def _testSuffStats(self, x_shape, axes, shift, keep_dims, has_shape):
x_val = np.random.random_sample(x_shape).astype(np.float32)
np_c, np_m, np_v, np_s = self._npSuffStats(x_val, axes, shift, keep_dims)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
if has_shape:
x = constant_op.constant(x_val, name="x")
x.set_shape(x_shape)
op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
if shift:
tf_c, tf_m, tf_v, tf_s = sess.run([op_c, op_m, op_v, op_s])
else:
tf_c, tf_m, tf_v = sess.run([op_c, op_m, op_v])
else:
x = array_ops.placeholder(
dtype=dtypes.float32, shape=[None] * len(x_shape), name="x")
op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
if shift:
tf_c, tf_m, tf_v, tf_s = sess.run([op_c, op_m, op_v, op_s],
feed_dict={x: x_val})
else:
tf_c, tf_m, tf_v = sess.run([op_c, op_m, op_v],
feed_dict={x: x_val})
self.assertAllClose(np_c, tf_c, atol=0.000001)
self.assertAllClose(np_m, tf_m, atol=0.000001)
self.assertAllClose(np_v, tf_v, atol=0.000001)
if shift:
self.assertAllClose(np_s, tf_s, atol=0.000001)
def testSuffStats(self):
for has_shape in [True, False]:
for keep_dims in [True, False]:
for shift in [None, 1.0]:
self._testSuffStats([2, 3], [1], shift, keep_dims, has_shape)
self._testSuffStats([2, 3], [0], shift, keep_dims, has_shape)
self._testSuffStats([1, 2, 3], [0, 2], shift, keep_dims, has_shape)
class NormalizeMomentsTest(test.TestCase):
def _npNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
mean = mean_ss / counts
variance = variance_ss / counts - mean * mean
if shift is not None:
mean += shift
return mean, variance
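  # Quick worked example (illustrative only): counts=2, mean_ss=[2., 6.],
  # variance_ss=[4., 20.] and shift=None give mean=[1., 3.] and
  # variance=[4./2 - 1., 20./2 - 9.] = [1., 1.].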
def _opNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
return nn_impl.normalize_moments(counts, mean_ss, variance_ss, shift)
def _testNormalizeMoments(self, shape, shift):
counts = np.ones([1]).astype(np.float32)
mean_ss = np.random.random_sample(shape).astype(np.float32)
variance_ss = np.random.random_sample(shape).astype(np.float32)
variance_ss *= variance_ss
if shift:
shift_v = np.random.random_sample(shape).astype(np.float32)
else:
shift_v = None
npm, npv = self._npNormalizeMoments(counts, mean_ss, variance_ss, shift_v)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
tf_counts = constant_op.constant(counts, name="counts")
tf_mean_ss = constant_op.constant(mean_ss, name="mean_ss")
tf_variance_ss = constant_op.constant(variance_ss, name="variance_ss")
if shift:
tf_shift_v = constant_op.constant(shift_v, name="shift")
else:
tf_shift_v = None
opm, opv = self._opNormalizeMoments(tf_counts, tf_mean_ss,
tf_variance_ss, tf_shift_v)
tfm, tfv = sess.run([opm, opv])
self.assertAllClose(npm, tfm, atol=0.000001)
self.assertAllClose(npv, tfv, atol=0.000001)
def testNormalizeMoments(self):
for shift in [None, 4.0]:
self._testNormalizeMoments([3], shift)
self._testNormalizeMoments([2, 3], shift)
class MomentsTest(test.TestCase):
def _unweighted_moments(self, x, axes, keep_dims=False, extra_out_grads=None):
# Method to compute moments of `x` wrt `axes`.
#
# This is exposed so WeightedMomentsTest can inherit the tests and
# assertions from MomentsTest; the extra_out_grads argument allows
# its inherited gradient tests to assert gradients against the
# weights as well as the input values.
return nn_impl.moments(x, axes, keep_dims=keep_dims)
def RunMomentTestWithDynamicShape(self, shape, axes, keep_dims, dtype):
with self.test_session():
# shape = [batch, width, height, depth]
assert len(shape) == 4
x_numpy = np.random.normal(size=shape).astype(np.float32)
x = array_ops.placeholder(dtype, shape=[None] * len(shape))
mean, var = self._unweighted_moments(x, axes, keep_dims=keep_dims)
num_elements = np.prod([shape[i] for i in axes])
ax = tuple(axes)
expected_mean = np.sum(x_numpy, axis=ax,
keepdims=keep_dims) / num_elements
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = np.sum(np.multiply(x_numpy, x_numpy),
axis=ax,
keepdims=keep_dims) / num_elements
expected_variance = expected_x_squared - expected_mean_squared
# Check that the moments are correct.
self.assertAllCloseAccordingToType(
expected_mean, mean.eval(feed_dict={x: x_numpy}))
self.assertAllCloseAccordingToType(
expected_variance, var.eval(feed_dict={x: x_numpy}))
def RunMomentTest(self, shape, axes, keep_dims, dtype):
with self.test_session():
# shape = [batch, width, height, depth]
assert len(shape) == 4
x_numpy = np.random.normal(size=shape).astype(np.float32)
x = math_ops.cast(constant_op.constant(x_numpy), dtype=dtype)
# Compute the expected values at high precision since the method
# is prone to catastrophic cancellation:
x_numpy = x_numpy.astype(np.float128)
mean, var = self._unweighted_moments(x, axes, keep_dims=keep_dims)
num_elements = np.prod([shape[i] for i in axes])
ax = tuple(axes)
expected_mean = np.sum(x_numpy, axis=ax,
keepdims=keep_dims) / num_elements
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = np.sum(np.multiply(x_numpy, x_numpy),
axis=ax,
keepdims=keep_dims) / num_elements
expected_variance = expected_x_squared - expected_mean_squared
# Check that the moments are correct.
self.assertAllCloseAccordingToType(expected_mean, mean.eval())
self.assertAllCloseAccordingToType(expected_variance, var.eval())
def testBasic(self):
for keep_dims in [False, True]:
for dtype in [dtypes.float32, dtypes.float16]:
self.RunMomentTest(
shape=[2, 3, 5, 4], axes=[0], keep_dims=keep_dims, dtype=dtype)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4], axes=[0], keep_dims=keep_dims, dtype=dtype)
def testGlobalNormalization(self):
for keep_dims in [False, True]:
for dtype in [dtypes.float32, dtypes.float16]:
self.RunMomentTest(
shape=[2, 3, 5, 4],
axes=[0, 1, 2],
keep_dims=keep_dims,
dtype=dtype)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4],
axes=[0, 1, 2],
keep_dims=keep_dims,
dtype=dtype)
def testAxes(self):
for keep_dims in [False, True]:
for dtype in [dtypes.float32, dtypes.float16]:
self.RunMomentTest(
shape=[2, 3, 5, 4],
axes=[1, 2, 3],
keep_dims=keep_dims,
dtype=dtype)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4],
axes=[1, 2, 3],
keep_dims=keep_dims,
dtype=dtype)
def _testGlobalGradient(self, from_y="mean"):
with self.test_session():
x_shape = [3, 5, 4, 2]
x_val = np.random.random_sample(x_shape).astype(np.float64)
x = constant_op.constant(x_val)
x.set_shape(x_shape)
axes = [0, 1, 2]
y_shape = [2] # Depth of x
inputs_to_compute_gradients_for = [x]
out_mean, out_var = self._unweighted_moments(
x, axes, extra_out_grads=inputs_to_compute_gradients_for)
if from_y == "mean":
y = out_mean
elif from_y == "var":
y = out_var
for (i, v) in enumerate(inputs_to_compute_gradients_for):
err = gradient_checker.compute_gradient_error(v,
v.get_shape().as_list(),
y, y_shape)
print("Moments %s gradient err vs input %d = %g" % (from_y, i, err))
self.assertLess(err, 1e-11)
def testMeanGlobalGradient(self):
self._testGlobalGradient(from_y="mean")
def testVarGlobalGradient(self):
self._testGlobalGradient(from_y="var")
class WeightedMomentsTest(MomentsTest):
"""Tests for nn.weighted_moments.
Note that this test inherits from MomentsTest, inheriting all its
test methods!
It modifies MomentsTest in two ways:
a) By overriding _unweighted_moments, all the codepaths in
MomentsTest are executed, but with calls to tf.nn.moments()
replaced by calls to tf.nn.weighted_moments() with a constant
weight of 1.
b) By overriding RunMomentTest and RunMomentTestWithDynamicShape,
this test adds multiple additional calls to
RunWeightedMomentsTest() to exercise correctness with
non-constant weights and varying broadcasting situations. (It
also continues to call MomentsTest.Run(Weighted)?MomentsTest as
well.)
"""
def _unweighted_moments(self, x, axes, keep_dims=False, extra_out_grads=None):
weights = constant_op.constant(1, dtype=x.dtype)
if extra_out_grads is not None:
# We want to assert gradients WRT weights as well as X!
extra_out_grads.append(weights)
return nn_impl.weighted_moments(x, axes, weights, keep_dims=keep_dims)
def RunMomentTest(self, shape, axes, keep_dims, dtype, dynshapes=False):
if not dynshapes:
super(WeightedMomentsTest, self).RunMomentTest(shape, axes, keep_dims,
dtype)
else:
super(WeightedMomentsTest, self).RunMomentTestWithDynamicShape(shape,
axes,
keep_dims,
dtype)
# 1:1 weights and inputs
self.RunWeightedMomentTest(shape, shape, axes, keep_dims, dtype)
# Various broadcasting combinations
for idx in range(len(shape)):
# try broadcasting weights in all positions
weight_shape = [1] * len(shape)
weight_shape[idx] = shape[idx]
self.RunWeightedMomentTest(shape, weight_shape, axes, keep_dims, dtype)
# Also try broadcasting with a suffix of length n
weight_shape = shape[-(idx + 1):]
self.RunWeightedMomentTest(
shape, weight_shape, axes, keep_dims, dtype, dynshapes=dynshapes)
def RunMomentTestWithDynamicShape(self, shape, axes, keep_dims, dtype):
self.RunMomentTest(shape, axes, keep_dims, dtype, dynshapes=True)
def RunWeightedMomentTest(self,
shape,
weights_shape,
axes,
keep_dims,
dtype,
dynshapes=False):
with self.test_session() as s:
x_numpy = np.random.normal(size=shape).astype(np.float32)
weights_numpy = np.absolute( # weights must be positive
np.random.normal(
size=weights_shape, loc=1.0).astype(np.float32))
# Expand the numpy version to higher precision
x_numpy = x_numpy.astype(np.float128)
weights_numpy = weights_numpy.astype(np.float128)
x_shape = [None] * len(shape) if dynshapes else shape
weights_shape = ([None] * len(weights_shape) if dynshapes else
weights_shape)
x = array_ops.placeholder(dtype, shape=x_shape)
weights = array_ops.placeholder(dtype, shape=weights_shape)
mean, var = nn_impl.weighted_moments(
x, axes, weights, keep_dims=keep_dims)
ax = tuple(axes)
def _np_weighted_sum(v):
return np.sum(weights_numpy * v, axis=ax, keepdims=keep_dims)
weight_sum = _np_weighted_sum(np.ones_like(x_numpy))
expected_mean = _np_weighted_sum(x_numpy) / weight_sum
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = (_np_weighted_sum(np.multiply(x_numpy, x_numpy)) /
weight_sum)
expected_variance = expected_x_squared - expected_mean_squared
mean_v, var_v = s.run([mean, var],
feed_dict={x: x_numpy,
weights: weights_numpy})
self.assertAllCloseAccordingToType(expected_mean, mean_v)
self.assertAllCloseAccordingToType(expected_variance, var_v)
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.testing import assert_allclose
from ... import units as u
from ...tests.helper import pytest
from ..angles import Longitude, Latitude, Angle
from ..distances import Distance
from ..representation import (SphericalRepresentation,
UnitSphericalRepresentation,
CartesianRepresentation,
CylindricalRepresentation,
PhysicsSphericalRepresentation)
def assert_allclose_quantity(q1, q2):
assert_allclose(q1.value, q2.to(q1.unit).value)
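# assert_allclose_quantity compares two quantities after converting the second
# to the unit of the first, e.g. (illustrative):
#   assert_allclose_quantity(1. * u.m, 100. * u.cm)  # passes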
class TestSphericalRepresentation(object):
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = SphericalRepresentation()
def test_init_quantity(self):
s3 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc)
assert s3.lon == 8. * u.hourangle
assert s3.lat == 5. * u.deg
assert s3.distance == 10 * u.kpc
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
assert isinstance(s3.distance, Distance)
def test_init_lonlat(self):
s2 = SphericalRepresentation(Longitude(8, u.hour),
Latitude(5, u.deg),
Distance(10, u.kpc))
assert s2.lon == 8. * u.hourangle
assert s2.lat == 5. * u.deg
assert s2.distance == 10. * u.kpc
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
assert isinstance(s2.distance, Distance)
# also test that wrap_angle is preserved
s3 = SphericalRepresentation(Longitude(-90, u.degree,
wrap_angle=180*u.degree),
Latitude(-45, u.degree),
Distance(1., u.Rsun))
assert s3.lon == -90. * u.degree
assert s3.lon.wrap_angle == 180 * u.degree
def test_init_array(self):
s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert_allclose(s1.distance.kpc, [1, 2])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
assert isinstance(s1.distance, Distance)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
distance = Distance([1, 2] * u.kpc)
s1 = SphericalRepresentation(lon=lon, lat=lat, distance=distance, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
distance[:] = [8, 9] * u.Mpc
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
assert_allclose_quantity(distance, s1.distance)
def test_init_float32_array(self):
"""Regression test against #2983"""
lon = Longitude(np.float32([1., 2.]), u.degree)
lat = Latitude(np.float32([3., 4.]), u.degree)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
assert s1.lon.dtype == np.float32
assert s1.lat.dtype == np.float32
assert s1._values['lon'].dtype == np.float32
assert s1._values['lat'].dtype == np.float32
def test_reprobj(self):
s1 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc)
s2 = SphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8. * u.hourangle)
assert_allclose_quantity(s2.lat, 5. * u.deg)
assert_allclose_quantity(s2.distance, 10 * u.kpc)
def test_broadcasting(self):
s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg,
distance=10 * u.kpc)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
assert_allclose_quantity(s1.distance, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = SphericalRepresentation(lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc)
assert exc.value.args[0] == "Input parameters lon, lat, and distance cannot be broadcast"
# We deliberately disallow anything that is not directly a Quantity in
# these low-level classes, so we now check that initializing from a
# string or mixed unit lists raises a TypeError.
def test_init_str(self):
with pytest.raises(TypeError) as exc:
s1 = SphericalRepresentation(lon='2h6m3.3s',
lat='0.1rad',
distance=1 * u.kpc)
assert exc.value.args[0] == "lon should be a Quantity, Angle, or Longitude"
def test_mixed_units(self):
with pytest.raises(TypeError) as exc:
s1 = SphericalRepresentation(lon=[8 * u.hourangle, 135 * u.deg],
lat=[5 * u.deg, (6 * np.pi / 180) * u.rad],
distance=1 * u.kpc)
assert exc.value.args[0] == "lon should be a Quantity, Angle, or Longitude"
def test_readonly(self):
s1 = SphericalRepresentation(lon=8 * u.hourangle,
lat=5 * u.deg,
distance=1. * u.kpc)
with pytest.raises(AttributeError):
s1.lon = 1. * u.deg
with pytest.raises(AttributeError):
s1.lat = 1. * u.deg
with pytest.raises(AttributeError):
s1.distance = 1. * u.kpc
def test_getitem(self):
s = SphericalRepresentation(lon=np.arange(10) * u.deg,
lat=-np.arange(10) * u.deg,
distance=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.distance, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = SphericalRepresentation(lon=1 * u.deg,
lat=-2 * u.deg,
distance=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
class TestUnitSphericalRepresentation(object):
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = UnitSphericalRepresentation()
def test_init_quantity(self):
s3 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
assert s3.lon == 8. * u.hourangle
assert s3.lat == 5. * u.deg
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
def test_init_lonlat(self):
s2 = UnitSphericalRepresentation(Longitude(8, u.hour),
Latitude(5, u.deg))
assert s2.lon == 8. * u.hourangle
assert s2.lat == 5. * u.deg
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
def test_init_array(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
def test_reprobj(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
s2 = UnitSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8. * u.hourangle)
assert_allclose_quantity(s2.lat, 5. * u.deg)
def test_broadcasting(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = UnitSphericalRepresentation(lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg)
assert exc.value.args[0] == "Input parameters lon and lat cannot be broadcast"
# We deliberately disallow anything that is not directly a Quantity in
# these low-level classes, so we now check that initializing from a
# string or mixed unit lists raises a TypeError.
def test_init_str(self):
with pytest.raises(TypeError) as exc:
s1 = UnitSphericalRepresentation(lon='2h6m3.3s', lat='0.1rad')
assert exc.value.args[0] == "lon should be a Quantity, Angle, or Longitude"
def test_mixed_units(self):
with pytest.raises(TypeError) as exc:
s1 = UnitSphericalRepresentation(lon=[8 * u.hourangle, 135 * u.deg],
lat=[5 * u.deg, (6 * np.pi / 180) * u.rad])
assert exc.value.args[0] == "lon should be a Quantity, Angle, or Longitude"
def test_readonly(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle,
lat=5 * u.deg)
with pytest.raises(AttributeError):
s1.lon = 1. * u.deg
with pytest.raises(AttributeError):
s1.lat = 1. * u.deg
def test_getitem(self):
s = UnitSphericalRepresentation(lon=np.arange(10) * u.deg,
lat=-np.arange(10) * u.deg)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
def test_getitem_scalar(self):
s = UnitSphericalRepresentation(lon=1 * u.deg,
lat=-2 * u.deg)
with pytest.raises(TypeError):
s_slc = s[0]
class TestPhysicsSphericalRepresentation(object):
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = PhysicsSphericalRepresentation()
def test_init_quantity(self):
s3 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc)
assert s3.phi == 8. * u.hourangle
assert s3.theta == 5. * u.deg
assert s3.r == 10 * u.kpc
assert isinstance(s3.phi, Angle)
assert isinstance(s3.theta, Angle)
assert isinstance(s3.r, Distance)
def test_init_phitheta(self):
s2 = PhysicsSphericalRepresentation(Angle(8, u.hour),
Angle(5, u.deg),
Distance(10, u.kpc))
assert s2.phi == 8. * u.hourangle
assert s2.theta == 5. * u.deg
assert s2.r == 10. * u.kpc
assert isinstance(s2.phi, Angle)
assert isinstance(s2.theta, Angle)
assert isinstance(s2.r, Distance)
def test_init_array(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=[1, 2] * u.kpc)
assert_allclose(s1.phi.degree, [120, 135])
assert_allclose(s1.theta.degree, [5, 6])
assert_allclose(s1.r.kpc, [1, 2])
assert isinstance(s1.phi, Angle)
assert isinstance(s1.theta, Angle)
assert isinstance(s1.r, Distance)
def test_init_array_nocopy(self):
phi = Angle([8, 9] * u.hourangle)
theta = Angle([5, 6] * u.deg)
r = Distance([1, 2] * u.kpc)
s1 = PhysicsSphericalRepresentation(phi=phi, theta=theta, r=r, copy=False)
phi[:] = [1, 2] * u.rad
theta[:] = [3, 4] * u.arcmin
r[:] = [8, 9] * u.Mpc
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(theta, s1.theta)
assert_allclose_quantity(r, s1.r)
def test_reprobj(self):
s1 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.phi, 8. * u.hourangle)
assert_allclose_quantity(s2.theta, 5. * u.deg)
assert_allclose_quantity(s2.r, 10 * u.kpc)
def test_broadcasting(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=10 * u.kpc)
assert_allclose_quantity(s1.phi, [120, 135] * u.degree)
assert_allclose_quantity(s1.theta, [5, 6] * u.degree)
assert_allclose_quantity(s1.r, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = PhysicsSphericalRepresentation(phi=[8, 9, 10] * u.hourangle,
theta=[5, 6] * u.deg,
r=[1, 2] * u.kpc)
assert exc.value.args[0] == "Input parameters phi, theta, and r cannot be broadcast"
# We deliberately disallow anything that is not directly a Quantity in
# these low-level classes, so we now check that initializing from a
# string or mixed unit lists raises a TypeError.
def test_init_str(self):
with pytest.raises(TypeError) as exc:
s1 = PhysicsSphericalRepresentation(phi='2h6m3.3s', theta='0.1rad', r=1 * u.kpc)
assert exc.value.args[0] == "phi should be a Quantity or Angle"
def test_mixed_units(self):
with pytest.raises(TypeError) as exc:
s1 = PhysicsSphericalRepresentation(phi=[8 * u.hourangle, 135 * u.deg],
theta=[5 * u.deg, (6 * np.pi / 180) * u.rad],
r=[1. * u.kpc, 500 * u.pc])
assert exc.value.args[0] == "phi should be a Quantity or Angle"
def test_readonly(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=[10, 20] * u.kpc)
with pytest.raises(AttributeError):
s1.phi = 1. * u.deg
with pytest.raises(AttributeError):
s1.theta = 1. * u.deg
with pytest.raises(AttributeError):
s1.r = 1. * u.kpc
def test_getitem(self):
s = PhysicsSphericalRepresentation(phi=np.arange(10) * u.deg,
theta=np.arange(5, 15) * u.deg,
r=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.phi, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.theta, [7, 9, 11] * u.deg)
assert_allclose_quantity(s_slc.r, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = PhysicsSphericalRepresentation(phi=1 * u.deg,
theta=2 * u.deg,
r=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
class TestCartesianRepresentation(object):
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CartesianRepresentation()
def test_init_quantity(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_singleunit(self):
        s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc,
y=[2, 3, 4] * u.Mpc,
z=[3, 4, 5] * u.kpc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.Mpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, [1, 2, 3])
assert_allclose(s1.y.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_one_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.pc
assert s1.z.unit is u.pc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_one_array_size_fail(self):
with pytest.raises(ValueError) as exc:
s1 = CartesianRepresentation(x=[1, 2, 3, 4] * u.pc)
# exception text differs on Python 2 and Python 3
if hasattr(exc.value, 'args'):
assert exc.value.args[0].startswith("too many values to unpack")
else:
            # Python 2.6 doesn't have `args`
assert exc.value == 'too many values to unpack'
def test_init_one_array_yz_fail(self):
with pytest.raises(ValueError) as exc:
s1 = CartesianRepresentation(x=[1, 2, 3, 4] * u.pc, y=[1, 2] * u.pc)
assert exc.value.args[0] == "x, y, and z are required to instantiate CartesianRepresentation"
def test_init_array_nocopy(self):
x = [8, 9, 10] * u.pc
y = [5, 6, 7] * u.Mpc
z = [2, 3, 4] * u.kpc
s1 = CartesianRepresentation(x=x, y=y, z=z, copy=False)
x[:] = [1, 2, 3] * u.kpc
y[:] = [9, 9, 8] * u.kpc
z[:] = [1, 2, 1] * u.kpc
assert_allclose_quantity(x, s1.x)
assert_allclose_quantity(y, s1.y)
assert_allclose_quantity(z, s1.z)
def test_reprobj(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
s2 = CartesianRepresentation.from_representation(s1)
assert s2.x == 1 * u.kpc
assert s2.y == 2 * u.kpc
assert s2.z == 3 * u.kpc
def test_broadcasting(self):
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=5 * u.kpc)
assert s1.x.unit == u.kpc
assert s1.y.unit == u.kpc
assert s1.z.unit == u.kpc
assert_allclose(s1.x.value, [1, 2])
assert_allclose(s1.y.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6, 7] * u.kpc)
assert exc.value.args[0] == "Input parameters x, y, and z cannot be broadcast"
# We deliberately disallow anything that is not directly a Quantity in
# these low-level classes, so we now check that initializing from a
# string or mixed unit lists raises a TypeError.
def test_mixed_units(self):
with pytest.raises(TypeError) as exc:
s1 = CartesianRepresentation(x=[1 * u.kpc, 2 * u.Mpc],
y=[3 * u.kpc, 4 * u.pc],
z=[5. * u.cm, 6 * u.m])
assert exc.value.args[0] == "x should be a Quantity"
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.x = 1. * u.kpc
with pytest.raises(AttributeError):
s1.y = 1. * u.kpc
with pytest.raises(AttributeError):
s1.z = 1. * u.kpc
def test_xyz(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert isinstance(s1.xyz, u.Quantity)
assert s1.xyz.unit is u.kpc
assert_allclose(s1.xyz.value, [1, 2, 3])
def test_unit_mismatch(self):
q_len = u.Quantity([1], u.km)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_nonlen, y=q_len, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_nonlen, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_len, z=q_nonlen)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
def test_unit_non_length(self):
s1 = CartesianRepresentation(x=1 * u.kg, y=2 * u.kg, z=3 * u.kg)
s2 = CartesianRepresentation(x=1 * u.km / u.s, y=2 * u.km / u.s, z=3 * u.km / u.s)
banana = u.def_unit('banana')
s3 = CartesianRepresentation(x=1 * banana, y=2 * banana, z=3 * banana)
def test_getitem(self):
s = CartesianRepresentation(x=np.arange(10) * u.m,
y=-np.arange(10) * u.m,
z=3 * u.km)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
def test_getitem_scalar(self):
s = CartesianRepresentation(x=1 * u.m,
y=-2 * u.m,
z=3 * u.km)
with pytest.raises(TypeError):
s_slc = s[0]
class TestCylindricalRepresentation(object):
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CylindricalRepresentation()
def test_init_quantity(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
assert s1.rho.unit is u.kpc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, 1)
assert_allclose(s1.phi.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CylindricalRepresentation(rho=[1, 2, 3] * u.pc,
phi=[2, 3, 4] * u.deg,
z=[3, 4, 5] * u.kpc)
assert s1.rho.unit is u.pc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, [1, 2, 3])
assert_allclose(s1.phi.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_array_nocopy(self):
rho = [8, 9, 10] * u.pc
phi = [5, 6, 7] * u.deg
z = [2, 3, 4] * u.kpc
s1 = CylindricalRepresentation(rho=rho, phi=phi, z=z, copy=False)
rho[:] = [9, 2, 3] * u.kpc
phi[:] = [1, 2, 3] * u.arcmin
z[:] = [-2, 3, 8] * u.kpc
assert_allclose_quantity(rho, s1.rho)
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(z, s1.z)
def test_reprobj(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
s2 = CylindricalRepresentation.from_representation(s1)
assert s2.rho == 1 * u.kpc
assert s2.phi == 2 * u.deg
assert s2.z == 3 * u.kpc
def test_broadcasting(self):
s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=5 * u.kpc)
assert s1.rho.unit == u.kpc
assert s1.phi.unit == u.deg
assert s1.z.unit == u.kpc
assert_allclose(s1.rho.value, [1, 2])
assert_allclose(s1.phi.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=[5, 6, 7] * u.kpc)
assert exc.value.args[0] == "Input parameters rho, phi, and z cannot be broadcast"
# We deliberately disallow anything that is not directly a Quantity in
# these low-level classes, so we now check that initializing from a
# string or mixed unit lists raises a TypeError.
def test_mixed_units(self):
with pytest.raises(TypeError) as exc:
s1 = CylindricalRepresentation(rho=[1 * u.kpc, 2 * u.Mpc],
phi=[3 * u.deg, 4 * u.arcmin],
z=[5. * u.cm, 6 * u.m])
assert exc.value.args[0] == "phi should be a Quantity or Angle"
def test_readonly(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc,
phi=20 * u.deg,
z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.rho = 1. * u.kpc
with pytest.raises(AttributeError):
s1.phi = 20 * u.deg
with pytest.raises(AttributeError):
s1.z = 1. * u.kpc
    def test_unit_mismatch(self):
q_len = u.Quantity([1], u.kpc)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_nonlen, phi=10 * u.deg, z=q_len)
assert exc.value.args[0] == "rho and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_len, phi=10 * u.deg, z=q_nonlen)
assert exc.value.args[0] == "rho and z should have matching physical types"
def test_getitem(self):
s = CylindricalRepresentation(rho=np.arange(10) * u.pc,
phi=-np.arange(10) * u.deg,
z=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.rho, [2, 4, 6] * u.pc)
assert_allclose_quantity(s_slc.phi, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.z, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = CylindricalRepresentation(rho=1 * u.pc,
phi=-2 * u.deg,
z=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_cartesian_spherical_roundtrip():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s2 = SphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = SphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.lon, s4.lon)
assert_allclose_quantity(s2.lat, s4.lat)
assert_allclose_quantity(s2.distance, s4.distance)
def test_cartesian_physics_spherical_roundtrip():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
def test_spherical_physics_spherical_roundtrip():
s1 = SphericalRepresentation(lon=3 * u.deg, lat=4 * u.deg, distance=3 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s3.lon)
assert_allclose_quantity(s1.lat, s3.lat)
assert_allclose_quantity(s1.distance, s3.distance)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
assert_allclose_quantity(s1.lon, s4.phi)
assert_allclose_quantity(s1.lat, 90. * u.deg - s4.theta)
assert_allclose_quantity(s1.distance, s4.r)
def test_cartesian_cylindrical_roundtrip():
s1 = CartesianRepresentation(x=np.array([1., 2000.]) * u.kpc,
y=np.array([3000., 4.]) * u.pc,
z=np.array([5., 600.]) * u.cm)
s2 = CylindricalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = CylindricalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.rho, s4.rho)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.z, s4.z)
def test_unit_spherical_roundtrip():
s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg,
lat=[5., 6.] * u.arcmin)
s2 = CartesianRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = UnitSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s4.lon)
assert_allclose_quantity(s1.lat, s4.lat)
def test_representation_repr():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert repr(r1) == ('<SphericalRepresentation (lon, lat, distance) in (deg, deg, kpc)\n'
' (1.0, 2.5, 1.0)>')
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert repr(r2) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' (1.0, 2.0, 3.0)>')
r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc)
assert repr(r3) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' [(1.0, 4.0, 9.0), (2.0, 4.0, 10.0), (3.0, 4.0, 11.0)]>')
def test_representation_str():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert str(r1) == '(1.0, 2.5, 1.0) (deg, deg, kpc)'
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert str(r2) == '(1.0, 2.0, 3.0) kpc'
r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc)
assert str(r3) == '[(1.0, 4.0, 9.0) (2.0, 4.0, 10.0) (3.0, 4.0, 11.0)] kpc'
def test_subclass_representation():
from ...utils import OrderedDict
from ..builtin_frames import ICRS
class Longitude180(Longitude):
def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs):
self = super(Longitude180, cls).__new__(cls, angle, unit=unit,
wrap_angle=wrap_angle, **kwargs)
return self
class SphericalWrap180Representation(SphericalRepresentation):
attr_classes = OrderedDict([('lon', Longitude180),
('lat', Latitude),
('distance', u.Quantity)])
recommended_units = {'lon': u.deg, 'lat': u.deg}
class ICRSWrap180(ICRS):
frame_specific_representation_info = ICRS._frame_specific_representation_info.copy()
frame_specific_representation_info['sphericalwrap180'] = \
frame_specific_representation_info['spherical']
default_representation = SphericalWrap180Representation
c = ICRSWrap180(ra=-1 * u.deg, dec=-2 * u.deg, distance=1 * u.m)
assert c.ra.value == -1
assert c.ra.unit is u.deg
assert c.dec.value == -2
assert c.dec.unit is u.deg
|
|
"""Contains the WidgetPropertiesDialog class."""
__all__ = ['WidgetPropertiesDialog']
from direct.showbase.TkGlobal import *
import Pmw, sys
"""
TODO:
Checkboxes for None?
Floaters to adjust float values
OK and Cancel to allow changes to be delayed
Something other than Return to accept a new value
"""
class WidgetPropertiesDialog(Toplevel):
"""Class to open dialogs to adjust widget properties."""
def __init__(self, propertyDict, propertyList = None, parent = None,
title = 'Widget Properties'):
"""Initialize a dialog.
Arguments:
            propertyDict -- a dictionary of properties to be edited
            propertyList -- an optional ordering of the properties shown
                            (defaults to the sorted dictionary keys)
            parent -- a parent window (the application window)
            title -- the dialog title
"""
# Record property list
self.propertyDict = propertyDict
self.propertyList = propertyList
if self.propertyList is None:
self.propertyList = list(self.propertyDict.keys())
self.propertyList.sort()
# Use default parent if none specified
if not parent:
if sys.version_info >= (3, 0):
import tkinter
parent = tkinter._default_root
else:
import Tkinter
parent = Tkinter._default_root
# Create toplevel window
Toplevel.__init__(self, parent)
self.transient(parent)
# Set title
if title:
self.title(title)
# Record parent
self.parent = parent
# Initialize modifications
self.modifiedDict = {}
# Create body
body = Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
        # Create OK and Cancel buttons
self.buttonbox()
# Initialize window state
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
parent.winfo_rooty()+50))
self.initial_focus.focus_set()
self.wait_window(self)
def destroy(self):
"""Destroy the window"""
self.propertyDict = {}
self.initial_focus = None
# Clean up balloons!
for balloon in self.balloonList:
balloon.withdraw()
Toplevel.destroy(self)
#
# construction hooks
def body(self, master):
"""create dialog body.
return entry that should have initial focus.
This method should be overridden, and is called
by the __init__ method.
"""
count = 0
entryList = []
self.balloonList = []
for property in self.propertyList:
propertySet = self.propertyDict[property]
# Widget
widget = propertySet.get('widget', None)
# Get initial value
initialvalue = widget[property]
# Type of entry
entryType = propertySet.get('type', 'real')
# Is None an allowable value?
fAllowNone = propertySet.get('fNone', 0)
# Help string specified?
helpString = propertySet.get('help', None)
# Create label
label = Label(master, text=property, justify=LEFT)
            label.grid(row=count, column=0, padx=5, sticky=W)
            # Create entry
            entry = Pmw.EntryField(master, entry_justify='right')
            entry.grid(row=count, column=1, padx=5, sticky=W+E)
if initialvalue is None:
entry.insert(0, 'None')
else:
entry.insert(0, initialvalue)
# Create balloon for help
            balloon = Pmw.Balloon(state='balloon')
self.balloonList.append(balloon)
            # extra info if None is an allowed value
if helpString is None:
if fAllowNone:
extra = ' or None'
else:
extra = ''
# Set up help string and validator based upon type
if entryType == 'real':
# Only allow real numbers
if fAllowNone:
entry['validate'] = { 'validator': self.realOrNone }
else:
entry['validate'] = { 'validator': 'real' }
if helpString is None:
helpString = 'Enter a floating point number' + extra + '.'
elif entryType == 'integer':
# Only allow integer values
if fAllowNone:
entry['validate'] = { 'validator': self.intOrNone }
else:
entry['validate'] = { 'validator': 'integer' }
if helpString is None:
helpString = 'Enter an integer' + extra + '.'
else:
# Anything goes with a string widget
if helpString is None:
helpString = 'Enter a string' + extra + '.'
# Bind balloon with help string to entry
balloon.bind(entry, helpString)
# Create callback to execute whenever a value is changed
modifiedCallback = (lambda f=self.modified, w=widget, e=entry,
p=property, t=entryType, fn=fAllowNone:
f(w, e, p, t, fn))
entry['modifiedcommand'] = modifiedCallback
            # Keep track of the entries
entryList.append(entry)
count += 1
# Set initial focus
if len(entryList) > 0:
entry = entryList[0]
entry.select_range(0, END)
# Set initial focus to first entry in the list
return entryList[0]
else:
# Just set initial focus to self
return self
def modified(self, widget, entry, property, type, fNone):
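        # Remember which entries changed so validateChanges() can apply them.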
self.modifiedDict[property] = (widget, entry, type, fNone)
def buttonbox(self):
"""add standard button box buttons.
"""
box = Frame(self)
# Create buttons
w = Button(box, text="OK", width=10, command=self.ok)
w.pack(side=LEFT, padx=5, pady=5)
# Create buttons
w = Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side=LEFT, padx=5, pady=5)
# Bind commands
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
# Pack
box.pack()
def realOrNone(self, val):
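        # Pmw validator: accept a real number or the text 'none'
        # (a partially typed 'none' is reported as Pmw.PARTIAL).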
val = val.lower()
if 'none'.find(val) != -1:
if val == 'none':
return Pmw.OK
else:
return Pmw.PARTIAL
return Pmw.realvalidator(val)
def intOrNone(self, val):
val = val.lower()
if 'none'.find(val) != -1:
if val == 'none':
return Pmw.OK
else:
return Pmw.PARTIAL
return Pmw.integervalidator(val)
#
# standard button semantics
def ok(self, event=None):
self.withdraw()
self.update_idletasks()
self.validateChanges()
self.apply()
self.cancel()
def cancel(self, event=None):
# put focus back to the parent window
self.parent.focus_set()
self.destroy()
def validateChanges(self):
for property in self.modifiedDict:
tuple = self.modifiedDict[property]
widget = tuple[0]
entry = tuple[1]
type = tuple[2]
fNone = tuple[3]
value = entry.get()
lValue = value.lower()
if 'none'.find(lValue) != -1:
if fNone and (lValue == 'none'):
widget[property] = None
else:
if type == 'real':
value = float(value)
elif type == 'integer':
value = int(value)
widget[property] = value
def apply(self):
"""process the data
This method is called automatically to process the data, *after*
the dialog is destroyed. By default, it does nothing.
"""
pass # override
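# A minimal usage sketch (an assumption, not part of the original module): each
# key of propertyDict names a widget option, and its value is a dict whose
# 'widget', 'type', 'fNone' and 'help' entries are read by body() above.
if __name__ == '__main__':
    root = Tk()  # Tk and Button are assumed to come in via the TkGlobal star import
    Pmw.initialise(root)
    demoButton = Button(root, text='Press me')
    demoButton.pack()
    # Let the user edit the button's 'text' option through the dialog.
    WidgetPropertiesDialog(
        {'text': {'widget': demoButton,
                  'type': 'string',
                  'help': 'Label shown on the button'}},
        parent=root)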
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from oslo.config import cfg
# TODO(deva): import MultipleResultsFound and handle it appropriately
from sqlalchemy.orm.exc import NoResultFound
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic.db import api
from ironic.db.sqlalchemy import models
from ironic import objects
from ironic.openstack.common.db.sqlalchemy import session as db_session
from ironic.openstack.common.db.sqlalchemy import utils as db_utils
from ironic.openstack.common import log
from ironic.openstack.common import uuidutils
CONF = cfg.CONF
CONF.import_opt('connection',
'ironic.openstack.common.db.sqlalchemy.session',
group='database')
LOG = log.getLogger(__name__)
get_engine = db_session.get_engine
get_session = db_session.get_session
def get_backend():
"""The backend is this module itself."""
return Connection()
def model_query(model, *args, **kwargs):
"""Query helper for simpler session usage.
:param session: if present, the session to use
"""
session = kwargs.get('session') or get_session()
query = session.query(model, *args)
return query
def add_identity_filter(query, value):
"""Adds an identity filter to a query.
Filters results by ID, if supplied value is a valid integer.
Otherwise attempts to filter results by UUID.
:param query: Initial query to add filter to.
:param value: Value for filtering results by.
:return: Modified query.
"""
if utils.is_int_like(value):
return query.filter_by(id=value)
elif uuidutils.is_uuid_like(value):
return query.filter_by(uuid=value)
else:
raise exception.InvalidIdentity(identity=value)
def add_filter_by_many_identities(query, model, values):
"""Adds an identity filter to a query for values list.
Filters results by ID, if supplied values contain a valid integer.
Otherwise attempts to filter results by UUID.
:param query: Initial query to add filter to.
:param model: Model for filter.
:param values: Values for filtering results by.
:return: tuple (Modified query, filter field name).
"""
if not values:
raise exception.InvalidIdentity(identity=values)
value = values[0]
if utils.is_int_like(value):
return query.filter(getattr(model, 'id').in_(values)), 'id'
elif uuidutils.is_uuid_like(value):
return query.filter(getattr(model, 'uuid').in_(values)), 'uuid'
else:
raise exception.InvalidIdentity(identity=value)
def add_port_filter(query, value):
"""Adds a port-specific filter to a query.
Filters results by address, if supplied value is a valid MAC
address. Otherwise attempts to filter results by identity.
:param query: Initial query to add filter to.
:param value: Value for filtering results by.
:return: Modified query.
"""
if utils.is_valid_mac(value):
return query.filter_by(address=value)
else:
return add_identity_filter(query, value)
def add_port_filter_by_node(query, value):
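    """Filter ports by node ID if `value` looks like an integer, else by node UUID."""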
if utils.is_int_like(value):
return query.filter_by(node_id=value)
else:
query = query.join(models.Node,
models.Port.node_id == models.Node.id)
return query.filter(models.Node.uuid == value)
def add_node_filter_by_chassis(query, value):
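    """Filter nodes by chassis ID if `value` looks like an integer, else by chassis UUID."""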
if utils.is_int_like(value):
return query.filter_by(chassis_id=value)
else:
query = query.join(models.Chassis,
models.Node.chassis_id == models.Chassis.id)
return query.filter(models.Chassis.uuid == value)
def _check_port_change_forbidden(port, session):
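    # A port may not be modified while its parent node holds a reservation.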
node_id = port['node_id']
if node_id is not None:
query = model_query(models.Node, session=session)
query = query.filter_by(id=node_id)
node_ref = query.one()
if node_ref['reservation'] is not None:
raise exception.NodeLocked(node=node_id)
def _paginate_query(model, limit=None, marker=None, sort_key=None,
sort_dir=None, query=None):
if not query:
query = model_query(model)
sort_keys = ['id']
if sort_key and sort_key not in sort_keys:
sort_keys.insert(0, sort_key)
query = db_utils.paginate_query(query, model, limit, sort_keys,
marker=marker, sort_dir=sort_dir)
return query.all()
def _check_node_already_locked(query, query_by):
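    # Raise NodeLocked for the first matching node that already has a
    # reservation (SQLAlchemy renders `!= None` as an SQL "IS NOT NULL").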
no_reserv = None
locked_ref = query.filter(models.Node.reservation != no_reserv).first()
if locked_ref:
raise exception.NodeLocked(node=locked_ref[query_by])
def _handle_node_lock_not_found(nodes, query, query_by):
refs = query.all()
existing = [ref[query_by] for ref in refs]
missing = set(nodes) - set(existing)
raise exception.NodeNotFound(node=missing.pop())
class Connection(api.Connection):
"""SqlAlchemy connection."""
def __init__(self):
pass
@objects.objectify(objects.Node)
def get_nodes(self, columns):
pass
@objects.objectify(objects.Node)
def get_node_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
return _paginate_query(models.Node, limit, marker,
sort_key, sort_dir)
@objects.objectify(objects.Node)
def get_nodes_by_chassis(self, chassis, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Node)
query = add_node_filter_by_chassis(query, chassis)
return _paginate_query(models.Node, limit, marker,
sort_key, sort_dir, query)
@objects.objectify(objects.Node)
def get_associated_nodes(self):
pass
@objects.objectify(objects.Node)
def get_unassociated_nodes(self):
pass
@objects.objectify(objects.Node)
def reserve_nodes(self, tag, nodes):
# assume nodes does not contain duplicates
# Ensure consistent sort order so we don't run into deadlocks.
nodes.sort()
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
query, query_by = add_filter_by_many_identities(query, models.Node,
nodes)
# Be optimistic and assume we usually get a reservation.
_check_node_already_locked(query, query_by)
count = query.update({'reservation': tag},
synchronize_session=False)
if count != len(nodes):
                # one or more node ids were not found
_handle_node_lock_not_found(nodes, query, query_by)
return query.all()
def release_nodes(self, tag, nodes):
# assume nodes does not contain duplicates
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
query, query_by = add_filter_by_many_identities(query, models.Node,
nodes)
# be optimistic and assume we usually release a reservation
count = query.filter_by(reservation=tag).\
update({'reservation': None}, synchronize_session=False)
if count != len(nodes):
                # not every node was updated
                if len(nodes) != query.count():
                    # one or more node ids were not found
                    _handle_node_lock_not_found(nodes, query, query_by)
                else:
                    # one or more nodes had a reservation other than `tag`
_check_node_already_locked(query, query_by)
@objects.objectify(objects.Node)
def create_node(self, values):
# ensure defaults are present for new nodes
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
if not values.get('power_state'):
values['power_state'] = states.NOSTATE
if not values.get('provision_state'):
values['provision_state'] = states.NOSTATE
if not values.get('properties'):
values['properties'] = '{}'
if not values.get('extra'):
values['extra'] = '{}'
if not values.get('driver_info'):
values['driver_info'] = '{}'
node = models.Node()
node.update(values)
node.save()
return node
@objects.objectify(objects.Node)
def get_node(self, node):
query = model_query(models.Node)
query = add_identity_filter(query, node)
try:
result = query.one()
except NoResultFound:
raise exception.NodeNotFound(node=node)
return result
@objects.objectify(objects.Node)
def get_node_by_instance(self, instance):
query = model_query(models.Node)
if uuidutils.is_uuid_like(instance):
query = query.filter_by(instance_uuid=instance)
else:
query = query.filter_by(instance_name=instance)
try:
result = query.one()
except NoResultFound:
raise exception.InstanceNotFound(instance=instance)
return result
def destroy_node(self, node):
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
query = add_identity_filter(query, node)
try:
node_ref = query.one()
except NoResultFound:
raise exception.NodeNotFound(node=node)
if node_ref['reservation'] is not None:
raise exception.NodeLocked(node=node)
            # Get the node ID if a UUID was supplied; the ID is
            # required for deleting all ports attached to the node.
if uuidutils.is_uuid_like(node):
node_id = node_ref['id']
else:
node_id = node
port_query = model_query(models.Port, session=session)
port_query = add_port_filter_by_node(port_query, node_id)
port_query.delete()
query.delete()
@objects.objectify(objects.Node)
def update_node(self, node, values):
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
query = add_identity_filter(query, node)
count = query.update(values, synchronize_session='fetch')
if count != 1:
raise exception.NodeNotFound(node=node)
ref = query.one()
return ref
@objects.objectify(objects.Port)
def get_port(self, port):
query = model_query(models.Port)
query = add_port_filter(query, port)
try:
result = query.one()
except NoResultFound:
raise exception.PortNotFound(port=port)
return result
@objects.objectify(objects.Port)
def get_port_by_vif(self, vif):
pass
@objects.objectify(objects.Port)
def get_port_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
return _paginate_query(models.Port, limit, marker,
sort_key, sort_dir)
@objects.objectify(objects.Port)
def get_ports_by_node(self, node, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Port)
query = add_port_filter_by_node(query, node)
return _paginate_query(models.Port, limit, marker,
sort_key, sort_dir, query)
@objects.objectify(objects.Port)
def create_port(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
if not values.get('extra'):
values['extra'] = '{}'
port = models.Port()
port.update(values)
port.save()
return port
@objects.objectify(objects.Port)
def update_port(self, port, values):
session = get_session()
with session.begin():
query = model_query(models.Port, session=session)
query = add_port_filter(query, port)
try:
ref = query.one()
except NoResultFound:
raise exception.PortNotFound(port=port)
_check_port_change_forbidden(ref, session)
ref.update(values)
return ref
def destroy_port(self, port):
session = get_session()
with session.begin():
query = model_query(models.Port, session=session)
query = add_port_filter(query, port)
try:
ref = query.one()
except NoResultFound:
raise exception.PortNotFound(port=port)
_check_port_change_forbidden(ref, session)
query.delete()
@objects.objectify(objects.Chassis)
def get_chassis(self, chassis):
query = model_query(models.Chassis)
query = add_identity_filter(query, chassis)
try:
return query.one()
except NoResultFound:
raise exception.ChassisNotFound(chassis=chassis)
@objects.objectify(objects.Chassis)
def get_chassis_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
return _paginate_query(models.Chassis, limit, marker,
sort_key, sort_dir)
@objects.objectify(objects.Chassis)
def create_chassis(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
if not values.get('extra'):
values['extra'] = '{}'
chassis = models.Chassis()
chassis.update(values)
chassis.save()
return chassis
@objects.objectify(objects.Chassis)
def update_chassis(self, chassis, values):
session = get_session()
with session.begin():
query = model_query(models.Chassis, session=session)
query = add_identity_filter(query, chassis)
count = query.update(values)
if count != 1:
raise exception.ChassisNotFound(chassis=chassis)
ref = query.one()
return ref
def destroy_chassis(self, chassis):
def chassis_not_empty(session):
"""Checks whether the chassis does not have nodes."""
query = model_query(models.Node, session=session)
query = add_node_filter_by_chassis(query, chassis)
return query.count() != 0
session = get_session()
with session.begin():
if chassis_not_empty(session):
raise exception.ChassisNotEmpty(chassis=chassis)
query = model_query(models.Chassis, session=session)
query = add_identity_filter(query, chassis)
count = query.delete()
if count != 1:
raise exception.ChassisNotFound(chassis=chassis)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Compatibility functionality for Windbg users.
"""
import argparse
import codecs
import math
import sys
from builtins import str
import gdb
import pwndbg.arch
import pwndbg.commands
import pwndbg.memory
import pwndbg.strings
import pwndbg.symbol
import pwndbg.typeinfo
def get_type(size):
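    # Map an element width in bytes (1/2/4/8) to the matching pwndbg unsigned type.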
return {
1: pwndbg.typeinfo.uint8,
2: pwndbg.typeinfo.uint16,
4: pwndbg.typeinfo.uint32,
8: pwndbg.typeinfo.uint64,
}[size]
parser = argparse.ArgumentParser(description="Starting at the specified address, dump N bytes.")
parser.add_argument("address", type=pwndbg.commands.HexOrAddressExpr, help="The address to dump from.")
parser.add_argument("count", type=pwndbg.commands.AddressExpr, default=64, nargs="?", help="The number of bytes to dump.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def db(address, count=64):
"""
Starting at the specified address, dump N bytes
(default 64).
"""
return dX(1, address, count, repeat=db.repeat)
parser = argparse.ArgumentParser(description="Starting at the specified address, dump N words.")
parser.add_argument("address", type=pwndbg.commands.HexOrAddressExpr, help="The address to dump from.")
parser.add_argument("count", type=pwndbg.commands.AddressExpr, default=32, nargs="?", help="The number of words to dump.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def dw(address, count=32):
"""
Starting at the specified address, dump N words
(default 32).
"""
return dX(2, address, count, repeat=dw.repeat)
parser = argparse.ArgumentParser(description="Starting at the specified address, dump N dwords.")
parser.add_argument("address", type=pwndbg.commands.HexOrAddressExpr, help="The address to dump from.")
parser.add_argument("count", type=pwndbg.commands.AddressExpr, default=16, nargs="?", help="The number of dwords to dump.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def dd(address, count=16):
"""
Starting at the specified address, dump N dwords
(default 16).
"""
return dX(4, address, count, repeat=dd.repeat)
parser = argparse.ArgumentParser(description="Starting at the specified address, dump N qwords.")
parser.add_argument("address", type=pwndbg.commands.HexOrAddressExpr, help="The address to dump from.")
parser.add_argument("count", type=pwndbg.commands.AddressExpr, default=8, nargs="?", help="The number of qwords to dump.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def dq(address, count=8):
"""
Starting at the specified address, dump N qwords
(default 8).
"""
return dX(8, address, count, repeat=dq.repeat)
parser = argparse.ArgumentParser(description="Starting at the specified address, hexdump.")
parser.add_argument("address", type=pwndbg.commands.HexOrAddressExpr, help="The address to dump from.")
parser.add_argument("count", type=pwndbg.commands.AddressExpr, default=8, nargs="?", help="The number of bytes to hexdump.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def dc(address, count=8):
return pwndbg.commands.hexdump.hexdump(address=address, count=count)
def dX(size, address, count, to_string=False, repeat=False):
"""
Traditionally, windbg will display 16 bytes of data per line.
"""
values = []
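    # `repeat` is set when the command is being repeated (e.g. by pressing
    # <enter> in GDB); in that case continue from where the last call stopped.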
if repeat:
count = dX.last_count
address = dX.last_address
else:
address = int(address) & pwndbg.arch.ptrmask
count = int(count)
type = get_type(size)
for i in range(count):
try:
gval = pwndbg.memory.poi(type, address + i * size)
# print(str(gval))
values.append(int(gval))
except gdb.MemoryError:
break
if not values:
print('Could not access the provided address')
return
n_rows = int(math.ceil(count * size / float(16)))
row_sz = int(16 / size)
rows = [values[i*row_sz:(i+1)*row_sz] for i in range(n_rows)]
lines = []
# sys.stdout.write(repr(rows) + '\n')
for i, row in enumerate(rows):
if not row:
continue
        line = [enhex(pwndbg.arch.ptrsize, address + (i*16)), ' ']
for value in row:
line.append(enhex(size, value))
lines.append(' '.join(line))
if not to_string:
print('\n'.join(lines))
dX.last_count = count
dX.last_address = address + len(rows)*16
return lines
def enhex(size, value):
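    # Zero-pad to 2*size hex digits, e.g. enhex(4, 0xff) -> '000000ff'.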
value = value & pwndbg.arch.ptrmask
x = "%x" % abs(value)
x = x.rjust(size * 2, '0')
return x
parser = argparse.ArgumentParser(description="Write hex bytes at the specified address.")
parser.add_argument("address", type=pwndbg.commands.HexOrAddressExpr, help="The address to write to.")
parser.add_argument("data", type=str, nargs="*", help="The bytes to write.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def eb(address, data):
"""
Write hex bytes at the specified address.
"""
return eX(1, address, data)
parser = argparse.ArgumentParser(description="Write hex words at the specified address.")
parser.add_argument("address", type=pwndbg.commands.HexOrAddressExpr, help="The address to write to.")
parser.add_argument("data", type=str, nargs="*", help="The words to write.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def ew(address, data):
"""
Write hex words at the specified address.
"""
return eX(2, address, data)
parser = argparse.ArgumentParser(description="Write hex dwords at the specified address.")
parser.add_argument("address", type=pwndbg.commands.HexOrAddressExpr, help="The address to write to.")
parser.add_argument("data", type=str, nargs="*", help="The dwords to write.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def ed(address, data):
"""
Write hex dwords at the specified address.
"""
return eX(4, address, data)
parser = argparse.ArgumentParser(description="Write hex qwords at the specified address.")
parser.add_argument("address", type=pwndbg.commands.HexOrAddressExpr, help="The address to write to.")
parser.add_argument("data", type=str, nargs="*", help="The qwords to write.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def eq(address, data):
"""
Write hex qwords at the specified address.
"""
return eX(8, address, data)
parser = argparse.ArgumentParser(description="Write a string at the specified address.")
parser.add_argument("address", type=pwndbg.commands.HexOrAddressExpr, help="The address to write to.")
parser.add_argument("data", type=str, help="The string to write.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def ez(address, data):
"""
Write a character at the specified address.
"""
return eX(1, address, data, hex=False)
parser = argparse.ArgumentParser(description="Write a string at the specified address.") #TODO Is eza just ez? If so just alias. I had trouble finding windbg documentation defining ez
parser.add_argument("address", type=pwndbg.commands.HexOrAddressExpr, help="The address to write to.")
parser.add_argument("data", type=str, help="The string to write.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def eza(address, data):
"""
Write a string at the specified address.
"""
return ez(address, data)
def eX(size, address, data, hex=True):
"""
This relies on windbg's default hex encoding being enforced
"""
if not data:
print('Cannot write empty data into memory.')
return
if hex:
        # Validate up front that all of the data is hex
for string in data:
if string.startswith('0x'):
string = string[2:]
if any(ch not in '0123456789abcdefABCDEF' for ch in string):
print('Incorrect data format: it must all be a hex value (0x1234 or 1234, both interpreted as 0x1234)')
return
writes = 0
for i, string in enumerate(data):
if hex:
if string.startswith('0x'):
string = string[2:]
string = string.rjust(size*2, '0')
data = codecs.decode(string, 'hex')
else:
data = string
if pwndbg.arch.endian == 'little':
data = data[::-1]
try:
pwndbg.memory.write(address + (i * size), data)
writes += 1
except gdb.error:
print('Cannot access memory at address %#x' % address)
if writes > 0:
print('(Made %d writes to memory; skipping further writes)' % writes)
return
parser = argparse.ArgumentParser(description="Dump pointers and symbols at the specified address.")
parser.add_argument("addr", type=pwndbg.commands.HexOrAddressExpr, help="The address to dump from.")
@pwndbg.commands.ArgparsedCommand(parser,aliases=['kd','dps','dqs']) #TODO are these really all the same? They had identical implementation...
@pwndbg.commands.OnlyWhenRunning
def dds(addr):
"""
Dump pointers and symbols at the specified address.
"""
return pwndbg.commands.telescope.telescope(addr)
da_parser = argparse.ArgumentParser()
da_parser.description = 'Dump a string at the specified address.'
da_parser.add_argument('address', type=pwndbg.commands.HexOrAddressExpr, help='Address to dump')
da_parser.add_argument('max', type=int, nargs='?', default=256,
help='Maximum string length')
@pwndbg.commands.ArgparsedCommand(da_parser)
@pwndbg.commands.OnlyWhenRunning
def da(address, max):
print("%x" % address, repr(pwndbg.strings.get(address, max)))
ds_parser = argparse.ArgumentParser()
ds_parser.description = 'Dump a string at the specified address.'
ds_parser.add_argument('address', type=pwndbg.commands.HexOrAddressExpr, help='Address to dump')
ds_parser.add_argument('max', type=int, nargs='?', default=256,
help='Maximum string length')
@pwndbg.commands.ArgparsedCommand(ds_parser)
@pwndbg.commands.OnlyWhenRunning
def ds(address, max):
    # We change the max length to the default if it is too low, because our
    # truncated display is not ideal and not the same as GDB's yet
    # (ours: "truncated ...", GDB's: "truncated "...)
if max < 256:
print('Max str len of %d too low, changing to 256' % max)
max = 256
string = pwndbg.strings.get(address, max, maxread=4096)
if string:
print("%x %r" % (address, string))
else:
print("Data at address can't be dereferenced or is not a printable null-terminated string or is too short.")
print("Perhaps try: db <address> <count> or hexdump <address>")
@pwndbg.commands.ArgparsedCommand("List breakpoints.")
def bl():
"""
List breakpoints
"""
gdb.execute('info breakpoints')
parser = argparse.ArgumentParser(description="Disable the breakpoint with the specified index.")
parser.add_argument("which", nargs="?", type=str, default='*', help="Index of the breakpoint to disable.")
@pwndbg.commands.ArgparsedCommand(parser)
def bd(which='*'):
"""
Disable the breakpoint with the specified index.
"""
if which == '*':
gdb.execute('disable breakpoints')
else:
gdb.execute('disable breakpoints %s' % which)
parser = argparse.ArgumentParser(description="Enable the breakpoint with the specified index.")
parser.add_argument("which", nargs="?", type=str, default='*', help="Index of the breakpoint to enable.")
@pwndbg.commands.ArgparsedCommand(parser)
def be(which='*'):
"""
Enable the breakpoint with the specified index.
"""
if which == '*':
gdb.execute('enable breakpoints')
else:
gdb.execute('enable breakpoints %s' % which)
parser = argparse.ArgumentParser(description="Clear the breakpoint with the specified index.")
parser.add_argument("which", nargs="?", type=str, default='*', help="Index of the breakpoint to clear.")
@pwndbg.commands.ArgparsedCommand(parser)
def bc(which='*'):
"""
Clear the breakpoint with the specified index.
"""
if which == '*':
gdb.execute('delete breakpoints')
else:
gdb.execute('delete breakpoints %s' % which)
parser = argparse.ArgumentParser(description="Set a breakpoint at the specified address.")
parser.add_argument("where", type=int, help="The address to break at.")
@pwndbg.commands.ArgparsedCommand(parser)
def bp(where):
"""
Set a breakpoint at the specified address.
"""
result = pwndbg.commands.fix(where)
if result is not None:
gdb.execute('break *%#x' % int(result))
parser = argparse.ArgumentParser(description="Starting at the specified address, disassemble N instructions.")
parser.add_argument("where", type=int, nargs="?", default=None, help="The address to disassemble at.")
parser.add_argument("n", type=int, nargs="?", default=5, help="The number of instructions to disassemble.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def u(where=None, n=5, to_string=False):
"""
Starting at the specified address, disassemble
N instructions (default 5).
"""
if where is None:
where = pwndbg.regs.pc
return pwndbg.commands.nearpc.nearpc(where, n, to_string)
@pwndbg.commands.ArgparsedCommand("Print a backtrace (alias 'bt').")
@pwndbg.commands.OnlyWhenRunning
def k():
"""
Print a backtrace (alias 'bt')
"""
gdb.execute('bt')
parser = argparse.ArgumentParser(description="List the symbols nearest to the provided value.")
parser.add_argument("value", type=int, nargs="?", default=None, help="The address you want the name of.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def ln(value=None):
"""
List the symbols nearest to the provided value.
"""
    if value is None:
        value = pwndbg.regs.pc
value = int(value)
x = pwndbg.symbol.get(value)
if x:
result = '(%#x) %s' % (value, x)
print(result)
# The following three commands are aliases for `vmmap` and are set up in vmmap.py:
# lm
# address
# vprot
@pwndbg.commands.ArgparsedCommand("Not be windows.")
@pwndbg.commands.OnlyWhenRunning
def peb():
print("This isn't Windows!")
@pwndbg.commands.ArgparsedCommand("Windbg compatibility alias for 'continue' command.")
@pwndbg.commands.OnlyWhenRunning
def go():
'''
Windbg compatibility alias for 'continue' command.
'''
gdb.execute('continue')
@pwndbg.commands.ArgparsedCommand("Windbg compatibility alias for 'nextcall' command.")
@pwndbg.commands.OnlyWhenRunning
def pc():
'''
Windbg compatibility alias for 'nextcall' command.
'''
return pwndbg.commands.next.nextcall()
|
|
# Copyright 2013 Big Switch Networks Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: KC Wang, Big Switch Networks Inc.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import sys
import mox
from neutronclient.neutron.v2_0.fw import firewallpolicy
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20FirewallPolicyJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20FirewallPolicyJSON, self).setUp()
def test_create_firewall_policy_with_mandatory_params(self):
"""firewall-policy-create with mandatory (none) params only."""
resource = 'firewall_policy'
cmd = firewallpolicy.CreateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
tenant_id = 'my-tenant'
name = 'my-name'
my_id = 'myid'
args = ['--tenant-id', tenant_id,
'--admin-state_up',
name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
admin_state_up=True, tenant_id=tenant_id)
def test_create_firewall_policy_with_all_params(self):
"""firewall-policy-create with all params set."""
resource = 'firewall_policy'
cmd = firewallpolicy.CreateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
name = 'my-name'
description = 'my-desc'
firewall_rules_arg = 'rule_id1 rule_id2'
firewall_rules_res = ['rule_id1', 'rule_id2']
tenant_id = 'my-tenant'
my_id = 'myid'
args = ['--description', description,
'--shared',
'--firewall-rules', firewall_rules_arg,
'--audited',
'--tenant-id', tenant_id,
'--admin-state_up',
name]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
description=description, shared=True,
firewall_rules=firewall_rules_res,
audited=True, admin_state_up=True,
tenant_id=tenant_id)
def test_list_firewall_policies(self):
"""firewall-policy-list."""
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, True)
def test_list_firewall_policies_pagination(self):
"""firewall-policy-list."""
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_firewall_policies_sort(self):
"""sorted list: firewall-policy-list --sort-key name --sort-key id
--sort-dir asc --sort-dir desc
"""
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_firewall_policies_limit(self):
"""size (1000) limited list: firewall-policy-list -P."""
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_firewall_policy_id(self):
"""firewall-policy-show test_id."""
resource = 'firewall_policy'
cmd = firewallpolicy.ShowFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_firewall_policy_id_name(self):
"""firewall-policy-show."""
resource = 'firewall_policy'
cmd = firewallpolicy.ShowFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_update_firewall_policy(self):
"""firewall-policy-update myid --name newname."""
resource = 'firewall_policy'
cmd = firewallpolicy.UpdateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'newname'],
{'name': 'newname', })
def test_delete_firewall_policy(self):
"""firewall-policy-delete my-id."""
resource = 'firewall_policy'
cmd = firewallpolicy.DeleteFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
my_id = 'myid1'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
def test_insert_firewall_rule(self):
"""firewall-policy-insert-rule myid newruleid
--insert-before ruleAid
--insert-after ruleBid
"""
resource = 'firewall_policy'
cmd = firewallpolicy.FirewallPolicyInsertRule(
test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = ['myid', 'newrule',
'--insert-before', 'rule2',
'--insert-after', 'rule1']
extrafields = {'firewall_rule_id': 'newrule',
'insert_before': 'rule2',
'insert_after': 'rule1'}
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
body = extrafields
path = getattr(self.client, resource + "_insert_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path % myid, format=self.format),
self.client),
'PUT', body=test_cli20.MyComparator(body, self.client),
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn((test_cli20.MyResp(204), None))
args.extend(['--request-format', self.format])
self.mox.ReplayAll()
cmd_parser = cmd.get_parser(resource + "_insert_rule")
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_remove_firewall_rule(self):
"""firewall-policy-remove-rule myid ruleid
"""
resource = 'firewall_policy'
cmd = firewallpolicy.FirewallPolicyRemoveRule(
test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = ['myid', 'removerule']
extrafields = {'firewall_rule_id': 'removerule', }
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
body = extrafields
path = getattr(self.client, resource + "_remove_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path % myid, format=self.format),
self.client),
'PUT', body=test_cli20.MyComparator(body, self.client),
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn((test_cli20.MyResp(204), None))
args.extend(['--request-format', self.format])
self.mox.ReplayAll()
cmd_parser = cmd.get_parser(resource + "_remove_rule")
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
class CLITestV20FirewallPolicyXML(CLITestV20FirewallPolicyJSON):
format = 'xml'
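# For reference, the tests above correspond to CLI invocations of the following
# form (argument values are illustrative and mirror the args used in the tests):
#   neutron firewall-policy-create --tenant-id my-tenant my-name
#   neutron firewall-policy-insert-rule myid newrule --insert-before rule2 --insert-after rule1
#   neutron firewall-policy-remove-rule myid removerule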
|
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import json
import time
from c7n.exceptions import PolicyValidationError
from .common import BaseTest, functional
class TestOpsCenter(BaseTest):
def test_post_ops_item(self):
factory = self.replay_flight_data('test_post_ops_item')
p = self.load_policy({
'name': 'checking-lambdas',
'description': 'something good',
'resource': 'aws.lambda',
'source': 'config',
'query': [
{'clause': "resourceId = 'custodian-aws'"}],
'actions': [{
'type': 'post-item'}]},
session_factory=factory, config={'region': 'us-east-1'})
resources = p.run()
client = factory().client('ssm', region_name='us-east-1')
item = client.get_ops_item(
OpsItemId=resources[0]['c7n:opsitem']).get('OpsItem')
arn = p.resource_manager.get_arns(resources)[0]
self.assertTrue(
arn in item['OperationalData']['/aws/resources']['Value'])
self.assertTrue(item['OperationalData']['/aws/dedup'])
self.assertEqual(item['Title'], p.name)
self.assertEqual(item['Description'], p.data['description'])
def test_ops_item_filter(self):
factory = self.replay_flight_data('test_ops_item_filter')
p = self.load_policy({
'name': 'checking-lambdas',
'description': 'something good',
'resource': 'aws.lambda',
'source': 'config',
'query': [
{'clause': "resourceId = 'custodian-aws'"}],
'filters': [{
'type': 'ops-item',
'priority': [3, 4, 5],
'title': 'checking-lambdas',
'source': 'Cloud Custodian',
}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['c7n:opsitems'],
['oi-9be57440dcb3'])
def test_post_ops_item_update(self):
factory = self.replay_flight_data('test_post_ops_item_update')
p = self.load_policy({
'name': 'checking-lambdas',
'description': 'something good',
'resource': 'aws.lambda',
'source': 'config',
'query': [
{'clause': "resourceId = 'custodian-nuke-emr'"}],
'actions': [{
'type': 'post-item'}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory().client('ssm', region_name='us-east-1')
item = client.get_ops_item(
OpsItemId=resources[0]['c7n:opsitem']).get('OpsItem')
self.assertEqual(
json.loads(item['OperationalData']['/aws/resources']['Value']),
[{'arn': 'arn:aws:lambda:us-east-1::function:custodian-aws'},
{'arn': 'arn:aws:lambda:us-east-1::function:custodian-nuke-emr'}])
def test_update_ops_item(self):
factory = self.replay_flight_data('test_update_ops_item')
p = self.load_policy({
'name': 'checking-lambdas',
'description': 'something good',
'resource': 'aws.ops-item',
'query': [
{'Key': 'Status', 'Operator': 'Equal', 'Values': ['Open']}
],
'actions': [{
'type': 'update',
'topics': ['arn:aws:sns:us-west-2:644160558196:aws-command'],
'status': 'Resolved',
}]},
config={'region': 'us-west-2'},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory().client('ssm', region_name='us-west-2')
if self.recording:
time.sleep(5)
item = client.get_ops_item(
OpsItemId=resources[0]['OpsItemId'])['OpsItem']
self.assertEqual(item['Status'], 'Resolved')
self.assertEqual(
item['Notifications'],
[{'Arn': 'arn:aws:sns:us-west-2:644160558196:aws-command'}])
def test_invalid_resource_query(self):
self.assertRaises(
PolicyValidationError, self.load_policy,
{'name': 'value',
'resource': 'aws.ops-item',
'query': [
{'Key': 'Status', 'Operator': 'Equals', 'Values': ['Open']}]},
validate=True)
def test_get_resources(self):
factory = self.replay_flight_data('test_ops_item_get_resources')
p = self.load_policy({
'name': 'foo',
'resource': 'aws.ops-item'},
session_factory=factory,
config={'region': 'us-east-1'})
resources = p.resource_manager.get_resources(['oi-5aa4c36439ed'])
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['OpsItemId'], 'oi-5aa4c36439ed')
class TestSSM(BaseTest):
def test_ec2_ssm_send_command_validate(self):
self.assertRaises(
PolicyValidationError,
self.load_policy,
{'name': 'ssm-instances',
'resource': 'aws.ec2',
'actions': [
{'type': 'send-command',
'command': {
'DocumentName': 'AWS-RunShellScript'}}]},
validate=True)
def test_ssm_send_command(self):
factory = self.replay_flight_data('test_ssm_send_command')
p = self.load_policy({
'name': 'ssm-instances',
'resource': 'ssm-managed-instance',
'filters': [{"PingStatus": "Online"}],
'actions': [
{'type': 'send-command',
'command': {
'DocumentName': 'AWS-RunShellScript',
'Parameters': {
'commands': [
'wget https://pkg.osquery.io/deb/osquery_3.3.0_1.linux.amd64.deb',
'dpkg -i osquery_3.3.0_1.linux.amd64.deb']}}}]},
session_factory=factory, config={'region': 'us-east-2'})
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertTrue('c7n:SendCommand' in resources[0])
if self.recording:
time.sleep(5)
result = factory().client('ssm').get_command_invocation(
InstanceId=resources[0]['InstanceId'],
CommandId=resources[0]['c7n:SendCommand'][0])
self.assertEqual(result['Status'], 'Success')
def test_ssm_parameter_delete(self):
session_factory = self.replay_flight_data("test_ssm_parameter_delete")
p = self.load_policy({
'name': 'ssm-param-tags',
'resource': 'ssm-parameter',
'actions': ['delete']},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['Name'], 'not_secret')
client = session_factory().client('ssm')
if self.recording:
time.sleep(1)
self.assertEqual(
client.describe_parameters(
Filters=[{'Key': 'Name', 'Values': [resources[0]['Name']]}])['Parameters'],
[])
def test_ssm_parameter_delete_non_existant(self):
session_factory = self.replay_flight_data("test_ssm_parameter_delete_non_existant")
p = self.load_policy({
'name': 'ssm-param-tags',
'resource': 'ssm-parameter',
'actions': ['delete']},
session_factory=session_factory)
# if this raises, the test fails
p.resource_manager.actions[0].process(
[{'Name': 'unicorn'}])
def test_ssm_parameter_tag_arn(self):
session_factory = self.replay_flight_data("test_ssm_parameter_tag_arn")
p = self.load_policy({
'name': 'ssm-param-tags',
'resource': 'ssm-parameter',
'filters': [{'tag:Env': 'present'}]},
config={'account_id': '123456789123'},
session_factory=session_factory)
resources = p.resource_manager.get_resources(['/gittersearch/token'])
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['Tags'],
[{'Key': 'App', 'Value': 'GitterSearch'},
{'Key': 'Env', 'Value': 'Dev'}])
@functional
def test_ssm_parameter_not_secure(self):
session_factory = self.replay_flight_data("test_ssm_parameter_not_secure")
client = session_factory().client("ssm")
client.put_parameter(Name='test-name',
Type='String',
Overwrite=True,
Value='test-value')
client.put_parameter(Name='secure-test-name',
Type='SecureString',
Overwrite=True,
Value='secure-test-value')
p = self.load_policy(
{
"name": "ssm-parameter-not-secure",
"resource": "ssm-parameter",
"filters": [{"type": "value",
"op": "ne",
"key": "Type",
"value": "SecureString"}]
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.addCleanup(client.delete_parameters, Names=['test-name', 'secure-test-name'])
def test_ssm_activation_expired(self):
session_factory = self.replay_flight_data("test_ssm_activation_expired")
p = self.load_policy(
{
"name": "ssm-list-expired-activations",
"resource": "ssm-activation",
"filters": [{"type": "value",
"key": "Expired",
"value": True}]
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_ssm_get_managed_instances(self):
session_factory = self.replay_flight_data("test_ssm_get_managed_instances")
p = self.load_policy(
{
"name": "ssm-get-managed-instances",
"resource": "ssm-managed-instance"
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["InstanceId"], "mi-1111aa111aa11a111")
|
|
""" This test script will test the set of optimization algorithms.
It tests
- the conformity of interface
- the behavior on simple functions
- the behavior on FitnessEvaluators
- the behavior when optimizing a list or an array
- the behavior when optimizing an Evolvable
- the behavior when optimizing a ParameterContainer
- consistency w.r.t. minimization/maximization
Tests to be added:
- tolerance of problems that have a constant fitness
- tolerance of problems that have adversarial (strictly decreasing) fitness
- handling one-dimensional and high-dimensional spaces
- reasonable results on the linear function
"""
__author__ = 'Tom Schaul, [email protected]'
from inspect import isclass
from scipy import sum, array, ndarray, log10
from random import random, choice
import pybrain.optimization.optimizer as bbo
import pybrain.optimization.populationbased.multiobjective as mobj
import pybrain.optimization as allopts
from pybrain.rl.environments.functions.unimodal import SphereFunction
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.evolvables.evolvable import Evolvable
from pybrain.rl.environments.cartpole.balancetask import BalanceTask
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules.module import Module
# Tasks to be optimized:
# ----------------------
# simple function
sf = lambda x:-sum((x + 1) ** 2)
# FunctionEnvironment class
fe = SphereFunction
# initialized FE
ife1 = fe(1)
ife2 = fe(2)
ife100 = fe(100)
# a Task object
task = BalanceTask()
task.N = 10
# for the simple evolvable class defined below
evoEval = lambda e: e.x
# starting points
# ----------------------
xlist1 = [2.]
xlist2 = [0.2, 10]
xlist100 = list(range(12, 112))
xa1 = array(xlist1)
xa2 = array(xlist2)
xa100 = array(xlist100)
pc1 = ParameterContainer(1)
pc2 = ParameterContainer(2)
pc100 = ParameterContainer(100)
pc1._setParameters(xa1)
pc2._setParameters(xa2)
pc100._setParameters(xa100)
# for the task object, we need a module
nnet = buildNetwork(task.outdim, 2, task.indim)
# a minimalistic Evolvable subclass that is not (as usual) a ParameterContainer
class SimpleEvo(Evolvable):
def __init__(self, x): self.x = x
def mutate(self): self.x += random() - 0.3
def copy(self): return SimpleEvo(self.x)
def randomize(self): self.x = 10 * random() - 2
def __repr__(self): return '--%.3f--' % self.x
evo1 = SimpleEvo(-3.)
# the test functions
# ----------------------
def testInterface(algo):
""" Tests whether the algorithm is properly implementing the
correct Blackbox-optimization interface."""
# without any arguments, initialization has to work
emptyalgo = algo()
try:
# but not learning
emptyalgo.learn(0)
return "Failed to throw missing evaluator error?"
except AssertionError:
pass
emptyalgo.setEvaluator(sf, xa1)
# now it can run
emptyalgo.learn(0)
# simple functions don't check for dimension mismatch
algo(sf, xa1)
algo(sf, xa100)
# for these, either an initial point or a dimension parameter is required
algo(sf, numParameters=2)
try:
algo(sf)
return "Failed to throw unknown dimension error"
except ValueError:
pass
# FitnessEvaluators do not require that
algo(ife1)
# parameter containers can be used too
algo(ife2, pc2)
return True
def testContinuousInterface(algo):
""" Test the specifics for the interface for ContinuousOptimizers """
if not issubclass(algo, bbo.ContinuousOptimizer):
return True
# list starting points are internally converted to arrays
x = algo(sf, xlist2)
assert isinstance(x.bestEvaluable, ndarray), 'not converted to array'
# check for dimension mismatch
try:
algo(ife1, xa2)
return "Failed to throw dimension mismatch error"
except ValueError:
pass
return True
def testMinMax(algo):
""" Verify that the algorithm is doing the minimization/maximization consistently. """
if (issubclass(algo, bbo.TopologyOptimizer)
or algo == allopts.StochasticHillClimber):
# TODO
return True
xa1[0] = 2
evalx = sf(xa1)
amax1 = algo(sf, xa1, minimize=False)
amax2 = algo(sf, xa1)
amax2.minimize = False
amax3 = algo()
amax3.setEvaluator(sf, xa1)
amax3.minimize = False
amax4 = algo()
amax4.minimize = False
amax4.setEvaluator(sf, xa1)
for i, amax in enumerate([amax1, amax2, amax3, amax4]):
assert amax.minimize is False or amax.mustMinimize, 'Max: Attribute not set correctly.' \
+ str(amax.minimize) + str(amax.mustMinimize) + str(i)
x, xv = amax.learn(1)
assert sf(x) == xv, 'Evaluation does not fit: ' + str((sf(x), xv))
assert xv >= evalx, 'Evaluation did not increase: ' + str(xv) + ' (init: ' + str(evalx) + ')'
xa1[0] = 2
amin1 = algo(sf, xa1, minimize=True)
amin2 = algo(sf, xa1)
amin2.minimize = True
amin3 = algo()
amin3.setEvaluator(sf, xa1)
amin3.minimize = True
amin4 = algo()
amin4.minimize = True
amin4.setEvaluator(sf, xa1)
for i, amin in enumerate([amin1, amin2, amin3, amin4]):
assert amin.minimize is True or amin.mustMaximize, 'Min: Attribute not set correctly.' \
+ str(amin.minimize) + str(amin.mustMaximize) + str(i)
x, xv = amin.learn(1)
assert sf(x) == xv, 'Evaluation does not fit: ' + str((sf(x), xv)) + str(i)
assert xv <= evalx, 'Evaluation did not decrease: ' + str(xv) + ' (init: ' + str(evalx) + ')' + str(i)
assert ((amin.minimize is not amax.minimize)
or not (amin._wasOpposed is amax._wasOpposed)), 'Inconsistent flags.'
return True
def testOnModuleAndTask(algo):
l = algo(task, nnet)
assert isinstance(l._bestFound()[0], Module), 'Did not return a module.'
return True
def testOnEvolvable(algo):
if issubclass(algo, bbo.ContinuousOptimizer):
return True
if issubclass(algo, bbo.TopologyOptimizer):
try:
algo(evoEval, evo1).learn(1)
return "Topology optimizers should not accept arbitrary Evolvables"
except AttributeError:
return True
else:
algo(evoEval, evo1).learn(1)
return True
# the main test procedure
# ------------------------
def testAll(tests, allalgos, tolerant=True):
countgood = 0
for i, algo in enumerate(sorted(allalgos)):
print "%d, %s:" % (i + 1, algo.__name__)
print ' ' * int(log10(i + 1) + 2),
good = True
messages = []
for t in tests:
try:
res = t(algo)
except Exception, e:
if not tolerant:
raise e
res = e
if res is True:
print '.',
else:
good = False
messages.append(res)
print 'F',
if good:
countgood += 1
print '--- OK.'
else:
print '--- NOT OK.'
for m in messages:
if m is not None:
print ' ' * int(log10(i + 1) + 2), '->', m
print
print 'Summary:', countgood, '/', len(allalgos), 'of the tests passed.'
if __name__ == '__main__':
from pybrain.optimization import * #@UnusedWildImport
#from pybrain.optimization import CMAES #@UnusedImport
allalgos = filter(lambda c: (isclass(c)
and issubclass(c, bbo.BlackBoxOptimizer)
and not issubclass(c, mobj.MultiObjectiveGA)
),
globals().values())
print 'Optimization algorithms to be tested:', len(allalgos)
print
print 'Note: this collection of tests may take quite some time.'
print
tests = [testInterface,
testContinuousInterface,
testOnModuleAndTask,
testOnEvolvable,
testMinMax,
]
testAll(tests, allalgos, tolerant=True)
|
|
"""Component for wiffi support."""
import asyncio
from datetime import timedelta
import errno
import logging
from wiffi import WiffiTcpServer
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PORT, CONF_TIMEOUT
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.dt import utcnow
from .const import (
CHECK_ENTITIES_SIGNAL,
CREATE_ENTITY_SIGNAL,
DEFAULT_TIMEOUT,
DOMAIN,
UPDATE_ENTITY_SIGNAL,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "binary_sensor"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the wiffi component. config contains data from configuration.yaml."""
return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry):
"""Set up wiffi from a config entry, config_entry contains data from config entry database."""
if not config_entry.update_listeners:
config_entry.add_update_listener(async_update_options)
# create api object
api = WiffiIntegrationApi(hass)
api.async_setup(config_entry)
# store api object
hass.data.setdefault(DOMAIN, {})[config_entry.entry_id] = api
try:
await api.server.start_server()
except OSError as exc:
if exc.errno != errno.EADDRINUSE:
_LOGGER.error("Start_server failed, errno: %d", exc.errno)
return False
_LOGGER.error("Port %s already in use", config_entry.data[CONF_PORT])
raise ConfigEntryNotReady from exc
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
return True
async def async_update_options(hass: HomeAssistant, config_entry: ConfigEntry):
"""Update options."""
await hass.config_entries.async_reload(config_entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry):
"""Unload a config entry."""
api: "WiffiIntegrationApi" = hass.data[DOMAIN][config_entry.entry_id]
await api.server.close_server()
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
api = hass.data[DOMAIN].pop(config_entry.entry_id)
api.shutdown()
return unload_ok
def generate_unique_id(device, metric):
"""Generate a unique string for the entity."""
return f"{device.mac_address.replace(':', '')}-{metric.name}"
class WiffiIntegrationApi:
"""API object for wiffi handling. Stored in hass.data."""
def __init__(self, hass):
"""Initialize the instance."""
self._hass = hass
self._server = None
self._known_devices = {}
self._periodic_callback = None
def async_setup(self, config_entry):
"""Set up api instance."""
self._server = WiffiTcpServer(config_entry.data[CONF_PORT], self)
self._periodic_callback = async_track_time_interval(
self._hass, self._periodic_tick, timedelta(seconds=10)
)
def shutdown(self):
"""Shutdown wiffi api.
Remove listener for periodic callbacks.
"""
remove_listener = self._periodic_callback
if remove_listener is not None:
remove_listener()
async def __call__(self, device, metrics):
"""Process callback from TCP server if new data arrives from a device."""
if device.mac_address not in self._known_devices:
# add empty set for new device
self._known_devices[device.mac_address] = set()
for metric in metrics:
if metric.id not in self._known_devices[device.mac_address]:
self._known_devices[device.mac_address].add(metric.id)
async_dispatcher_send(self._hass, CREATE_ENTITY_SIGNAL, device, metric)
else:
async_dispatcher_send(
self._hass,
f"{UPDATE_ENTITY_SIGNAL}-{generate_unique_id(device, metric)}",
device,
metric,
)
@property
def server(self):
"""Return TCP server instance for start + close."""
return self._server
@callback
def _periodic_tick(self, now=None):
"""Check if any entity has timed out because it has not been updated."""
async_dispatcher_send(self._hass, CHECK_ENTITIES_SIGNAL)
class WiffiEntity(Entity):
"""Common functionality for all wiffi entities."""
def __init__(self, device, metric, options):
"""Initialize the base elements of a wiffi entity."""
self._id = generate_unique_id(device, metric)
self._device_info = {
"connections": {
(device_registry.CONNECTION_NETWORK_MAC, device.mac_address)
},
"identifiers": {(DOMAIN, device.mac_address)},
"manufacturer": "stall.biz",
"name": f"{device.moduletype} {device.mac_address}",
"model": device.moduletype,
"sw_version": device.sw_version,
}
self._name = metric.description
self._expiration_date = None
self._value = None
self._timeout = options.get(CONF_TIMEOUT, DEFAULT_TIMEOUT)
async def async_added_to_hass(self):
"""Entity has been added to hass."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{UPDATE_ENTITY_SIGNAL}-{self._id}",
self._update_value_callback,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, CHECK_ENTITIES_SIGNAL, self._check_expiration_date
)
)
@property
def should_poll(self):
"""Disable polling because data driven ."""
return False
@property
def device_info(self):
"""Return wiffi device info which is shared between all entities of a device."""
return self._device_info
@property
def unique_id(self):
"""Return unique id for entity."""
return self._id
@property
def name(self):
"""Return entity name."""
return self._name
@property
def available(self):
"""Return true if value is valid."""
return self._value is not None
def reset_expiration_date(self):
"""Reset value expiration date.
Will be called by derived classes after a value update has been received.
"""
self._expiration_date = utcnow() + timedelta(minutes=self._timeout)
@callback
def _update_value_callback(self, device, metric):
"""Update the value of the entity."""
@callback
def _check_expiration_date(self):
"""Periodically check if entity value has been updated.
If there are no more updates from the wiffi device, the value will be
set to unavailable.
"""
if (
self._value is not None
and self._expiration_date is not None
and utcnow() > self._expiration_date
):
self._value = None
self.async_write_ha_state()
|
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import datetime
import hashlib
import ipaddress
import warnings
from enum import Enum
from asn1crypto.keys import PublicKeyInfo
import six
from cryptography import utils
from cryptography.hazmat.primitives import constant_time, serialization
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey
from cryptography.x509.general_name import GeneralName, IPAddress, OtherName
from cryptography.x509.name import Name, RelativeDistinguishedName
from cryptography.x509.oid import (
CRLEntryExtensionOID, ExtensionOID, ObjectIdentifier
)
def _key_identifier_from_public_key(public_key):
if isinstance(public_key, RSAPublicKey):
data = public_key.public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.PKCS1,
)
elif isinstance(public_key, EllipticCurvePublicKey):
data = public_key.public_numbers().encode_point()
else:
# This is a very slow way to do this.
serialized = public_key.public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo
)
data = six.binary_type(PublicKeyInfo.load(serialized)['public_key'])
return hashlib.sha1(data).digest()
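# Note: all three branches above hash the raw subjectPublicKey bits (PKCS1 DER for
# RSA, the encoded point for EC, the BIT STRING contents otherwise), so the digest
# matches the RFC 5280 section 4.2.1.2 method (1) key identifier.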
class DuplicateExtension(Exception):
def __init__(self, msg, oid):
super(DuplicateExtension, self).__init__(msg)
self.oid = oid
class UnsupportedExtension(Exception):
def __init__(self, msg, oid):
super(UnsupportedExtension, self).__init__(msg)
self.oid = oid
class ExtensionNotFound(Exception):
def __init__(self, msg, oid):
super(ExtensionNotFound, self).__init__(msg)
self.oid = oid
@six.add_metaclass(abc.ABCMeta)
class ExtensionType(object):
@abc.abstractproperty
def oid(self):
"""
Returns the oid associated with the given extension type.
"""
class Extensions(object):
def __init__(self, extensions):
self._extensions = extensions
def get_extension_for_oid(self, oid):
for ext in self:
if ext.oid == oid:
return ext
raise ExtensionNotFound("No {0} extension was found".format(oid), oid)
def get_extension_for_class(self, extclass):
if extclass is UnrecognizedExtension:
raise TypeError(
"UnrecognizedExtension can't be used with "
"get_extension_for_class because more than one instance of the"
" class may be present."
)
for ext in self:
if isinstance(ext.value, extclass):
return ext
raise ExtensionNotFound(
"No {0} extension was found".format(extclass), extclass.oid
)
def __iter__(self):
return iter(self._extensions)
def __len__(self):
return len(self._extensions)
def __getitem__(self, idx):
return self._extensions[idx]
def __repr__(self):
return (
"<Extensions({0})>".format(self._extensions)
)
@utils.register_interface(ExtensionType)
class CRLNumber(object):
oid = ExtensionOID.CRL_NUMBER
def __init__(self, crl_number):
if not isinstance(crl_number, six.integer_types):
raise TypeError("crl_number must be an integer")
self._crl_number = crl_number
def __eq__(self, other):
if not isinstance(other, CRLNumber):
return NotImplemented
return self.crl_number == other.crl_number
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.crl_number)
def __repr__(self):
return "<CRLNumber({0})>".format(self.crl_number)
crl_number = utils.read_only_property("_crl_number")
@utils.register_interface(ExtensionType)
class AuthorityKeyIdentifier(object):
oid = ExtensionOID.AUTHORITY_KEY_IDENTIFIER
def __init__(self, key_identifier, authority_cert_issuer,
authority_cert_serial_number):
if (authority_cert_issuer is None) != (
authority_cert_serial_number is None
):
raise ValueError(
"authority_cert_issuer and authority_cert_serial_number "
"must both be present or both None"
)
if authority_cert_issuer is not None:
authority_cert_issuer = list(authority_cert_issuer)
if not all(
isinstance(x, GeneralName) for x in authority_cert_issuer
):
raise TypeError(
"authority_cert_issuer must be a list of GeneralName "
"objects"
)
if authority_cert_serial_number is not None and not isinstance(
authority_cert_serial_number, six.integer_types
):
raise TypeError(
"authority_cert_serial_number must be an integer"
)
self._key_identifier = key_identifier
self._authority_cert_issuer = authority_cert_issuer
self._authority_cert_serial_number = authority_cert_serial_number
@classmethod
def from_issuer_public_key(cls, public_key):
digest = _key_identifier_from_public_key(public_key)
return cls(
key_identifier=digest,
authority_cert_issuer=None,
authority_cert_serial_number=None
)
@classmethod
def from_issuer_subject_key_identifier(cls, ski):
return cls(
key_identifier=ski.value.digest,
authority_cert_issuer=None,
authority_cert_serial_number=None
)
def __repr__(self):
return (
"<AuthorityKeyIdentifier(key_identifier={0.key_identifier!r}, "
"authority_cert_issuer={0.authority_cert_issuer}, "
"authority_cert_serial_number={0.authority_cert_serial_number}"
")>".format(self)
)
def __eq__(self, other):
if not isinstance(other, AuthorityKeyIdentifier):
return NotImplemented
return (
self.key_identifier == other.key_identifier and
self.authority_cert_issuer == other.authority_cert_issuer and
self.authority_cert_serial_number ==
other.authority_cert_serial_number
)
def __ne__(self, other):
return not self == other
key_identifier = utils.read_only_property("_key_identifier")
authority_cert_issuer = utils.read_only_property("_authority_cert_issuer")
authority_cert_serial_number = utils.read_only_property(
"_authority_cert_serial_number"
)
@utils.register_interface(ExtensionType)
class SubjectKeyIdentifier(object):
oid = ExtensionOID.SUBJECT_KEY_IDENTIFIER
def __init__(self, digest):
self._digest = digest
@classmethod
def from_public_key(cls, public_key):
return cls(_key_identifier_from_public_key(public_key))
digest = utils.read_only_property("_digest")
def __repr__(self):
return "<SubjectKeyIdentifier(digest={0!r})>".format(self.digest)
def __eq__(self, other):
if not isinstance(other, SubjectKeyIdentifier):
return NotImplemented
return constant_time.bytes_eq(self.digest, other.digest)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.digest)
@utils.register_interface(ExtensionType)
class AuthorityInformationAccess(object):
oid = ExtensionOID.AUTHORITY_INFORMATION_ACCESS
def __init__(self, descriptions):
descriptions = list(descriptions)
if not all(isinstance(x, AccessDescription) for x in descriptions):
raise TypeError(
"Every item in the descriptions list must be an "
"AccessDescription"
)
self._descriptions = descriptions
def __iter__(self):
return iter(self._descriptions)
def __len__(self):
return len(self._descriptions)
def __repr__(self):
return "<AuthorityInformationAccess({0})>".format(self._descriptions)
def __eq__(self, other):
if not isinstance(other, AuthorityInformationAccess):
return NotImplemented
return self._descriptions == other._descriptions
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._descriptions[idx]
class AccessDescription(object):
def __init__(self, access_method, access_location):
if not isinstance(access_method, ObjectIdentifier):
raise TypeError("access_method must be an ObjectIdentifier")
if not isinstance(access_location, GeneralName):
raise TypeError("access_location must be a GeneralName")
self._access_method = access_method
self._access_location = access_location
def __repr__(self):
return (
"<AccessDescription(access_method={0.access_method}, access_locati"
"on={0.access_location})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, AccessDescription):
return NotImplemented
return (
self.access_method == other.access_method and
self.access_location == other.access_location
)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.access_method, self.access_location))
access_method = utils.read_only_property("_access_method")
access_location = utils.read_only_property("_access_location")
@utils.register_interface(ExtensionType)
class BasicConstraints(object):
oid = ExtensionOID.BASIC_CONSTRAINTS
def __init__(self, ca, path_length):
if not isinstance(ca, bool):
raise TypeError("ca must be a boolean value")
if path_length is not None and not ca:
raise ValueError("path_length must be None when ca is False")
if (
path_length is not None and
(not isinstance(path_length, six.integer_types) or path_length < 0)
):
raise TypeError(
"path_length must be a non-negative integer or None"
)
self._ca = ca
self._path_length = path_length
ca = utils.read_only_property("_ca")
path_length = utils.read_only_property("_path_length")
def __repr__(self):
return ("<BasicConstraints(ca={0.ca}, "
"path_length={0.path_length})>").format(self)
def __eq__(self, other):
if not isinstance(other, BasicConstraints):
return NotImplemented
return self.ca == other.ca and self.path_length == other.path_length
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.ca, self.path_length))
@utils.register_interface(ExtensionType)
class CRLDistributionPoints(object):
oid = ExtensionOID.CRL_DISTRIBUTION_POINTS
def __init__(self, distribution_points):
distribution_points = list(distribution_points)
if not all(
isinstance(x, DistributionPoint) for x in distribution_points
):
raise TypeError(
"distribution_points must be a list of DistributionPoint "
"objects"
)
self._distribution_points = distribution_points
def __iter__(self):
return iter(self._distribution_points)
def __len__(self):
return len(self._distribution_points)
def __repr__(self):
return "<CRLDistributionPoints({0})>".format(self._distribution_points)
def __eq__(self, other):
if not isinstance(other, CRLDistributionPoints):
return NotImplemented
return self._distribution_points == other._distribution_points
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._distribution_points[idx]
class DistributionPoint(object):
def __init__(self, full_name, relative_name, reasons, crl_issuer):
if full_name and relative_name:
raise ValueError(
"You cannot provide both full_name and relative_name, at "
"least one must be None."
)
if full_name:
full_name = list(full_name)
if not all(isinstance(x, GeneralName) for x in full_name):
raise TypeError(
"full_name must be a list of GeneralName objects"
)
if relative_name:
if isinstance(relative_name, Name):
warnings.warn(
"relative_name=<Name> is deprecated and will "
"be removed in a future version; use "
"<RelativeDistinguishedName> instead.",
utils.DeprecatedIn16,
stacklevel=2
)
relative_name = RelativeDistinguishedName(relative_name)
elif not isinstance(relative_name, RelativeDistinguishedName):
raise TypeError(
"relative_name must be a RelativeDistinguishedName"
)
if crl_issuer:
crl_issuer = list(crl_issuer)
if not all(isinstance(x, GeneralName) for x in crl_issuer):
raise TypeError(
"crl_issuer must be None or a list of general names"
)
if reasons and (not isinstance(reasons, frozenset) or not all(
isinstance(x, ReasonFlags) for x in reasons
)):
raise TypeError("reasons must be None or frozenset of ReasonFlags")
if reasons and (
ReasonFlags.unspecified in reasons or
ReasonFlags.remove_from_crl in reasons
):
raise ValueError(
"unspecified and remove_from_crl are not valid reasons in a "
"DistributionPoint"
)
if reasons and not crl_issuer and not (full_name or relative_name):
raise ValueError(
"You must supply crl_issuer, full_name, or relative_name when "
"reasons is not None"
)
self._full_name = full_name
self._relative_name = relative_name
self._reasons = reasons
self._crl_issuer = crl_issuer
def __repr__(self):
return (
"<DistributionPoint(full_name={0.full_name}, relative_name={0.rela"
"tive_name}, reasons={0.reasons}, crl_issuer={0.crl_is"
"suer})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, DistributionPoint):
return NotImplemented
return (
self.full_name == other.full_name and
self.relative_name == other.relative_name and
self.reasons == other.reasons and
self.crl_issuer == other.crl_issuer
)
def __ne__(self, other):
return not self == other
full_name = utils.read_only_property("_full_name")
relative_name = utils.read_only_property("_relative_name")
reasons = utils.read_only_property("_reasons")
crl_issuer = utils.read_only_property("_crl_issuer")
class ReasonFlags(Enum):
unspecified = "unspecified"
key_compromise = "keyCompromise"
ca_compromise = "cACompromise"
affiliation_changed = "affiliationChanged"
superseded = "superseded"
cessation_of_operation = "cessationOfOperation"
certificate_hold = "certificateHold"
privilege_withdrawn = "privilegeWithdrawn"
aa_compromise = "aACompromise"
remove_from_crl = "removeFromCRL"
@utils.register_interface(ExtensionType)
class PolicyConstraints(object):
oid = ExtensionOID.POLICY_CONSTRAINTS
def __init__(self, require_explicit_policy, inhibit_policy_mapping):
if require_explicit_policy is not None and not isinstance(
require_explicit_policy, six.integer_types
):
raise TypeError(
"require_explicit_policy must be a non-negative integer or "
"None"
)
if inhibit_policy_mapping is not None and not isinstance(
inhibit_policy_mapping, six.integer_types
):
raise TypeError(
"inhibit_policy_mapping must be a non-negative integer or None"
)
if inhibit_policy_mapping is None and require_explicit_policy is None:
raise ValueError(
"At least one of require_explicit_policy and "
"inhibit_policy_mapping must not be None"
)
self._require_explicit_policy = require_explicit_policy
self._inhibit_policy_mapping = inhibit_policy_mapping
def __repr__(self):
return (
u"<PolicyConstraints(require_explicit_policy={0.require_explicit"
u"_policy}, inhibit_policy_mapping={0.inhibit_policy_"
u"mapping})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, PolicyConstraints):
return NotImplemented
return (
self.require_explicit_policy == other.require_explicit_policy and
self.inhibit_policy_mapping == other.inhibit_policy_mapping
)
def __ne__(self, other):
return not self == other
require_explicit_policy = utils.read_only_property(
"_require_explicit_policy"
)
inhibit_policy_mapping = utils.read_only_property(
"_inhibit_policy_mapping"
)
@utils.register_interface(ExtensionType)
class CertificatePolicies(object):
oid = ExtensionOID.CERTIFICATE_POLICIES
def __init__(self, policies):
policies = list(policies)
if not all(isinstance(x, PolicyInformation) for x in policies):
raise TypeError(
"Every item in the policies list must be a "
"PolicyInformation"
)
self._policies = policies
def __iter__(self):
return iter(self._policies)
def __len__(self):
return len(self._policies)
def __repr__(self):
return "<CertificatePolicies({0})>".format(self._policies)
def __eq__(self, other):
if not isinstance(other, CertificatePolicies):
return NotImplemented
return self._policies == other._policies
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._policies[idx]
class PolicyInformation(object):
def __init__(self, policy_identifier, policy_qualifiers):
if not isinstance(policy_identifier, ObjectIdentifier):
raise TypeError("policy_identifier must be an ObjectIdentifier")
self._policy_identifier = policy_identifier
if policy_qualifiers:
policy_qualifiers = list(policy_qualifiers)
if not all(
isinstance(x, (six.text_type, UserNotice))
for x in policy_qualifiers
):
raise TypeError(
"policy_qualifiers must be a list of strings and/or "
"UserNotice objects or None"
)
self._policy_qualifiers = policy_qualifiers
def __repr__(self):
return (
"<PolicyInformation(policy_identifier={0.policy_identifier}, polic"
"y_qualifiers={0.policy_qualifiers})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, PolicyInformation):
return NotImplemented
return (
self.policy_identifier == other.policy_identifier and
self.policy_qualifiers == other.policy_qualifiers
)
def __ne__(self, other):
return not self == other
policy_identifier = utils.read_only_property("_policy_identifier")
policy_qualifiers = utils.read_only_property("_policy_qualifiers")
class UserNotice(object):
def __init__(self, notice_reference, explicit_text):
if notice_reference and not isinstance(
notice_reference, NoticeReference
):
raise TypeError(
"notice_reference must be None or a NoticeReference"
)
self._notice_reference = notice_reference
self._explicit_text = explicit_text
def __repr__(self):
return (
"<UserNotice(notice_reference={0.notice_reference}, explicit_text="
"{0.explicit_text!r})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, UserNotice):
return NotImplemented
return (
self.notice_reference == other.notice_reference and
self.explicit_text == other.explicit_text
)
def __ne__(self, other):
return not self == other
notice_reference = utils.read_only_property("_notice_reference")
explicit_text = utils.read_only_property("_explicit_text")
class NoticeReference(object):
def __init__(self, organization, notice_numbers):
self._organization = organization
notice_numbers = list(notice_numbers)
if not all(isinstance(x, int) for x in notice_numbers):
raise TypeError(
"notice_numbers must be a list of integers"
)
self._notice_numbers = notice_numbers
def __repr__(self):
return (
"<NoticeReference(organization={0.organization!r}, notice_numbers="
"{0.notice_numbers})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, NoticeReference):
return NotImplemented
return (
self.organization == other.organization and
self.notice_numbers == other.notice_numbers
)
def __ne__(self, other):
return not self == other
organization = utils.read_only_property("_organization")
notice_numbers = utils.read_only_property("_notice_numbers")
@utils.register_interface(ExtensionType)
class ExtendedKeyUsage(object):
oid = ExtensionOID.EXTENDED_KEY_USAGE
def __init__(self, usages):
usages = list(usages)
if not all(isinstance(x, ObjectIdentifier) for x in usages):
raise TypeError(
"Every item in the usages list must be an ObjectIdentifier"
)
self._usages = usages
def __iter__(self):
return iter(self._usages)
def __len__(self):
return len(self._usages)
def __repr__(self):
return "<ExtendedKeyUsage({0})>".format(self._usages)
def __eq__(self, other):
if not isinstance(other, ExtendedKeyUsage):
return NotImplemented
return self._usages == other._usages
def __ne__(self, other):
return not self == other
@utils.register_interface(ExtensionType)
class OCSPNoCheck(object):
oid = ExtensionOID.OCSP_NO_CHECK
@utils.register_interface(ExtensionType)
class InhibitAnyPolicy(object):
oid = ExtensionOID.INHIBIT_ANY_POLICY
def __init__(self, skip_certs):
if not isinstance(skip_certs, six.integer_types):
raise TypeError("skip_certs must be an integer")
if skip_certs < 0:
raise ValueError("skip_certs must be a non-negative integer")
self._skip_certs = skip_certs
def __repr__(self):
return "<InhibitAnyPolicy(skip_certs={0.skip_certs})>".format(self)
def __eq__(self, other):
if not isinstance(other, InhibitAnyPolicy):
return NotImplemented
return self.skip_certs == other.skip_certs
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.skip_certs)
skip_certs = utils.read_only_property("_skip_certs")
@utils.register_interface(ExtensionType)
class KeyUsage(object):
oid = ExtensionOID.KEY_USAGE
def __init__(self, digital_signature, content_commitment, key_encipherment,
data_encipherment, key_agreement, key_cert_sign, crl_sign,
encipher_only, decipher_only):
if not key_agreement and (encipher_only or decipher_only):
raise ValueError(
"encipher_only and decipher_only can only be true when "
"key_agreement is true"
)
self._digital_signature = digital_signature
self._content_commitment = content_commitment
self._key_encipherment = key_encipherment
self._data_encipherment = data_encipherment
self._key_agreement = key_agreement
self._key_cert_sign = key_cert_sign
self._crl_sign = crl_sign
self._encipher_only = encipher_only
self._decipher_only = decipher_only
digital_signature = utils.read_only_property("_digital_signature")
content_commitment = utils.read_only_property("_content_commitment")
key_encipherment = utils.read_only_property("_key_encipherment")
data_encipherment = utils.read_only_property("_data_encipherment")
key_agreement = utils.read_only_property("_key_agreement")
key_cert_sign = utils.read_only_property("_key_cert_sign")
crl_sign = utils.read_only_property("_crl_sign")
@property
def encipher_only(self):
if not self.key_agreement:
raise ValueError(
"encipher_only is undefined unless key_agreement is true"
)
else:
return self._encipher_only
@property
def decipher_only(self):
if not self.key_agreement:
raise ValueError(
"decipher_only is undefined unless key_agreement is true"
)
else:
return self._decipher_only
def __repr__(self):
try:
encipher_only = self.encipher_only
decipher_only = self.decipher_only
except ValueError:
encipher_only = None
decipher_only = None
return ("<KeyUsage(digital_signature={0.digital_signature}, "
"content_commitment={0.content_commitment}, "
"key_encipherment={0.key_encipherment}, "
"data_encipherment={0.data_encipherment}, "
"key_agreement={0.key_agreement}, "
"key_cert_sign={0.key_cert_sign}, crl_sign={0.crl_sign}, "
"encipher_only={1}, decipher_only={2})>").format(
self, encipher_only, decipher_only)
def __eq__(self, other):
if not isinstance(other, KeyUsage):
return NotImplemented
return (
self.digital_signature == other.digital_signature and
self.content_commitment == other.content_commitment and
self.key_encipherment == other.key_encipherment and
self.data_encipherment == other.data_encipherment and
self.key_agreement == other.key_agreement and
self.key_cert_sign == other.key_cert_sign and
self.crl_sign == other.crl_sign and
self._encipher_only == other._encipher_only and
self._decipher_only == other._decipher_only
)
def __ne__(self, other):
return not self == other
@utils.register_interface(ExtensionType)
class NameConstraints(object):
oid = ExtensionOID.NAME_CONSTRAINTS
def __init__(self, permitted_subtrees, excluded_subtrees):
if permitted_subtrees is not None:
permitted_subtrees = list(permitted_subtrees)
if not all(
isinstance(x, GeneralName) for x in permitted_subtrees
):
raise TypeError(
"permitted_subtrees must be a list of GeneralName objects "
"or None"
)
self._validate_ip_name(permitted_subtrees)
if excluded_subtrees is not None:
excluded_subtrees = list(excluded_subtrees)
if not all(
isinstance(x, GeneralName) for x in excluded_subtrees
):
raise TypeError(
"excluded_subtrees must be a list of GeneralName objects "
"or None"
)
self._validate_ip_name(excluded_subtrees)
if permitted_subtrees is None and excluded_subtrees is None:
raise ValueError(
"At least one of permitted_subtrees and excluded_subtrees "
"must not be None"
)
self._permitted_subtrees = permitted_subtrees
self._excluded_subtrees = excluded_subtrees
def __eq__(self, other):
if not isinstance(other, NameConstraints):
return NotImplemented
return (
self.excluded_subtrees == other.excluded_subtrees and
self.permitted_subtrees == other.permitted_subtrees
)
def __ne__(self, other):
return not self == other
def _validate_ip_name(self, tree):
if any(isinstance(name, IPAddress) and not isinstance(
name.value, (ipaddress.IPv4Network, ipaddress.IPv6Network)
) for name in tree):
raise TypeError(
"IPAddress name constraints must be an IPv4Network or"
" IPv6Network object"
)
def __repr__(self):
return (
u"<NameConstraints(permitted_subtrees={0.permitted_subtrees}, "
u"excluded_subtrees={0.excluded_subtrees})>".format(self)
)
permitted_subtrees = utils.read_only_property("_permitted_subtrees")
excluded_subtrees = utils.read_only_property("_excluded_subtrees")
class Extension(object):
def __init__(self, oid, critical, value):
if not isinstance(oid, ObjectIdentifier):
raise TypeError(
"oid argument must be an ObjectIdentifier instance."
)
if not isinstance(critical, bool):
raise TypeError("critical must be a boolean value")
self._oid = oid
self._critical = critical
self._value = value
oid = utils.read_only_property("_oid")
critical = utils.read_only_property("_critical")
value = utils.read_only_property("_value")
def __repr__(self):
return ("<Extension(oid={0.oid}, critical={0.critical}, "
"value={0.value})>").format(self)
def __eq__(self, other):
if not isinstance(other, Extension):
return NotImplemented
return (
self.oid == other.oid and
self.critical == other.critical and
self.value == other.value
)
def __ne__(self, other):
return not self == other
class GeneralNames(object):
def __init__(self, general_names):
general_names = list(general_names)
if not all(isinstance(x, GeneralName) for x in general_names):
raise TypeError(
"Every item in the general_names list must be an "
"object conforming to the GeneralName interface"
)
self._general_names = general_names
def __iter__(self):
return iter(self._general_names)
def __len__(self):
return len(self._general_names)
def get_values_for_type(self, type):
# Return the value of each GeneralName, except for OtherName instances
# which we return directly because they have two important properties,
# not just one value.
objs = (i for i in self if isinstance(i, type))
if type != OtherName:
objs = (i.value for i in objs)
return list(objs)
def __repr__(self):
return "<GeneralNames({0})>".format(self._general_names)
def __eq__(self, other):
if not isinstance(other, GeneralNames):
return NotImplemented
return self._general_names == other._general_names
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._general_names[idx]
@utils.register_interface(ExtensionType)
class SubjectAlternativeName(object):
oid = ExtensionOID.SUBJECT_ALTERNATIVE_NAME
def __init__(self, general_names):
self._general_names = GeneralNames(general_names)
def __iter__(self):
return iter(self._general_names)
def __len__(self):
return len(self._general_names)
def get_values_for_type(self, type):
return self._general_names.get_values_for_type(type)
def __repr__(self):
return "<SubjectAlternativeName({0})>".format(self._general_names)
def __eq__(self, other):
if not isinstance(other, SubjectAlternativeName):
return NotImplemented
return self._general_names == other._general_names
def __getitem__(self, idx):
return self._general_names[idx]
def __ne__(self, other):
return not self == other
@utils.register_interface(ExtensionType)
class IssuerAlternativeName(object):
oid = ExtensionOID.ISSUER_ALTERNATIVE_NAME
def __init__(self, general_names):
self._general_names = GeneralNames(general_names)
def __iter__(self):
return iter(self._general_names)
def __len__(self):
return len(self._general_names)
def get_values_for_type(self, type):
return self._general_names.get_values_for_type(type)
def __repr__(self):
return "<IssuerAlternativeName({0})>".format(self._general_names)
def __eq__(self, other):
if not isinstance(other, IssuerAlternativeName):
return NotImplemented
return self._general_names == other._general_names
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._general_names[idx]
@utils.register_interface(ExtensionType)
class CertificateIssuer(object):
oid = CRLEntryExtensionOID.CERTIFICATE_ISSUER
def __init__(self, general_names):
self._general_names = GeneralNames(general_names)
def __iter__(self):
return iter(self._general_names)
def __len__(self):
return len(self._general_names)
def get_values_for_type(self, type):
return self._general_names.get_values_for_type(type)
def __repr__(self):
return "<CertificateIssuer({0})>".format(self._general_names)
def __eq__(self, other):
if not isinstance(other, CertificateIssuer):
return NotImplemented
return self._general_names == other._general_names
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._general_names[idx]
@utils.register_interface(ExtensionType)
class CRLReason(object):
oid = CRLEntryExtensionOID.CRL_REASON
def __init__(self, reason):
if not isinstance(reason, ReasonFlags):
raise TypeError("reason must be an element from ReasonFlags")
self._reason = reason
def __repr__(self):
return "<CRLReason(reason={0})>".format(self._reason)
def __eq__(self, other):
if not isinstance(other, CRLReason):
return NotImplemented
return self.reason == other.reason
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.reason)
reason = utils.read_only_property("_reason")
@utils.register_interface(ExtensionType)
class InvalidityDate(object):
oid = CRLEntryExtensionOID.INVALIDITY_DATE
def __init__(self, invalidity_date):
if not isinstance(invalidity_date, datetime.datetime):
raise TypeError("invalidity_date must be a datetime.datetime")
self._invalidity_date = invalidity_date
def __repr__(self):
return "<InvalidityDate(invalidity_date={0})>".format(
self._invalidity_date
)
def __eq__(self, other):
if not isinstance(other, InvalidityDate):
return NotImplemented
return self.invalidity_date == other.invalidity_date
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.invalidity_date)
invalidity_date = utils.read_only_property("_invalidity_date")
@utils.register_interface(ExtensionType)
class UnrecognizedExtension(object):
def __init__(self, oid, value):
if not isinstance(oid, ObjectIdentifier):
raise TypeError("oid must be an ObjectIdentifier")
self._oid = oid
self._value = value
oid = utils.read_only_property("_oid")
value = utils.read_only_property("_value")
def __repr__(self):
return (
"<UnrecognizedExtension(oid={0.oid}, value={0.value!r})>".format(
self
)
)
def __eq__(self, other):
if not isinstance(other, UnrecognizedExtension):
return NotImplemented
return self.oid == other.oid and self.value == other.value
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.oid, self.value))
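# Usage sketch (values are illustrative): the classes above are plain value
# objects, e.g.
#   bc = BasicConstraints(ca=True, path_length=0)
#   ext = Extension(ExtensionOID.BASIC_CONSTRAINTS, True, bc)
#   Extensions([ext]).get_extension_for_class(BasicConstraints) is ext   # -> True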
|
|
# -*- coding: utf-8 -*-
import os
import shutil
import socket
import subprocess
import urllib
import urllib2
SqueakImage = "Squeak4.5-12568"
# You need to enter the real URL and have the server running
CODESPEED_URL = 'http://speed.bithug.org/'
class Project(object):
def __init__(self, name, executables={}, arguments=[], commitid=None):
self.commitid = commitid if commitid else self.get_commitid()
self.name = name
self.executables = executables
self.arguments = arguments
def run(self):
for executable in self.executables:
yield executable.name, executable.run(self.arguments)
def post_results(self):
for executable, output in self.run():
benchmarks = output.split('\n')
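# Each benchmark line is expected to be either "name;result" or
# "name;result;min;max"; anything else is silently skipped.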
for s in benchmarks:
if ';' in s:
results = s.split(';')
if len(results) in [2, 4]:
self.add(executable, *results)
def add(self, executable, benchmark, result, min=None, max=None):
print "Saving result %s for executable %s, benchmark %s" % (
result, executable, benchmark)
if min is max is None:
data = self.build_data(executable, benchmark, result)
else:
data = self.build_data_extended(executable, benchmark, result, min,
max)
params = urllib.urlencode(data)
response = "None"
print "Saving result for executable %s, revision %s, benchmark %s" % (
data['executable'], data['commitid'], data['benchmark'])
try:
f = urllib2.urlopen(CODESPEED_URL + 'result/add/', params)
except urllib2.HTTPError as e:
print str(e)
print e.read()
return
response = f.read()
f.close()
print "Server (%s) response: %s\n" % (CODESPEED_URL, response)
def get_commitid(self):
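# Ask Mercurial first, then fall back to git; whichever VCS answers
# provides the commit id reported to codespeed.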
try:
pipe = subprocess.Popen(
["hg", "log", "-l", "1", "--template", "{rev}:{node|short}"],
stdout=subprocess.PIPE
)
if pipe.wait() == 0:
return pipe.stdout.read()
except:
pass
try:
pipe = subprocess.Popen(
["git", "log", "-1", "--pretty=%H"],
stdout=subprocess.PIPE
)
if pipe.wait() == 0:
return pipe.stdout.read()
except:
pass
raise Exception("commitid not found. not a git or hg repo")
def build_data(self, executable, benchmark, result):
# Mandatory fields
return {
'commitid': self.commitid,
'branch': 'default',
'project': self.name,
'executable': executable,
'benchmark': benchmark,
'environment': socket.gethostname(),
'result_value': str(result),
}
# Optional fields
# {
# 'std_dev': 1.11111, # Optional. Default is blank
# 'max': 4001.6, # Optional. Default is blank
# 'min': 3995.1, # Optional. Default is blank
# }
def build_data_extended(self, executable, benchmark, result, min, max):
return dict(self.build_data(executable, benchmark, result), **{
'min': str(min),
'max': str(max)
}
)
class Archive(object):
def __init__(self, filename, target, func):
self.filename = filename
self.func = func
self.target = target
def extract(self):
self.func(self.filename, self.target)
def __enter__(self):
self.extract()
def __exit__(self, *_):
if os.path.exists(self.target) and os.path.isfile(self.target):
os.remove(self.target)
class Executable(object):
def __init__(self, name, path, url=None, callback=None):
self.name = name
self.path = path
if url:
self.download(url, callback=callback)
def ungzip(self, source, target):
import gzip
contents = gzip.open(source).read()
with open(target, "w") as t:
t.write(contents)
def untar(self, source, target):
import tarfile
try:
f = tarfile.open(source)
f.extractall(target)
finally:
f.close()
def download(self, url, callback=None):
if os.path.exists(self.path):
shutil.rmtree(os.path.dirname(self.path))
filename = url.rsplit("/", 1)[1]
if os.path.exists(filename):
os.remove(filename)
print "Downloading from", url
with open(filename, "w") as f:
f.write(urllib2.urlopen(url).read())
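# Extract the archive: .tar.gz/.tgz is gunzipped to a temporary .tar
# (removed by the Archive context manager) and then untarred into the
# current directory; plain .tar files are untarred directly.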
try:
print "Extracting", filename
if filename.endswith(".tar.gz") or filename.endswith(".tgz"):
tarfile = os.path.basename(filename) + ".tar"
with Archive(filename, tarfile, self.ungzip):
Archive(tarfile, ".", self.untar).extract()
elif filename.endswith(".tar"):
Archive(filename, ".", self.untar).extract()
else:
raise NotImplementedError
finally:
os.remove(filename)
if callback:
callback(self)
def run(self, args):
print 'Calling %s (%s) ...' % (self.name, " ".join([self.path] + args))
pipe = subprocess.Popen(
["%s" % self.path] + args,
stdout=subprocess.PIPE
)
out, err = pipe.communicate()
# errcode = pipe.wait()
print out
return out
# XXX: Find a better place to put this
def update_image(executable):
print "Updating image ..."
with open('update.st', 'w') as f:
f.write('''Smalltalk snapshot: true andQuit: true.''')
print executable.run(["-vm-display-X11", "-headless",
"images/%s" % SqueakImage, "../update.st"])
os.remove('update.st')
def find_cog_url():
baseurl = "http://www.mirandabanda.org/files/Cog/VM/"
r = urllib2.urlopen(baseurl)
ver = r.read().rsplit("VM.r", 1)[1].split("/", 1)[0]
vmfolder = "%s/VM.r%s/" % (baseurl, ver)
r = urllib2.urlopen(vmfolder).read()
off = r.find("coglinux")
filename = r[off:r.find(".tgz", off)] + ".tgz"
return ver, vmfolder + filename
cogid, cogurl = find_cog_url()
Cog = Project(
"squeak",
executables=[
Executable(
"cogvm",
"coglinux/squeak",
cogurl,
callback=update_image
),
Executable(
"stackvm",
"stackvm/bin/squeak",
"http://squeakvm.org/unix/release/"
"Squeak-4.10.2.2614-linux_i386.tar.gz",
callback=(lambda x: subprocess.Popen(
["mv", "Squeak-4.10.2.2614-linux_i386", "stackvm"]).wait())
)
],
arguments=["-vm-display-null",
"images/%s.image" % SqueakImage, '../benchmarks.st'],
commitid=cogid
)
RSqueakVM = Project(
"lang-smalltalk",
executables=[
Executable("rsqueak", "bash"),
# Executable("rsqueak-nojit", "./rsqueak-nojit-c")
],
arguments=["-c", "./rsqueak images/%s.image -m runSPyBenchmarks > >(tee "
"stdout.log) 2> >(tee stderr.log >&2)" % SqueakImage]
)
if __name__ == "__main__":
try:
for project in [Cog, RSqueakVM]:
project.post_results()
finally:
subprocess.Popen(["rm", '-r', "stackvm"])
|
|
import operator
import pytest
from flask_allows.allows import Allows
from flask_allows.overrides import Override, OverrideManager
from flask_allows.requirements import (
And,
C,
ConditionalRequirement,
Not,
Or,
Requirement,
wants_request,
)
def test_cant_create_Requirement():
with pytest.raises(TypeError) as excinfo:
Requirement()
assert "with abstract methods fulfill" in str(excinfo.value)
def test_call_fulfills_with_call(spy):
spy(object(), object())
assert spy.called
def test_ConditionalRequirement_defaults(always):
Cond = ConditionalRequirement(always)
assert (Cond.requirements, Cond.op, Cond.until, Cond.negated) == (
(always,),
operator.and_,
None,
None,
)
def test_empty_Conditional_is_True(member, request):
Cond = ConditionalRequirement()
assert Cond(member, request)
def test_custom_ConditionalRequirement(always):
Cond = ConditionalRequirement(
always, always, op=operator.xor, negated=True, until=False
)
assert (Cond.requirements, Cond.op, Cond.until, Cond.negated) == (
(always, always),
operator.xor,
False,
True,
)
def test_AndConditional_defaults(always):
Cond = And(always)
assert (Cond.requirements, Cond.op, Cond.until, Cond.negated) == (
(always,),
operator.and_,
False,
None,
)
def test_OrConditional_defaults(always):
Cond = Or(always)
assert (Cond.requirements, Cond.op, Cond.until, Cond.negated) == (
(always,),
operator.or_,
True,
None,
)
def test_NotConditional_defaults(always):
Cond = Not(always)
assert (Cond.requirements, Cond.op, Cond.until, Cond.negated) == (
(always,),
operator.and_,
None,
True,
)
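# The short-circuit tests below rely on 'until': Or stops evaluating once a
# requirement returns True, And stops once one returns False.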
def test_OrConditional_shortcircuit(always, never, member, request):
cond = Or(always, never)
cond.fulfill(member, request)
assert not never.called
def test_OrConditional_fulfills(always, never, member, request):
assert Or(always, never)(member, request)
assert Or(never, always)(member, request)
def test_OrConditional_shortcut(always):
A = C(always)
Cond = A | A
assert (Cond.requirements, Cond.op, Cond.until, Cond.negated) == (
(A, A),
operator.or_,
True,
None,
)
def test_AndConditional_shortcircuit(always, never, member, request):
cond = And(never, always)
cond.fulfill(member, request)
assert not always.called
def test_AndConditional_fulfills(always, never, member, request):
assert not And(always, never)(member, request)
assert not And(never, always)(member, request)
def test_AndConditional_shortcut(always):
A = C(always)
Cond = A & A
assert (Cond.requirements, Cond.op, Cond.until, Cond.negated) == (
(A, A),
operator.and_,
False,
None,
)
def test_NotConditional_shortcut(always):
A = C(always)
Cond = ~A
assert (Cond.requirements, Cond.op, Cond.until, Cond.negated) == (
(A,),
operator.and_,
None,
True,
)
def test_NotConditional_singular_true(always, member, request):
assert not Not(always)(member, request)
def test_NotConditional_singular_false(never, member, request):
assert Not(never)(member, request)
def test_NotConditional_many_all_true(always, member, request):
assert not Not(always, always)(member, request)
def test_NotConditional_many_all_false(never, member, request):
assert Not(never, never)(member, request)
def test_NotConditional_many_mixed(always, never, member, request):
assert Not(always, never)(member, request)
def test_supports_new_style_requirements(member, request):
class SomeRequirement(Requirement):
def fulfill(self, user):
return True
assert SomeRequirement()(member, request)
def test_ConditionalRequirement_supports_new_style_requirements(member, request):
def is_true(user):
return True
assert C(is_true)(member, request)
@pytest.mark.regression
def test_wants_request_stops_incorrect_useronly_flow(member, request):
"""
When a request parameter has a default value, requirement runners will
incorrectly decide it is a user only requirement and not provide the
request object to it.
"""
SENTINEL = object()
def my_requirement(user, request=SENTINEL):
return request is not SENTINEL
allows = Allows(app=None, identity_loader=lambda: member)
# incorrect flow happens here, only member is passed
assert not allows.fulfill([my_requirement], member)
assert allows.fulfill([wants_request(my_requirement)], member)
def test_conditional_skips_overridden_requirements(member, never, always, request):
manager = OverrideManager()
manager.push(Override(never))
reqs = And(never, always)
assert reqs.fulfill(member, request)
manager.pop()
def test_conditional_skips_overridden_requirements_even_if_nested(
member, always, never, request
):
manager = OverrideManager()
manager.push(Override(never))
reqs = And(And(And(always), Or(never)))
assert reqs.fulfill(member, request)
manager.pop()
|
|
"""Provide methods to bootstrap a Home Assistant instance."""
import asyncio
import logging
import logging.handlers
import os
import sys
from time import time
from collections import OrderedDict
from typing import Any, Optional, Dict
import voluptuous as vol
from homeassistant import (
core, config as conf_util, loader, components as core_components)
from homeassistant.components import persistent_notification
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
from homeassistant.setup import async_setup_component
from homeassistant.util.logging import AsyncHandler
from homeassistant.util.package import async_get_user_site, get_user_site
from homeassistant.util.yaml import clear_secret_cache
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.signal import async_register_signal_handling
_LOGGER = logging.getLogger(__name__)
ERROR_LOG_FILENAME = 'home-assistant.log'
# hass.data key for logging information.
DATA_LOGGING = 'logging'
FIRST_INIT_COMPONENT = set((
'recorder', 'mqtt', 'mqtt_eventstream', 'logger', 'introduction',
'frontend', 'history'))
def from_config_dict(config: Dict[str, Any],
hass: Optional[core.HomeAssistant]=None,
config_dir: Optional[str]=None,
enable_log: bool=True,
verbose: bool=False,
skip_pip: bool=False,
log_rotate_days: Any=None,
log_file: Any=None) \
-> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a configuration dictionary.
Dynamically loads required components and their dependencies.
"""
if hass is None:
hass = core.HomeAssistant()
if config_dir is not None:
config_dir = os.path.abspath(config_dir)
hass.config.config_dir = config_dir
hass.loop.run_until_complete(
async_mount_local_lib_path(config_dir, hass.loop))
# run task
hass = hass.loop.run_until_complete(
async_from_config_dict(
config, hass, config_dir, enable_log, verbose, skip_pip,
log_rotate_days, log_file)
)
return hass
@asyncio.coroutine
def async_from_config_dict(config: Dict[str, Any],
hass: core.HomeAssistant,
config_dir: Optional[str]=None,
enable_log: bool=True,
verbose: bool=False,
skip_pip: bool=False,
log_rotate_days: Any=None,
log_file: Any=None) \
-> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a configuration dictionary.
Dynamically loads required components and their dependencies.
This method is a coroutine.
"""
start = time()
if enable_log:
async_enable_logging(hass, verbose, log_rotate_days, log_file)
if sys.version_info[:2] < (3, 5):
_LOGGER.warning(
'Python 3.4 support has been deprecated and will be removed in '
'the beginning of 2018. Please upgrade Python or your operating '
'system. More info: https://home-assistant.io/blog/2017/10/06/'
'deprecating-python-3.4-support/'
)
core_config = config.get(core.DOMAIN, {})
try:
yield from conf_util.async_process_ha_core_config(hass, core_config)
except vol.Invalid as ex:
conf_util.async_log_exception(ex, 'homeassistant', core_config, hass)
return None
yield from hass.async_add_job(conf_util.process_ha_config_upgrade, hass)
hass.config.skip_pip = skip_pip
if skip_pip:
_LOGGER.warning("Skipping pip installation of required modules. "
"This may cause issues")
if not loader.PREPARED:
yield from hass.async_add_job(loader.prepare, hass)
# Merge packages
conf_util.merge_packages_config(
config, core_config.get(conf_util.CONF_PACKAGES, {}))
# Make a copy because we are mutating it.
# Use OrderedDict in case original one was one.
# Convert values to dictionaries if they are None
new_config = OrderedDict()
for key, value in config.items():
new_config[key] = value or {}
config = new_config
# Filter out the repeating and common config section [homeassistant]
components = set(key.split(' ')[0] for key in config.keys()
if key != core.DOMAIN)
# setup components
# pylint: disable=not-an-iterable
res = yield from core_components.async_setup(hass, config)
if not res:
_LOGGER.error("Home Assistant core failed to initialize. "
"Further initialization aborted")
return hass
yield from persistent_notification.async_setup(hass, config)
_LOGGER.info("Home Assistant core initialized")
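# Components in FIRST_INIT_COMPONENT (recorder, logger, frontend, ...) are
# set up before everything else so later components can rely on them.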
# stage 1
for component in components:
if component not in FIRST_INIT_COMPONENT:
continue
hass.async_add_job(async_setup_component(hass, component, config))
yield from hass.async_block_till_done()
# stage 2
for component in components:
if component in FIRST_INIT_COMPONENT:
continue
hass.async_add_job(async_setup_component(hass, component, config))
yield from hass.async_block_till_done()
stop = time()
_LOGGER.info("Home Assistant initialized in %.2fs", stop-start)
async_register_signal_handling(hass)
return hass
def from_config_file(config_path: str,
hass: Optional[core.HomeAssistant]=None,
verbose: bool=False,
skip_pip: bool=True,
log_rotate_days: Any=None,
log_file: Any=None):
"""Read the configuration file and try to start all the functionality.
Will add functionality to 'hass' parameter if given,
instantiates a new Home Assistant object if 'hass' is not given.
"""
if hass is None:
hass = core.HomeAssistant()
# run task
hass = hass.loop.run_until_complete(
async_from_config_file(
config_path, hass, verbose, skip_pip, log_rotate_days, log_file)
)
return hass
@asyncio.coroutine
def async_from_config_file(config_path: str,
hass: core.HomeAssistant,
verbose: bool=False,
skip_pip: bool=True,
log_rotate_days: Any=None,
log_file: Any=None):
"""Read the configuration file and try to start all the functionality.
Will add functionality to 'hass' parameter.
This method is a coroutine.
"""
# Set config dir to directory holding config file
config_dir = os.path.abspath(os.path.dirname(config_path))
hass.config.config_dir = config_dir
yield from async_mount_local_lib_path(config_dir, hass.loop)
async_enable_logging(hass, verbose, log_rotate_days, log_file)
try:
config_dict = yield from hass.async_add_job(
conf_util.load_yaml_config_file, config_path)
except HomeAssistantError as err:
_LOGGER.error("Error loading %s: %s", config_path, err)
return None
finally:
clear_secret_cache()
hass = yield from async_from_config_dict(
config_dict, hass, enable_log=False, skip_pip=skip_pip)
return hass
@core.callback
def async_enable_logging(hass: core.HomeAssistant, verbose: bool=False,
log_rotate_days=None, log_file=None) -> None:
"""Set up the logging.
This method must be run in the event loop.
"""
logging.basicConfig(level=logging.INFO)
fmt = ("%(asctime)s %(levelname)s (%(threadName)s) "
"[%(name)s] %(message)s")
colorfmt = "%(log_color)s{}%(reset)s".format(fmt)
datefmt = '%Y-%m-%d %H:%M:%S'
# Suppress overly verbose logs from libraries that aren't helpful
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('aiohttp.access').setLevel(logging.WARNING)
try:
from colorlog import ColoredFormatter
logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
))
except ImportError:
pass
# Log errors to a file if we have write access to file or config dir
if log_file is None:
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
else:
err_log_path = os.path.abspath(log_file)
err_path_exists = os.path.isfile(err_log_path)
err_dir = os.path.dirname(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or \
(not err_path_exists and os.access(err_dir, os.W_OK)):
if log_rotate_days:
err_handler = logging.handlers.TimedRotatingFileHandler(
err_log_path, when='midnight', backupCount=log_rotate_days)
else:
err_handler = logging.FileHandler(
err_log_path, mode='w', delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
async_handler = AsyncHandler(hass.loop, err_handler)
@asyncio.coroutine
def async_stop_async_handler(event):
"""Cleanup async handler."""
logging.getLogger('').removeHandler(async_handler)
yield from async_handler.async_close(blocking=True)
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_CLOSE, async_stop_async_handler)
logger = logging.getLogger('')
logger.addHandler(async_handler)
logger.setLevel(logging.INFO)
# Save the log file location for access by other components.
hass.data[DATA_LOGGING] = err_log_path
else:
_LOGGER.error(
"Unable to setup error log %s (access denied)", err_log_path)
def mount_local_lib_path(config_dir: str) -> str:
"""Add local library to Python Path."""
deps_dir = os.path.join(config_dir, 'deps')
lib_dir = get_user_site(deps_dir)
if lib_dir not in sys.path:
sys.path.insert(0, lib_dir)
return deps_dir
@asyncio.coroutine
def async_mount_local_lib_path(config_dir: str,
loop: asyncio.AbstractEventLoop) -> str:
"""Add local library to Python Path.
This function is a coroutine.
"""
deps_dir = os.path.join(config_dir, 'deps')
lib_dir = yield from async_get_user_site(deps_dir, loop=loop)
if lib_dir not in sys.path:
sys.path.insert(0, lib_dir)
return deps_dir
|
|
"""Provide a way to connect devices to one physical location."""
from collections import OrderedDict
from typing import Container, Dict, Iterable, List, MutableMapping, Optional, cast
import attr
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.loader import bind_hass
from homeassistant.util import slugify
from .typing import HomeAssistantType
# mypy: disallow-any-generics
DATA_REGISTRY = "area_registry"
EVENT_AREA_REGISTRY_UPDATED = "area_registry_updated"
STORAGE_KEY = "core.area_registry"
STORAGE_VERSION = 1
SAVE_DELAY = 10
@attr.s(slots=True, frozen=True)
class AreaEntry:
"""Area Registry Entry."""
name: str = attr.ib()
normalized_name: str = attr.ib()
id: Optional[str] = attr.ib(default=None)
def generate_id(self, existing_ids: Container[str]) -> None:
"""Initialize ID."""
suggestion = suggestion_base = slugify(self.name)
tries = 1
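# On collision, append _2, _3, ... until the id is unique among existing_ids.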
while suggestion in existing_ids:
tries += 1
suggestion = f"{suggestion_base}_{tries}"
object.__setattr__(self, "id", suggestion)
class AreaRegistry:
"""Class to hold a registry of areas."""
def __init__(self, hass: HomeAssistantType) -> None:
"""Initialize the area registry."""
self.hass = hass
self.areas: MutableMapping[str, AreaEntry] = {}
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self._normalized_name_area_idx: Dict[str, str] = {}
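# Maps normalized (casefolded, whitespace-stripped) names to area ids so
# name lookups avoid scanning every area.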
@callback
def async_get_area(self, area_id: str) -> Optional[AreaEntry]:
"""Get area by id."""
return self.areas.get(area_id)
@callback
def async_get_area_by_name(self, name: str) -> Optional[AreaEntry]:
"""Get area by name."""
normalized_name = normalize_area_name(name)
if normalized_name not in self._normalized_name_area_idx:
return None
return self.areas[self._normalized_name_area_idx[normalized_name]]
@callback
def async_list_areas(self) -> Iterable[AreaEntry]:
"""Get all areas."""
return self.areas.values()
@callback
def async_get_or_create(self, name: str) -> AreaEntry:
"""Get or create an area."""
area = self.async_get_area_by_name(name)
if area:
return area
return self.async_create(name)
@callback
def async_create(self, name: str) -> AreaEntry:
"""Create a new area."""
normalized_name = normalize_area_name(name)
if self.async_get_area_by_name(name):
raise ValueError(f"The name {name} ({normalized_name}) is already in use")
area = AreaEntry(name=name, normalized_name=normalized_name)
area.generate_id(self.areas)
assert area.id is not None
self.areas[area.id] = area
self._normalized_name_area_idx[normalized_name] = area.id
self.async_schedule_save()
self.hass.bus.async_fire(
EVENT_AREA_REGISTRY_UPDATED, {"action": "create", "area_id": area.id}
)
return area
@callback
def async_delete(self, area_id: str) -> None:
"""Delete area."""
area = self.areas[area_id]
device_registry = dr.async_get(self.hass)
entity_registry = er.async_get(self.hass)
device_registry.async_clear_area_id(area_id)
entity_registry.async_clear_area_id(area_id)
del self.areas[area_id]
del self._normalized_name_area_idx[area.normalized_name]
self.hass.bus.async_fire(
EVENT_AREA_REGISTRY_UPDATED, {"action": "remove", "area_id": area_id}
)
self.async_schedule_save()
@callback
def async_update(self, area_id: str, name: str) -> AreaEntry:
"""Update name of area."""
updated = self._async_update(area_id, name)
self.hass.bus.async_fire(
EVENT_AREA_REGISTRY_UPDATED, {"action": "update", "area_id": area_id}
)
return updated
@callback
def _async_update(self, area_id: str, name: str) -> AreaEntry:
"""Update name of area."""
old = self.areas[area_id]
changes = {}
if name == old.name:
return old
normalized_name = normalize_area_name(name)
if normalized_name != old.normalized_name:
if self.async_get_area_by_name(name):
raise ValueError(
f"The name {name} ({normalized_name}) is already in use"
)
changes["name"] = name
changes["normalized_name"] = normalized_name
new = self.areas[area_id] = attr.evolve(old, **changes)
self._normalized_name_area_idx[
normalized_name
] = self._normalized_name_area_idx.pop(old.normalized_name)
self.async_schedule_save()
return new
async def async_load(self) -> None:
"""Load the area registry."""
data = await self._store.async_load()
areas: MutableMapping[str, AreaEntry] = OrderedDict()
if data is not None:
for area in data["areas"]:
normalized_name = normalize_area_name(area["name"])
areas[area["id"]] = AreaEntry(
name=area["name"], id=area["id"], normalized_name=normalized_name
)
self._normalized_name_area_idx[normalized_name] = area["id"]
self.areas = areas
@callback
def async_schedule_save(self) -> None:
"""Schedule saving the area registry."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self) -> Dict[str, List[Dict[str, Optional[str]]]]:
"""Return data of area registry to store in a file."""
data = {}
data["areas"] = [
{
"name": entry.name,
"id": entry.id,
}
for entry in self.areas.values()
]
return data
@callback
def async_get(hass: HomeAssistantType) -> AreaRegistry:
"""Get area registry."""
return cast(AreaRegistry, hass.data[DATA_REGISTRY])
async def async_load(hass: HomeAssistantType) -> None:
"""Load area registry."""
assert DATA_REGISTRY not in hass.data
hass.data[DATA_REGISTRY] = AreaRegistry(hass)
await hass.data[DATA_REGISTRY].async_load()
@bind_hass
async def async_get_registry(hass: HomeAssistantType) -> AreaRegistry:
"""Get area registry.
This is deprecated and will be removed in the future. Use async_get instead.
"""
return async_get(hass)
def normalize_area_name(area_name: str) -> str:
"""Normalize an area name by removing whitespace and case folding."""
return area_name.casefold().replace(" ", "")
|
|
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for cinder.volume.rpcapi
"""
import copy
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from cinder import context
from cinder import db
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils
CONF = cfg.CONF
class VolumeRpcAPITestCase(test.TestCase):
def setUp(self):
super(VolumeRpcAPITestCase, self).setUp()
self.context = context.get_admin_context()
vol = {}
vol['host'] = 'fake_host'
vol['availability_zone'] = CONF.storage_availability_zone
vol['status'] = "available"
vol['attach_status'] = "detached"
vol['metadata'] = {"test_key": "test_val"}
vol['size'] = 1
volume = db.volume_create(self.context, vol)
kwargs = {
'status': "creating",
'progress': '0%',
'display_name': 'fake_name',
'display_description': 'fake_description'}
snapshot = tests_utils.create_snapshot(self.context, vol['id'],
**kwargs)
source_group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
host='fakehost@fakedrv#fakepool')
cgsnapshot = tests_utils.create_cgsnapshot(
self.context,
consistencygroup_id=source_group.id)
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
host='fakehost@fakedrv#fakepool',
cgsnapshot_id=cgsnapshot.id)
group2 = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
host='fakehost@fakedrv#fakepool',
source_cgid=source_group.id)
group = objects.ConsistencyGroup.get_by_id(self.context, group.id)
group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id)
self.fake_volume = jsonutils.to_primitive(volume)
self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol)
self.fake_volume_metadata = volume["volume_metadata"]
self.fake_snapshot = snapshot
self.fake_reservations = ["RESERVATION"]
self.fake_cg = group
self.fake_cg2 = group2
self.fake_src_cg = jsonutils.to_primitive(source_group)
self.fake_cgsnap = cgsnapshot
def test_serialized_volume_has_id(self):
self.assertIn('id', self.fake_volume)
def _test_volume_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
if 'rpcapi_class' in kwargs:
rpcapi_class = kwargs['rpcapi_class']
del kwargs['rpcapi_class']
else:
rpcapi_class = volume_rpcapi.VolumeAPI
rpcapi = rpcapi_class()
expected_retval = 'foo' if method == 'call' else None
target = {
"version": kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
}
if 'request_spec' in kwargs:
spec = jsonutils.to_primitive(kwargs['request_spec'])
kwargs['request_spec'] = spec
expected_msg = copy.deepcopy(kwargs)
if 'volume' in expected_msg:
volume = expected_msg['volume']
# NOTE(thangp): copy.deepcopy() is making oslo_versionedobjects
# think that 'metadata' was changed.
if isinstance(volume, objects.Volume):
volume.obj_reset_changes()
del expected_msg['volume']
expected_msg['volume_id'] = volume['id']
expected_msg['volume'] = volume
if 'snapshot' in expected_msg:
snapshot = expected_msg['snapshot']
del expected_msg['snapshot']
expected_msg['snapshot_id'] = snapshot.id
expected_msg['snapshot'] = snapshot
if 'cgsnapshot' in expected_msg:
cgsnapshot = expected_msg['cgsnapshot']
if cgsnapshot:
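# Touch the consistencygroup relationship on both copies so it is
# lazy-loaded before the expected and actual primitives are compared.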
cgsnapshot.consistencygroup
kwargs['cgsnapshot'].consistencygroup
if 'host' in expected_msg:
del expected_msg['host']
if 'dest_host' in expected_msg:
dest_host = expected_msg['dest_host']
dest_host_dict = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
del expected_msg['dest_host']
expected_msg['host'] = dest_host_dict
if 'new_volume' in expected_msg:
volume = expected_msg['new_volume']
del expected_msg['new_volume']
expected_msg['new_volume_id'] = volume['id']
if 'host' in kwargs:
host = kwargs['host']
elif 'group' in kwargs:
host = kwargs['group']['host']
elif 'volume' in kwargs:
host = kwargs['volume']['host']
elif 'snapshot' in kwargs:
host = 'fake_host'
elif 'cgsnapshot' in kwargs:
host = kwargs['cgsnapshot'].consistencygroup.host
target['server'] = utils.extract_host(host)
target['topic'] = '%s.%s' % (CONF.volume_topic, host)
self.fake_args = None
self.fake_kwargs = None
def _fake_prepare_method(*args, **kwds):
for kwd in kwds:
self.assertEqual(kwds[kwd], target[kwd])
return rpcapi.client
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
self.stubs.Set(rpcapi.client, "prepare", _fake_prepare_method)
self.stubs.Set(rpcapi.client, rpc_method, _fake_rpc_method)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(expected_retval, retval)
expected_args = [ctxt, method]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(expected_arg, arg)
for kwarg, value in self.fake_kwargs.items():
if isinstance(value, objects.Snapshot):
expected_snapshot = expected_msg[kwarg].obj_to_primitive()
snapshot = value.obj_to_primitive()
self.assertEqual(expected_snapshot, snapshot)
elif isinstance(value, objects.ConsistencyGroup):
expected_cg = expected_msg[kwarg].obj_to_primitive()
cg = value.obj_to_primitive()
self.assertEqual(expected_cg, cg)
elif isinstance(value, objects.CGSnapshot):
expected_cgsnapshot = expected_msg[kwarg].obj_to_primitive()
cgsnapshot = value.obj_to_primitive()
self.assertEqual(expected_cgsnapshot, cgsnapshot)
elif isinstance(value, objects.Volume):
expected_volume = expected_msg[kwarg].obj_to_primitive()
volume = value.obj_to_primitive()
self.assertEqual(expected_volume, volume)
else:
self.assertEqual(expected_msg[kwarg], value)
def test_create_consistencygroup(self):
self._test_volume_api('create_consistencygroup', rpc_method='cast',
group=self.fake_cg, host='fake_host1',
version='1.26')
def test_delete_consistencygroup(self):
self._test_volume_api('delete_consistencygroup', rpc_method='cast',
group=self.fake_cg, version='1.26')
def test_update_consistencygroup(self):
self._test_volume_api('update_consistencygroup', rpc_method='cast',
group=self.fake_cg, add_volumes=['vol1'],
remove_volumes=['vol2'], version='1.26')
def test_create_cgsnapshot(self):
self._test_volume_api('create_cgsnapshot', rpc_method='cast',
cgsnapshot=self.fake_cgsnap, version='1.31')
def test_delete_cgsnapshot(self):
self._test_volume_api('delete_cgsnapshot', rpc_method='cast',
cgsnapshot=self.fake_cgsnap, version='1.31')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_create_volume(self, can_send_version):
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
host='fake_host1',
request_spec='fake_request_spec',
filter_properties='fake_properties',
allow_reschedule=True,
version='1.32')
can_send_version.assert_called_once_with('1.32')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_create_volume_old(self, can_send_version):
# Tests backwards compatibility with older clients
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
host='fake_host1',
request_spec='fake_request_spec',
filter_properties='fake_properties',
allow_reschedule=True,
version='1.24')
can_send_version.assert_called_once_with('1.32')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_create_volume_serialization(self, can_send_version):
request_spec = {"metadata": self.fake_volume_metadata}
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
host='fake_host1',
request_spec=request_spec,
filter_properties='fake_properties',
allow_reschedule=True,
version='1.32')
can_send_version.assert_called_once_with('1.32')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_delete_volume(self, can_send_version):
self._test_volume_api('delete_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
unmanage_only=False,
version='1.33')
can_send_version.assert_called_once_with('1.33')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_delete_volume_old(self, can_send_version):
self._test_volume_api('delete_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
unmanage_only=False,
version='1.15')
can_send_version.assert_called_once_with('1.33')
def test_create_snapshot(self):
self._test_volume_api('create_snapshot',
rpc_method='cast',
volume=self.fake_volume,
snapshot=self.fake_snapshot)
def test_delete_snapshot(self):
self._test_volume_api('delete_snapshot',
rpc_method='cast',
snapshot=self.fake_snapshot,
host='fake_host',
unmanage_only=False)
def test_delete_snapshot_with_unmanage_only(self):
self._test_volume_api('delete_snapshot',
rpc_method='cast',
snapshot=self.fake_snapshot,
host='fake_host',
unmanage_only=True)
def test_attach_volume_to_instance(self):
self._test_volume_api('attach_volume',
rpc_method='call',
volume=self.fake_volume,
instance_uuid='fake_uuid',
host_name=None,
mountpoint='fake_mountpoint',
mode='ro',
version='1.11')
def test_attach_volume_to_host(self):
self._test_volume_api('attach_volume',
rpc_method='call',
volume=self.fake_volume,
instance_uuid=None,
host_name='fake_host',
mountpoint='fake_mountpoint',
mode='rw',
version='1.11')
def test_detach_volume(self):
self._test_volume_api('detach_volume',
rpc_method='call',
volume=self.fake_volume,
attachment_id='fake_uuid',
version="1.20")
def test_copy_volume_to_image(self):
self._test_volume_api('copy_volume_to_image',
rpc_method='cast',
volume=self.fake_volume,
image_meta={'id': 'fake_image_id',
'container_format': 'fake_type',
'disk_format': 'fake_type'},
version='1.3')
def test_initialize_connection(self):
self._test_volume_api('initialize_connection',
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector')
def test_terminate_connection(self):
self._test_volume_api('terminate_connection',
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector',
force=False)
def test_accept_transfer(self):
self._test_volume_api('accept_transfer',
rpc_method='call',
volume=self.fake_volume,
new_user='e5565fd0-06c8-11e3-'
'8ffd-0800200c9b77',
new_project='e4465fd0-06c8-11e3'
'-8ffd-0800200c9a66',
version='1.9')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_extend_volume(self, can_send_version):
self._test_volume_api('extend_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
new_size=1,
reservations=self.fake_reservations,
version='1.35')
can_send_version.assert_called_once_with('1.35')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_extend_volume_old(self, can_send_version):
self._test_volume_api('extend_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
new_size=1,
reservations=self.fake_reservations,
version='1.14')
can_send_version.assert_called_once_with('1.35')
def test_migrate_volume(self):
class FakeHost(object):
def __init__(self):
self.host = 'host'
self.capabilities = {}
dest_host = FakeHost()
self._test_volume_api('migrate_volume',
rpc_method='cast',
volume=self.fake_volume,
dest_host=dest_host,
force_host_copy=True,
version='1.8')
def test_migrate_volume_completion(self):
self._test_volume_api('migrate_volume_completion',
rpc_method='call',
volume=self.fake_volume,
new_volume=self.fake_volume,
error=False,
version='1.10')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_retype(self, can_send_version):
class FakeHost(object):
def __init__(self):
self.host = 'host'
self.capabilities = {}
dest_host = FakeHost()
self._test_volume_api('retype',
rpc_method='cast',
volume=self.fake_volume_obj,
new_type_id='fake',
dest_host=dest_host,
migration_policy='never',
reservations=None,
version='1.34')
can_send_version.assert_called_once_with('1.34')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_retype_old(self, can_send_version):
class FakeHost(object):
def __init__(self):
self.host = 'host'
self.capabilities = {}
dest_host = FakeHost()
self._test_volume_api('retype',
rpc_method='cast',
volume=self.fake_volume_obj,
new_type_id='fake',
dest_host=dest_host,
migration_policy='never',
reservations=None,
version='1.12')
can_send_version.assert_called_once_with('1.34')
def test_manage_existing(self):
self._test_volume_api('manage_existing',
rpc_method='cast',
volume=self.fake_volume,
ref={'lv_name': 'foo'},
version='1.15')
def test_manage_existing_snapshot(self):
volume_update = {'host': 'fake_host'}
snapshot = {
'id': 1,
'volume_id': 'fake_id',
'status': "creating",
'progress': '0%',
'volume_size': 0,
'display_name': 'fake_name',
'display_description': 'fake_description',
'volume': fake_volume.fake_db_volume(**volume_update),
'expected_attrs': ['volume'], }
my_fake_snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
**snapshot)
self._test_volume_api('manage_existing_snapshot',
rpc_method='cast',
snapshot=my_fake_snapshot_obj,
ref='foo',
host='fake_host',
version='1.28')
def test_promote_replica(self):
self._test_volume_api('promote_replica',
rpc_method='cast',
volume=self.fake_volume,
version='1.17')
def test_reenable_replica(self):
self._test_volume_api('reenable_replication',
rpc_method='cast',
volume=self.fake_volume,
version='1.17')
def test_create_consistencygroup_from_src_cgsnapshot(self):
self._test_volume_api('create_consistencygroup_from_src',
rpc_method='cast',
group=self.fake_cg,
cgsnapshot=self.fake_cgsnap,
source_cg=None,
version='1.31')
def test_create_consistencygroup_from_src_cg(self):
self._test_volume_api('create_consistencygroup_from_src',
rpc_method='cast',
group=self.fake_cg2,
cgsnapshot=None,
source_cg=self.fake_src_cg,
version='1.31')
def test_get_capabilities(self):
self._test_volume_api('get_capabilities',
rpc_method='call',
host='fake_host',
discover=True,
version='1.29')
def test_remove_export(self):
self._test_volume_api('remove_export',
rpc_method='cast',
volume=self.fake_volume,
version='1.30')
|
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from pecan import abort
from pecan import request
from pecan import response
from pecan import rest
from pecan.secure import secure
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from storyboard._i18n import _
from storyboard.api.auth import authorization_checks as checks
from storyboard.api.v1.search import search_engine
from storyboard.api.v1 import wmodels
from storyboard.common import decorators
from storyboard.common import event_types
from storyboard.common import exception as exc
from storyboard.db.api import comments as comments_api
from storyboard.db.api import stories as stories_api
from storyboard.db.api import timeline_events as events_api
CONF = cfg.CONF
SEARCH_ENGINE = search_engine.get_engine()
class TimeLineEventsController(rest.RestController):
"""Manages timeline events."""
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose(wmodels.TimeLineEvent, int)
def get_one(self, event_id):
"""Retrieve details about one event.
Example::
curl https://my.example.org/api/v1/events/15994
:param event_id: An ID of the event.
"""
event = events_api.event_get(event_id,
current_user=request.current_user_id)
if events_api.is_visible(event, request.current_user_id):
wsme_event = wmodels.TimeLineEvent.from_db_model(event)
wsme_event = wmodels.TimeLineEvent.resolve_event_values(wsme_event)
return wsme_event
else:
raise exc.NotFound(_("Event %s not found") % event_id)
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose([wmodels.TimeLineEvent], int, int, int, [wtypes.text],
int, int, wtypes.text, wtypes.text)
def get_all(self, story_id=None, worklist_id=None, board_id=None,
event_type=None, offset=None, limit=None,
sort_field=None, sort_dir=None):
"""Retrieve a filtered list of all events.
With no filters or limit set this will likely take a long time
and return a very long list. Applying some filters is recommended.
Example::
curl https://my.example.org/api/v1/events
:param story_id: Filter events by story ID.
:param worklist_id: Filter events by worklist ID.
:param board_id: Filter events by board ID.
:param event_type: A selection of event types to get.
:param offset: The offset to start the page at.
:param limit: The number of events to retrieve.
:param sort_field: The name of the field to sort on.
:param sort_dir: Sort direction for results (asc, desc).
"""
current_user = request.current_user_id
# Boundary check on limit.
if limit is not None:
limit = max(0, limit)
# Sanity check on event types.
if event_type:
for r_type in event_type:
if r_type not in event_types.ALL:
msg = _('Invalid event_type requested. Event type must be '
'one of the following: %s')
msg = msg % (', '.join(event_types.ALL),)
abort(400, msg)
events = events_api.events_get_all(story_id=story_id,
worklist_id=worklist_id,
board_id=board_id,
event_type=event_type,
sort_field=sort_field,
sort_dir=sort_dir,
current_user=current_user)
# Apply the query response headers.
if limit:
response.headers['X-Limit'] = str(limit)
if offset is not None:
response.headers['X-Offset'] = str(offset)
visible = [event for event in events
if events_api.is_visible(event, current_user)]
if offset is None:
offset = 0
if limit is None:
limit = len(visible)
response.headers['X-Total'] = str(len(visible))
return [wmodels.TimeLineEvent.resolve_event_values(
wmodels.TimeLineEvent.from_db_model(event))
for event in visible[offset:limit + offset]]
class NestedTimeLineEventsController(rest.RestController):
"""Manages comments."""
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose(wmodels.TimeLineEvent, int, int)
def get_one(self, story_id, event_id):
"""Retrieve details about one event.
Example::
curl https://my.example.org/api/v1/stories/11/events/15994
:param story_id: An ID of the story. It stays in params as a
placeholder so that pecan knows where to match an
incoming value. It remains unused, since events
have their own unique ids.
:param event_id: An ID of the event.
"""
event = events_api.event_get(event_id,
current_user=request.current_user_id)
if event:
wsme_event = wmodels.TimeLineEvent.from_db_model(event)
wsme_event = wmodels.TimeLineEvent.resolve_event_values(wsme_event)
return wsme_event
else:
raise exc.NotFound(_("Event %s not found") % event_id)
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose([wmodels.TimeLineEvent], int, [wtypes.text], int,
int, int, wtypes.text, wtypes.text)
def get_all(self, story_id=None, event_type=None, marker=None,
offset=None, limit=None, sort_field=None, sort_dir=None):
"""Retrieve all events that have happened under specified story.
Example::
curl https://my.example.org/api/v1/stories/11/events
:param story_id: Filter events by story ID.
:param event_type: A selection of event types to get.
:param marker: The resource id where the page should begin.
:param offset: The offset to start the page at.
:param limit: The number of events to retrieve.
:param sort_field: The name of the field to sort on.
:param sort_dir: Sort direction for results (asc, desc).
"""
current_user = request.current_user_id
# Boundary check on limit.
if limit is not None:
limit = max(0, limit)
# Sanity check on event types.
if event_type:
for r_type in event_type:
if r_type not in event_types.ALL:
msg = _('Invalid event_type requested. Event type must be '
'one of the following: %s')
msg = msg % (', '.join(event_types.ALL),)
abort(400, msg)
# Resolve the marker record.
marker_event = None
if marker is not None:
marker_event = events_api.event_get(marker)
event_count = events_api.events_get_count(story_id=story_id,
event_type=event_type,
current_user=current_user)
events = events_api.events_get_all(story_id=story_id,
event_type=event_type,
marker=marker_event,
offset=offset,
limit=limit,
sort_field=sort_field,
sort_dir=sort_dir,
current_user=current_user)
# Apply the query response headers.
if limit:
response.headers['X-Limit'] = str(limit)
response.headers['X-Total'] = str(event_count)
if marker_event:
response.headers['X-Marker'] = str(marker_event.id)
if offset is not None:
response.headers['X-Offset'] = str(offset)
return [wmodels.TimeLineEvent.resolve_event_values(
wmodels.TimeLineEvent.from_db_model(event)) for event in events]
class CommentsHistoryController(rest.RestController):
"""Manages comment history."""
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose([wmodels.Comment], int, int)
def get(self, story_id, comment_id):
"""Return any historical versions of this comment.
:param story_id: The ID of the story.
:param comment_id: The ID of the comment to inspect history of.
"""
comment = comments_api.comment_get(comment_id)
if comment is None:
raise exc.NotFound(_("Comment %s not found") % comment_id)
# Check that the user can actually see the relevant story
story = stories_api.story_get_simple(
comment.event[0].story_id, current_user=request.current_user_id)
if story is None:
raise exc.NotFound(_("Comment %s not found") % comment_id)
return [wmodels.Comment.from_db_model(old_comment)
for old_comment in comment.history]
class CommentsController(rest.RestController):
"""Manages comments."""
history = CommentsHistoryController()
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose(wmodels.Comment, int, int)
def get_one(self, story_id, comment_id):
"""Retrieve details about one comment.
Example::
curl https://my.example.org/api/v1/stories/11/comments/6834
:param story_id: An ID of the story. It stays in params as a
placeholder so that pecan knows where to match an
incoming value. It remains unused, since
comments have their own unique ids.
:param comment_id: An ID of the comment.
"""
comment = comments_api.comment_get(comment_id)
if comment is None:
raise exc.NotFound(_("Comment %s not found") % comment_id)
# Check that the user can actually see the relevant story
story = stories_api.story_get_simple(
comment.event[0].story_id, current_user=request.current_user_id)
if story is None:
raise exc.NotFound(_("Comment %s not found") % comment_id)
return wmodels.Comment.from_db_model(comment)
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose([wmodels.Comment], int, int, int, wtypes.text,
wtypes.text)
def get_all(self, story_id=None, marker=None, limit=None, sort_field='id',
sort_dir='asc'):
"""Retrieve all comments posted under specified story.
Example::
curl https://my.example.org/api/v1/stories/11/comments
:param story_id: Filter comments by story ID.
:param marker: The resource id where the page should begin.
:param limit: The number of comments to retrieve.
:param sort_field: The name of the field to sort on.
:param sort_dir: Sort direction for results (asc, desc).
"""
current_user = request.current_user_id
# Boundary check on limit.
if limit is not None:
limit = max(0, limit)
# Resolve the marker record.
marker_event = None
if marker:
event_query = \
events_api.events_get_all(comment_id=marker,
event_type=event_types.USER_COMMENT,
current_user=current_user)
if len(event_query) > 0:
marker_event = event_query[0]
events_count = events_api.events_get_count(
story_id=story_id,
event_type=event_types.USER_COMMENT,
current_user=current_user)
events = events_api.events_get_all(story_id=story_id,
marker=marker_event,
limit=limit,
event_type=event_types.USER_COMMENT,
sort_field=sort_field,
sort_dir=sort_dir,
current_user=current_user)
comments = [comments_api.comment_get(event.comment_id)
for event in events]
# Apply the query response headers.
if limit:
response.headers['X-Limit'] = str(limit)
response.headers['X-Total'] = str(events_count)
if marker_event:
response.headers['X-Marker'] = str(marker)
return [wmodels.Comment.from_db_model(comment) for comment in comments]
@decorators.db_exceptions
@secure(checks.authenticated)
@wsme_pecan.wsexpose(wmodels.TimeLineEvent, int, body=wmodels.Comment)
def post(self, story_id, comment):
"""Create a new comment.
Example::
curl https://my.example.org/api/v1/stories/19/comments \\
-H 'Authorization: Bearer MY_ACCESS_TOKEN' \\
-H 'Content-Type: application/json;charset=UTF-8' \\
--data-binary '{"content":"creating a new comment"}'
:param story_id: An id of a Story to add a Comment to.
:param comment: The comment itself.
"""
created_comment = comments_api.comment_create(comment.as_dict())
event_values = {
"story_id": story_id,
"author_id": request.current_user_id,
"event_type": event_types.USER_COMMENT,
"comment_id": created_comment.id
}
event = wmodels.TimeLineEvent.from_db_model(
events_api.event_create(event_values))
event = wmodels.TimeLineEvent.resolve_event_values(event)
return event
@decorators.db_exceptions
@secure(checks.authenticated)
@wsme_pecan.wsexpose(wmodels.Comment, int, int, body=wmodels.Comment)
def put(self, story_id, comment_id, comment_body):
"""Update an existing comment. This command is disabled by default.
:param story_id: A placeholder.
:param comment_id: The id of a Comment to be updated.
:param comment_body: An updated Comment.
"""
if not CONF.enable_editable_comments:
abort(405, _("Editing of comments is disabled "
"by the server administrator."))
comments_api.comment_get(comment_id)
comment_author_id = events_api.events_get_all(
comment_id=comment_id)[0].author_id
if request.current_user_id != comment_author_id:
abort(403, _("You are not allowed to update this comment."))
updated_comment = comments_api.comment_update(comment_id,
comment_body.as_dict(
omit_unset=True
))
return wmodels.Comment.from_db_model(updated_comment)
@decorators.db_exceptions
@secure(checks.superuser)
@wsme_pecan.wsexpose(None, int, int, status_code=204)
def delete(self, story_id, comment_id):
"""Delete an existing comment. This command is disabled by default.
:param story_id: A placeholder.
:param comment_id: The id of a Comment to be updated.
"""
if not CONF.enable_editable_comments:
abort(405, _("Deletion of comments is disabled "
"by the server administrator."))
comments_api.comment_delete(comment_id)
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose([wmodels.Comment], wtypes.text, wtypes.text, int,
int, int)
def search(self, q="", marker=None, offset=None, limit=None):
"""The search endpoint for comments.
:param q: The query string.
:return: List of Comments matching the query.
"""
comments = SEARCH_ENGINE.comments_query(q=q,
marker=marker,
offset=offset,
limit=limit)
return [wmodels.Comment.from_db_model(comment) for comment in comments]
|
|
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from color import Coloring
from command import PagedCommand
from git_command import git_require, GitCommand
class GrepColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'grep')
self.project = self.printer('project', attr='bold')
class Grep(PagedCommand):
common = True
helpSummary = "Print lines matching a pattern"
helpUsage = """
%prog {pattern | -e pattern} [<project>...]
"""
helpDescription = """
Search for the specified patterns in all project files.
# Boolean Options
The following options can appear as often as necessary to express
the pattern to locate:
-e PATTERN
--and, --or, --not, -(, -)
Further, the -r/--revision option may be specified multiple times
in order to scan multiple trees. If the same file matches in more
than one tree, only the first result is reported, prefixed by the
revision name it was found under.
# Examples
Look for a line that has '#define' and either 'MAX_PATH' or 'PATH_MAX':
repo grep -e '#define' --and -\\( -e MAX_PATH -e PATH_MAX \\)
Look for a line that has 'NODE' or 'Unexpected' in files that
contain a line that matches both expressions:
repo grep --all-match -e NODE -e Unexpected
"""
def _Options(self, p):
def carry(option,
opt_str,
value,
parser):
pt = getattr(parser.values, 'cmd_argv', None)
if pt is None:
pt = []
setattr(parser.values, 'cmd_argv', pt)
if opt_str == '-(':
pt.append('(')
elif opt_str == '-)':
pt.append(')')
else:
pt.append(opt_str)
if value is not None:
pt.append(value)
g = p.add_option_group('Sources')
g.add_option('--cached',
action='callback', callback=carry,
help='Search the index, instead of the work tree')
g.add_option('-r', '--revision',
dest='revision', action='append', metavar='TREEish',
help='Search TREEish, instead of the work tree')
g = p.add_option_group('Pattern')
g.add_option('-e',
action='callback', callback=carry,
metavar='PATTERN', type='str',
help='Pattern to search for')
g.add_option('-i', '--ignore-case',
action='callback', callback=carry,
help='Ignore case differences')
g.add_option('-a', '--text',
action='callback', callback=carry,
help="Process binary files as if they were text")
g.add_option('-I',
action='callback', callback=carry,
help="Don't match the pattern in binary files")
g.add_option('-w', '--word-regexp',
action='callback', callback=carry,
help='Match the pattern only at word boundaries')
g.add_option('-v', '--invert-match',
action='callback', callback=carry,
help='Select non-matching lines')
g.add_option('-G', '--basic-regexp',
action='callback', callback=carry,
help='Use POSIX basic regexp for patterns (default)')
g.add_option('-E', '--extended-regexp',
action='callback', callback=carry,
help='Use POSIX extended regexp for patterns')
g.add_option('-F', '--fixed-strings',
action='callback', callback=carry,
help='Use fixed strings (not regexp) for pattern')
g = p.add_option_group('Pattern Grouping')
g.add_option('--all-match',
action='callback', callback=carry,
help='Limit match to lines that have all patterns')
g.add_option('--and', '--or', '--not',
action='callback', callback=carry,
help='Boolean operators to combine patterns')
g.add_option('-(', '-)',
action='callback', callback=carry,
help='Boolean operator grouping')
g = p.add_option_group('Output')
g.add_option('-n',
action='callback', callback=carry,
help='Prefix the line number to matching lines')
g.add_option('-C',
action='callback', callback=carry,
metavar='CONTEXT', type='str',
help='Show CONTEXT lines around match')
g.add_option('-B',
action='callback', callback=carry,
metavar='CONTEXT', type='str',
help='Show CONTEXT lines before match')
g.add_option('-A',
action='callback', callback=carry,
metavar='CONTEXT', type='str',
help='Show CONTEXT lines after match')
g.add_option('-l', '--name-only', '--files-with-matches',
action='callback', callback=carry,
help='Show only file names containing matching lines')
g.add_option('-L', '--files-without-match',
action='callback', callback=carry,
help='Show only file names not containing matching lines')
def Execute(self, opt, args):
out = GrepColoring(self.manifest.manifestProject.config)
cmd_argv = ['grep']
if out.is_on and git_require((1, 6, 3)):
cmd_argv.append('--color')
cmd_argv.extend(getattr(opt, 'cmd_argv', []))
if '-e' not in cmd_argv:
if not args:
self.Usage()
cmd_argv.append('-e')
cmd_argv.append(args[0])
args = args[1:]
projects = self.GetProjects(args)
full_name = False
if len(projects) > 1:
cmd_argv.append('--full-name')
full_name = True
have_rev = False
if opt.revision:
if '--cached' in cmd_argv:
print('fatal: cannot combine --cached and --revision', file=sys.stderr)
sys.exit(1)
have_rev = True
cmd_argv.extend(opt.revision)
cmd_argv.append('--')
bad_rev = False
have_match = False
for project in projects:
p = GitCommand(project,
cmd_argv,
bare = False,
capture_stdout = True,
capture_stderr = True)
if p.Wait() != 0:
# no results
#
if p.stderr:
if have_rev and 'fatal: ambiguous argument' in p.stderr:
bad_rev = True
else:
out.project('--- project %s ---' % project.relpath)
out.nl()
out.write("%s", p.stderr)
out.nl()
continue
have_match = True
# We cut the last element, to avoid a blank line.
#
r = p.stdout.split('\n')
r = r[0:-1]
if have_rev and full_name:
for line in r:
rev, line = line.split(':', 1)
out.write("%s", rev)
out.write(':')
out.project(project.relpath)
out.write('/')
out.write("%s", line)
out.nl()
elif full_name:
for line in r:
out.project(project.relpath)
out.write('/')
out.write("%s", line)
out.nl()
else:
for line in r:
print(line)
if have_match:
sys.exit(0)
elif have_rev and bad_rev:
for r in opt.revision:
print("error: can't search revision %s" % r, file=sys.stderr)
sys.exit(1)
else:
sys.exit(1)
|
|
"""The tests for the Geofency device tracker platform."""
# pylint: disable=redefined-outer-name
import asyncio
from unittest.mock import patch
import pytest
from homeassistant.components import zone
import homeassistant.components.device_tracker as device_tracker
from homeassistant.components.device_tracker.geofency import (
CONF_MOBILE_BEACONS, URL)
from homeassistant.const import (
CONF_PLATFORM, HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME,
STATE_NOT_HOME)
from homeassistant.setup import async_setup_component
from homeassistant.util import slugify
HOME_LATITUDE = 37.239622
HOME_LONGITUDE = -115.815811
NOT_HOME_LATITUDE = 37.239394
NOT_HOME_LONGITUDE = -115.763283
GPS_ENTER_HOME = {
'latitude': HOME_LATITUDE,
'longitude': HOME_LONGITUDE,
'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
'name': 'Home',
'radius': 100,
'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
'date': '2017-08-19T10:53:53Z',
'address': 'Testing Trail 1',
'entry': '1'
}
GPS_EXIT_HOME = {
'latitude': HOME_LATITUDE,
'longitude': HOME_LONGITUDE,
'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
'name': 'Home',
'radius': 100,
'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
'date': '2017-08-19T10:53:53Z',
'address': 'Testing Trail 1',
'entry': '0'
}
BEACON_ENTER_HOME = {
'latitude': HOME_LATITUDE,
'longitude': HOME_LONGITUDE,
'beaconUUID': 'FFEF0E83-09B2-47C8-9837-E7B563F5F556',
'minor': '36138',
'major': '8629',
'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
'name': 'Home',
'radius': 100,
'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
'date': '2017-08-19T10:53:53Z',
'address': 'Testing Trail 1',
'entry': '1'
}
BEACON_EXIT_HOME = {
'latitude': HOME_LATITUDE,
'longitude': HOME_LONGITUDE,
'beaconUUID': 'FFEF0E83-09B2-47C8-9837-E7B563F5F556',
'minor': '36138',
'major': '8629',
'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
'name': 'Home',
'radius': 100,
'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
'date': '2017-08-19T10:53:53Z',
'address': 'Testing Trail 1',
'entry': '0'
}
BEACON_ENTER_CAR = {
'latitude': NOT_HOME_LATITUDE,
'longitude': NOT_HOME_LONGITUDE,
'beaconUUID': 'FFEF0E83-09B2-47C8-9837-E7B563F5F556',
'minor': '36138',
'major': '8629',
'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
'name': 'Car 1',
'radius': 100,
'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
'date': '2017-08-19T10:53:53Z',
'address': 'Testing Trail 1',
'entry': '1'
}
BEACON_EXIT_CAR = {
'latitude': NOT_HOME_LATITUDE,
'longitude': NOT_HOME_LONGITUDE,
'beaconUUID': 'FFEF0E83-09B2-47C8-9837-E7B563F5F556',
'minor': '36138',
'major': '8629',
'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
'name': 'Car 1',
'radius': 100,
'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
'date': '2017-08-19T10:53:53Z',
'address': 'Testing Trail 1',
'entry': '0'
}
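# The dicts above mirror the payloads Geofency POSTs to the platform: 'entry'
# is '1' on enter and '0' on exit, and beacon payloads additionally carry
# beaconUUID/major/minor.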
@pytest.fixture
def geofency_client(loop, hass, test_client):
"""Geofency mock client."""
assert loop.run_until_complete(async_setup_component(
hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'geofency',
CONF_MOBILE_BEACONS: ['Car 1']
}}))
with patch('homeassistant.components.device_tracker.update_config'):
yield loop.run_until_complete(test_client(hass.http.app))
@pytest.fixture(autouse=True)
def setup_zones(loop, hass):
"""Setup Zone config in HA."""
assert loop.run_until_complete(async_setup_component(
hass, zone.DOMAIN, {
'zone': {
'name': 'Home',
'latitude': HOME_LATITUDE,
'longitude': HOME_LONGITUDE,
'radius': 100,
}}))
@asyncio.coroutine
def test_data_validation(geofency_client):
"""Test data validation."""
# No data
req = yield from geofency_client.post(URL)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
missing_attributes = ['address', 'device',
'entry', 'latitude', 'longitude', 'name']
# missing attributes
for attribute in missing_attributes:
copy = GPS_ENTER_HOME.copy()
del copy[attribute]
req = yield from geofency_client.post(URL, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
@asyncio.coroutine
def test_gps_enter_and_exit_home(hass, geofency_client):
"""Test GPS based zone enter and exit."""
# Enter the Home zone
req = yield from geofency_client.post(URL, data=GPS_ENTER_HOME)
assert req.status == HTTP_OK
device_name = slugify(GPS_ENTER_HOME['device'])
state_name = hass.states.get('{}.{}'.format(
'device_tracker', device_name)).state
assert STATE_HOME == state_name
# Exit the Home zone
req = yield from geofency_client.post(URL, data=GPS_EXIT_HOME)
assert req.status == HTTP_OK
device_name = slugify(GPS_EXIT_HOME['device'])
state_name = hass.states.get('{}.{}'.format(
'device_tracker', device_name)).state
assert STATE_NOT_HOME == state_name
# Exit the Home zone with "Send Current Position" enabled
data = GPS_EXIT_HOME.copy()
data['currentLatitude'] = NOT_HOME_LATITUDE
data['currentLongitude'] = NOT_HOME_LONGITUDE
req = yield from geofency_client.post(URL, data=data)
assert req.status == HTTP_OK
device_name = slugify(GPS_EXIT_HOME['device'])
current_latitude = hass.states.get('{}.{}'.format(
'device_tracker', device_name)).attributes['latitude']
assert NOT_HOME_LATITUDE == current_latitude
current_longitude = hass.states.get('{}.{}'.format(
'device_tracker', device_name)).attributes['longitude']
assert NOT_HOME_LONGITUDE == current_longitude
@asyncio.coroutine
def test_beacon_enter_and_exit_home(hass, geofency_client):
"""Test iBeacon based zone enter and exit - a.k.a stationary iBeacon."""
# Enter the Home zone
req = yield from geofency_client.post(URL, data=BEACON_ENTER_HOME)
assert req.status == HTTP_OK
device_name = slugify("beacon_{}".format(BEACON_ENTER_HOME['name']))
state_name = hass.states.get('{}.{}'.format(
'device_tracker', device_name)).state
assert STATE_HOME == state_name
# Exit the Home zone
req = yield from geofency_client.post(URL, data=BEACON_EXIT_HOME)
assert req.status == HTTP_OK
device_name = slugify("beacon_{}".format(BEACON_ENTER_HOME['name']))
state_name = hass.states.get('{}.{}'.format(
'device_tracker', device_name)).state
assert STATE_NOT_HOME == state_name
@asyncio.coroutine
def test_beacon_enter_and_exit_car(hass, geofency_client):
"""Test use of mobile iBeacon."""
# Enter the Car away from Home zone
req = yield from geofency_client.post(URL, data=BEACON_ENTER_CAR)
assert req.status == HTTP_OK
device_name = slugify("beacon_{}".format(BEACON_ENTER_CAR['name']))
state_name = hass.states.get('{}.{}'.format(
'device_tracker', device_name)).state
assert STATE_NOT_HOME == state_name
# Exit the Car away from Home zone
req = yield from geofency_client.post(URL, data=BEACON_EXIT_CAR)
assert req.status == HTTP_OK
device_name = slugify("beacon_{}".format(BEACON_ENTER_CAR['name']))
state_name = hass.states.get('{}.{}'.format(
'device_tracker', device_name)).state
assert STATE_NOT_HOME == state_name
# Enter the Car in the Home zone
data = BEACON_ENTER_CAR.copy()
data['latitude'] = HOME_LATITUDE
data['longitude'] = HOME_LONGITUDE
req = yield from geofency_client.post(URL, data=data)
assert req.status == HTTP_OK
device_name = slugify("beacon_{}".format(data['name']))
state_name = hass.states.get('{}.{}'.format(
'device_tracker', device_name)).state
assert STATE_HOME == state_name
# Exit the Car in the Home zone
req = yield from geofency_client.post(URL, data=data)
assert req.status == HTTP_OK
device_name = slugify("beacon_{}".format(data['name']))
state_name = hass.states.get('{}.{}'.format(
'device_tracker', device_name)).state
assert STATE_HOME == state_name
|
|
import os
from collections import OrderedDict
import re
import Common
import Instructions
import Mif
class Line(object):
def __init__(self, fileName, number, string):
self.FileName = fileName
self.Number = number
self.String = string
def __str__(self):
return "%s : %s" % (str(self.Number),self.String)
def __repr__(self):
return str(self)
class BinaryData(object):
def __init__(self, address, data):
self.Address = address
self.Data = data
self.Line = None # a ref to the original Line object it was parsed from
def __str__(self):
return "@{:08X} {:X}".format(self.Address, self.Data)
def __repr__(self):
return "{}(0x{:X}, 0x{:X})".format(self.__class__.__name__, self.Address, self.Data)
class Assembly(object):
CommentString = "//"
InterruptVectorTable = {}
AddressSpaceSize = None
VectorTableStartAddress = None
def __init__(self, addressWidth):
self.Original = []
self.WithoutComments = []
self.Code = []
self.ConstantsLines = []
self.DirectivesLines = []
self.Constants = OrderedDict()
self.Directives = {}
self.Instructions = []
Assembly.AddressSpaceSize = (2**addressWidth) - 1
Assembly.VectorTableStartAddress = Assembly.AddressSpaceSize - (2**3 - 1)
def __str__(self):
string = ""
if self.ConstantsLines:
string += "Constants:"
for line in self.ConstantsLines:
string += "\n\t%s" % line
string += "\n"
if self.DirectivesLines:
string += "Directives:"
for line in self.DirectivesLines:
string += "\n\t%s" % line
string += "\n"
if self.Code:
string += "Code:"
for line in self.Code:
string += "\n\t%s" % line
return string
def __repr__(self):
return str(self)
def Decode(self):
self.DecodeDirectives()
self.DecodeConstants()
self.DecodeCode()
def DecodeConstants(self):
for constant in self.ConstantsLines:
split = constant.String.split("=")
split = [piece.strip() for piece in split]
if len(split) != 2:
Common.Error(constant, "Wrong syntax for constant")
else:
tempAddress = self.ReplaceDirective(split[0]).strip()
tempConstant = self.ReplaceDirective(split[1]).strip()
self.Constants[Common.ExprToHexString(tempAddress,constant)] = (Common.ExprToHexString(tempConstant,constant), constant)
def DecodeDirectives(self):
for directive in self.DirectivesLines:
split = directive.String.split("=")
split = [piece.strip() for piece in split]
if len(split) != 2:
Common.Error(directive, "Wrong syntax for directive")
else:
tempDirective = self.ReplaceDirective(split[1])
self.Directives[split[0]] = tempDirective.strip() if tempDirective.startswith('R') else "0x"+Common.ExprToHexString(tempDirective.strip(),directive)
def DecodeCode(self):
newCode = []
for line in self.Code:
for directive, value in self.Directives.iteritems():
extracted = [piece for piece in re.split("\[|\]| |,|\t|\+", line.String)] # Split the instruction on brackets, spaces, tabs, commas, and plus signs
if directive in extracted:
line.String = line.String.replace(directive, value)
if line.String.strip().endswith(':'):
Common.Error(line, "Label must be on the same line as an instruction")
self.Instructions.append(Instructions.DecodeLine(line))
newCode.append(line)
self.Code = newCode
def ReplaceDirective(self, string):
for directive, value in self.Directives.iteritems():
extracted = [piece for piece in re.split("[^a-zA-Z0-9_]", string)] # Split on any non-identifier character to isolate whole tokens
if directive in extracted:
string = string.replace(directive, value)
return string
class Disassembly(object):
def __init__(self, width, memoryWidth, debug):
self.Width = width
self.MemoryWidth = memoryWidth
self.Debug = debug
self.Original = []
self.WithoutComments = []
self.Binary = []
self.Instructions = OrderedDict()
def Encode(self):
for word in self.Binary:
instruction = Instructions.Encode(word)
self.Instructions[word.Address] = instruction
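# MergeBinary() joins Width/MemoryWidth consecutive memory words into a single
# instruction-wide word; the word at the lowest address supplies the least
# significant bits, and the merged entry keeps that word's address and Line.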
def MergeBinary(self):
piecesToJoin = self.Width / self.MemoryWidth
if piecesToJoin > 1:
newBinary = []
for i in range(0, len(self.Binary), piecesToJoin):
newData = 0
for j in range(0, piecesToJoin):
newData |= ((self.Binary[i+j].Data & (2**self.MemoryWidth-1)) << j*self.MemoryWidth)
bd = BinaryData(address=self.Binary[i].Address, data=newData)
bd.Line = self.Binary[i].Line
newBinary.append(bd)
self.Binary = newBinary
def Write(self, filePath, headers=[]):
with open(filePath, "w+") as _file:
_file.seek(0)
_file.truncate() # Clears out the file if it exists
_file.write("{} Disassembled for cjg_risc by Connor Goldberg\n".format(Assembly.CommentString))
for line in headers:
_file.write("{} {}\n".format(Assembly.CommentString, line))
_file.write("\n")
_file.write(".code\n")
for address, instruction in self.Instructions.iteritems():
line = "{:12}{:28}{} {}".format(instruction.Label+':' if instruction.Label else '',
instruction.DisassembledString.lower(),
Assembly.CommentString,
instruction.Line.String)
_file.write("{}\n".format(line))
_file.write(".endcode\n")
class Parser(object):
def __init__(self, assemblyFilePath, width, addressWidth, memoryWidth, canInclude = False, label=None):
self.AssemblyFilePath = assemblyFilePath
self.Assembly = Assembly(addressWidth)
self.Width = width
self.AddressWidth = addressWidth
self.MemoryWidth = memoryWidth
self.CanInclude = canInclude
self.Label = label
self.LabelTable = {}
self.IncludeFiles = []
self.IncludeParsers = []
def Assemble(self):
for instruction in self.Assembly.Instructions:
instruction.Assemble()
for parser in self.IncludeParsers:
for instruction in parser.Assembly.Instructions:
instruction.Assemble()
def FileToLines(self, assemblyFilePath):
if os.path.isfile(assemblyFilePath):
with open(assemblyFilePath) as _file:
lineCount = 1
for line in _file:
self.Assembly.Original.append(Line(os.path.basename(assemblyFilePath), lineCount, line.strip()))
lineCount+=1
else:
return []
def GetAssemblyData(self):
lines = []
allParsers = []
allParsers.append(self)
for parser in self.IncludeParsers:
allParsers.append(parser)
for parser in allParsers:
if parser.Label != None and parser.Assembly.Instructions:
lines.append(Mif.MifLine(comment="----- %s -----" % parser.Label.String))
for instruction in parser.Assembly.Instructions:
data = int(instruction.MachineCode, 2)
comment = instruction.Line.String.strip().replace('\t',' ')
lines.append(Mif.MifLine(data=data, comment=comment, instruction=instruction))
return lines
def GetConstantsData(self):
lines = []
addresses = {}
for address, data in self.Assembly.Constants.iteritems():
if address in addresses.keys():
if addresses[address] == data[0]:
continue
else:
Common.Error(data[1], "Duplicate constant found at address: 0x%s. Address already assigned to: 0x%s" % (address, addresses[address]))
lines.append(Mif.MifLine(data=int(data[0],16), address=int(address,16), comment="%s:%s" % (data[1].FileName, data[1].Number)))
addresses[address] = data[0]
for parser in self.IncludeParsers:
for address, data in parser.Assembly.Constants.iteritems():
if address in addresses.keys():
if addresses[address] == data[0]:
continue
else:
Common.Error(data[1], "Duplicate constant found at address: 0x%s. Address already assigned to: 0x%s" % (address, addresses[address]))
lines.append(Mif.MifLine(data=int(data[0],16), address=int(address,16), comment="%s:%s" % (data[1].FileName, data[1].Number)))
addresses[address] = data[0]
return lines
@staticmethod
def GetInterruptVectorTable():
lines = []
for num, dest in Assembly.InterruptVectorTable.iteritems():
dest = Common.NumToHexString(dest, 8)
lines.append(Mif.MifLine(address=Assembly.VectorTableStartAddress+num, data=dest, comment="ISR_%i" % num))
return lines
def MergeIncludes(self):
addressCounter = len(self.Assembly.Instructions) * (self.Width / self.MemoryWidth)
for parser in self.IncludeParsers:
label = parser.Label.String.split()[0]
if label in self.LabelTable.keys():
Common.Error(parser.Label, "Duplicate include label: %s" % parser.Label.String)
self.LabelTable[label] = addressCounter
addressCounter = parser.ResolveAddresses(startAddress=addressCounter)
parser.SetLabelAddresses()
self.ResolveAddresses()
self.SetLabelAddresses()
@staticmethod
def RemoveComments(contents):
pass1 = [line for line in contents if not line.String.startswith(";") and not line.String.startswith("//")] # Removes all lines starting with ';' or '//'
pass2 = []
for line in pass1:
if ';' in line.String:
line.String = line.String[:line.String.index(';')] # Removes semicolon comments
if "//" in line.String:
line.String = line.String[:line.String.index("//")] # Removes // comments
pass2.append(line)
return [line for line in pass2 if line.String != ""] # Remove empty lines
def Separate(self):
category = Common.Enum("Directives", "Constants", "Code", "Includes")
myCategory = None
for line in self.Assembly.WithoutComments:
if line.String.strip() == ".directives":
myCategory = category.Directives
elif line.String.strip() == ".constants":
myCategory = category.Constants
elif line.String.strip() == ".code":
myCategory = category.Code
elif line.String.strip() == ".includes":
if not self.CanInclude:
Common.Error(line, "Cannot recursively include files")
myCategory = category.Includes
elif line.String.startswith('.end'):
myCategory = None
else:
if myCategory == category.Directives:
self.Assembly.DirectivesLines.append(line)
elif myCategory == category.Constants:
self.Assembly.ConstantsLines.append(line)
elif myCategory == category.Code:
self.Assembly.Code.append(line)
elif myCategory == category.Includes:
if not line in self.IncludeFiles:
self.IncludeFiles.append(line)
else:
Common.Error(line, "Line \"%s\" belongs to unknown section" % line.String)
def Parse(self):
self.FileToLines(self.AssemblyFilePath)
self.Assembly.WithoutComments = Parser.RemoveComments(self.Assembly.Original)
self.Separate()
self.Assembly.Decode()
if self.CanInclude:
self.ParseIncludes()
self.MergeIncludes()
self.Assemble()
def ParseIncludes(self):
for include in self.IncludeFiles:
split = include.String.split()
if len(split) != 3:
Common.Error(include, "Wrong syntax for include")
filePath = os.path.abspath(split[2])
if not os.path.isfile(filePath):
Common.Error(include, "Cannot find file: %s" % filePath)
includeParser = Parser(filePath, self.Width, self.AddressWidth, self.MemoryWidth, label=include)
includeParser.Parse()
self.IncludeParsers.append(includeParser)
def ResolveAddresses(self, startAddress = 0):
addressCounter = startAddress
for instruction in self.Assembly.Instructions:
if instruction.Label != None:
if instruction.Label.startswith("ISR_"):
num = instruction.Label.replace("ISR_",'')
if not num.isdigit():
Common.Error(instruction.Line, "ISR must be followed by a number")
elif Assembly.VectorTableStartAddress+int(num) > Assembly.AddressSpaceSize:
Common.Error(instruction.Line, "ISR value is too large. Must not exceed: %s" % str(Assembly.AddressSpaceSize-Assembly.VectorTableStartAddress))
if int(num) in Assembly.InterruptVectorTable.keys():
Common.Error(instruction.Line, "Found previous declaration of ISR: %s" % instruction.Label)
Assembly.InterruptVectorTable[int(num)] = addressCounter
else:
if instruction.Label in self.LabelTable.keys():
Common.Error(instruction.Line, "Found previous declaration of label: %s" % instruction.Label)
self.LabelTable[instruction.Label] = addressCounter
addressCounter += self.Width / self.MemoryWidth
return addressCounter
def SetLabelAddresses(self):
for instruction in self.Assembly.Instructions:
if instruction.NeedsLabelAddress:
if instruction.LabelOperand in self.LabelTable.keys():
instruction.Address = self.LabelTable[instruction.LabelOperand]
else:
Common.Error(instruction.Line, "Could not find destination label for: %s" % instruction.LabelOperand)
return self.Assembly.Instructions
class DisassemblyParser(object):
def __init__(self, mifFilePath, mifFormat, width, memoryWidth, debug):
self.MifFilePath = mifFilePath
if mifFormat != "cadence":
Common.Error("Only the cadence mif format is currently supported")
self.MifFormat = mifFormat
self.Width = width
self.MemoryWidth = memoryWidth
self.Debug = debug
self.Disassembly = Disassembly(width=self.Width, memoryWidth=self.MemoryWidth, debug=self.Debug)
def Parse(self):
self.Disassembly.Original = self.FileToLines()
self.Disassembly.WithoutComments = Parser.RemoveComments(self.Disassembly.Original)
self.Disassembly.Binary = self.SplitToBinary(self.Disassembly.WithoutComments)
self.Disassembly.MergeBinary()
self.Disassembly.Encode()
self.Disassemble()
return self.Disassembly
def Disassemble(self):
# Pass 1
for address, instruction in self.Disassembly.Instructions.iteritems():
instruction.Disassemble()
if (self.Debug):
print "@{:08} {:08X}".format(address, instruction.MachineCodeValue)
print instruction
# Pass 2 to handle labels
labelCount = 0
for address, instruction in self.Disassembly.Instructions.iteritems():
if instruction.NeedsLabelOperand:
if instruction.Address not in self.Disassembly.Instructions.keys():
print instruction
Common.Error(instruction.Line, "Cannot find instruction at destination address: 0x{:X}".format(instruction.Address))
destinationInstruction = self.Disassembly.Instructions[instruction.Address]
if not destinationInstruction.Label:
destinationInstruction.Label = "label_{}".format(labelCount)
labelCount += 1
instruction.FixupLabel(destinationInstruction.Label)
def FileToLines(self):
lines = []
if os.path.isfile(self.MifFilePath):
with open(self.MifFilePath) as _file:
lineCount = 1
for line in _file:
lines.append(Line(os.path.basename(self.MifFilePath), lineCount, line.strip()))
lineCount+=1
return lines
else:
return []
@staticmethod
def SplitToBinary(contents):
dataList = []
for line in contents:
split = line.String.split()
try:
address = int(split[0].strip('@'), 16)
except Exception as e:
Common.Error(line, "Could not decode address: {}".format(split[0]))
try:
data = int(split[1], 16)
except Exception as e:
Common.Error(line, "Could not decode data: {}".format(split[1]))
bd = BinaryData(address=address, data=data)
bd.Line = line
dataList.append(bd)
return dataList
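# Example (values are illustrative) of the '@address data' lines that
# SplitToBinary() parses from a cadence-style mif dump:
#   @00000000 12AB34CD
#   @00000004 DEADBEEF
# which become BinaryData(0x0, 0x12AB34CD) and BinaryData(0x4, 0xDEADBEEF).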
|
|
class KEY(object):
def __init__(self, internal_name, external_name):
self.internalName = internal_name
self.externalName = external_name
class REQUEST(object):
def __init__(self, internal_name, external_name):
self.internalName = internal_name
self.externalName = external_name
class KIND(object):
def __init__(self, internal_name, external_name):
self.internalName = internal_name
self.externalName = external_name
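# Each table below pairs an internal identifier with the externally visible
# UID string it maps to (e.g. 'key.name').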
UID_KEYS = [
KEY('VersionMajor', 'key.version_major'),
KEY('VersionMinor', 'key.version_minor'),
KEY('VersionPatch', 'key.version_patch'),
KEY('Results', 'key.results'),
KEY('Request', 'key.request'),
KEY('Notification', 'key.notification'),
KEY('Kind', 'key.kind'),
KEY('AccessLevel', 'key.accessibility'),
KEY('SetterAccessLevel', 'key.setter_accessibility'),
KEY('Keyword', 'key.keyword'),
KEY('Name', 'key.name'),
KEY('USR', 'key.usr'),
KEY('OriginalUSR', 'key.original_usr'),
KEY('DefaultImplementationOf', 'key.default_implementation_of'),
KEY('InterestedUSR', 'key.interested_usr'),
KEY('GenericParams', 'key.generic_params'),
KEY('GenericRequirements', 'key.generic_requirements'),
KEY('DocFullAsXML', 'key.doc.full_as_xml'),
KEY('Line', 'key.line'),
KEY('Column', 'key.column'),
KEY('ReceiverUSR', 'key.receiver_usr'),
KEY('IsDynamic', 'key.is_dynamic'),
KEY('IsImplicit', 'key.is_implicit'),
KEY('FilePath', 'key.filepath'),
KEY('ModuleInterfaceName', 'key.module_interface_name'),
KEY('Hash', 'key.hash'),
KEY('CompilerArgs', 'key.compilerargs'),
KEY('Severity', 'key.severity'),
KEY('Offset', 'key.offset'),
KEY('Length', 'key.length'),
KEY('SourceFile', 'key.sourcefile'),
KEY('SerializedSyntaxTree', 'key.serialized_syntax_tree'),
KEY('SourceText', 'key.sourcetext'),
KEY('EnableSyntaxMap', 'key.enablesyntaxmap'),
KEY('SyntaxTreeTransferMode', 'key.syntaxtreetransfermode'),
KEY('SyntaxTreeSerializationFormat',
'key.syntax_tree_serialization_format'),
KEY('EnableStructure', 'key.enablesubstructure'),
KEY('Description', 'key.description'),
KEY('TypeName', 'key.typename'),
KEY('RuntimeName', 'key.runtime_name'),
KEY('SelectorName', 'key.selector_name'),
KEY('AnnotatedDecl', 'key.annotated_decl'),
KEY('FullyAnnotatedDecl', 'key.fully_annotated_decl'),
KEY('FullyAnnotatedGenericSignature',
'key.fully_annotated_generic_signature'),
KEY('DocBrief', 'key.doc.brief'),
KEY('Context', 'key.context'),
KEY('ModuleImportDepth', 'key.moduleimportdepth'),
KEY('NumBytesToErase', 'key.num_bytes_to_erase'),
KEY('NotRecommended', 'key.not_recommended'),
KEY('Annotations', 'key.annotations'),
KEY('DiagnosticStage', 'key.diagnostic_stage'),
KEY('SyntaxMap', 'key.syntaxmap'),
KEY('IsSystem', 'key.is_system'),
KEY('Related', 'key.related'),
KEY('Inherits', 'key.inherits'),
KEY('Conforms', 'key.conforms'),
KEY('Extends', 'key.extends'),
KEY('Dependencies', 'key.dependencies'),
KEY('Entities', 'key.entities'),
KEY('NameOffset', 'key.nameoffset'),
KEY('NameLength', 'key.namelength'),
KEY('BodyOffset', 'key.bodyoffset'),
KEY('BodyLength', 'key.bodylength'),
KEY('ThrowOffset', 'key.throwoffset'),
KEY('ThrowLength', 'key.throwlength'),
KEY('DocOffset', 'key.docoffset'),
KEY('DocLength', 'key.doclength'),
KEY('IsLocal', 'key.is_local'),
KEY('InheritedTypes', 'key.inheritedtypes'),
KEY('Attributes', 'key.attributes'),
KEY('Attribute', 'key.attribute'),
KEY('Elements', 'key.elements'),
KEY('SubStructure', 'key.substructure'),
KEY('Ranges', 'key.ranges'),
KEY('Fixits', 'key.fixits'),
KEY('Diagnostics', 'key.diagnostics'),
KEY('FormatOptions', 'key.editor.format.options'),
KEY('CodeCompleteOptions', 'key.codecomplete.options'),
KEY('FilterRules', 'key.codecomplete.filterrules'),
KEY('NextRequestStart', 'key.nextrequeststart'),
KEY('Popular', 'key.popular'),
KEY('Unpopular', 'key.unpopular'),
KEY('Hide', 'key.hide'),
KEY('Platform', 'key.platform'),
KEY('IsDeprecated', 'key.is_deprecated'),
KEY('IsUnavailable', 'key.is_unavailable'),
KEY('IsOptional', 'key.is_optional'),
KEY('Message', 'key.message'),
KEY('Introduced', 'key.introduced'),
KEY('Deprecated', 'key.deprecated'),
KEY('Obsoleted', 'key.obsoleted'),
KEY('RemoveCache', 'key.removecache'),
KEY('TypeInterface', 'key.typeinterface'),
KEY('TypeUsr', 'key.typeusr'),
KEY('ContainerTypeUsr', 'key.containertypeusr'),
KEY('ModuleGroups', 'key.modulegroups'),
KEY('BaseName', 'key.basename'),
KEY('ArgNames', 'key.argnames'),
KEY('SelectorPieces', 'key.selectorpieces'),
KEY('NameKind', 'key.namekind'),
KEY('LocalizationKey', 'key.localization_key'),
KEY('IsZeroArgSelector', 'key.is_zero_arg_selector'),
KEY('SwiftVersion', 'key.swift_version'),
KEY('Value', 'key.value'),
KEY('EnableDiagnostics', 'key.enablediagnostics'),
KEY('GroupName', 'key.groupname'),
KEY('ActionName', 'key.actionname'),
KEY('SynthesizedExtension', 'key.synthesizedextensions'),
KEY('UsingSwiftArgs', 'key.usingswiftargs'),
KEY('Names', 'key.names'),
KEY('UIDs', 'key.uids'),
KEY('SyntacticOnly', 'key.syntactic_only'),
KEY('ParentLoc', 'key.parent_loc'),
KEY('IsTestCandidate', 'key.is_test_candidate'),
KEY('Overrides', 'key.overrides'),
KEY('AssociatedUSRs', 'key.associated_usrs'),
KEY('ModuleName', 'key.modulename'),
KEY('RelatedDecls', 'key.related_decls'),
KEY('Simplified', 'key.simplified'),
KEY('RangeContent', 'key.rangecontent'),
KEY('CancelOnSubsequentRequest', 'key.cancel_on_subsequent_request'),
KEY('RenameLocations', 'key.renamelocations'),
KEY('Locations', 'key.locations'),
KEY('NameType', 'key.nametype'),
KEY('NewName', 'key.newname'),
KEY('CategorizedEdits', 'key.categorizededits'),
KEY('CategorizedRanges', 'key.categorizedranges'),
KEY('RangesWorthNote', 'key.rangesworthnote'),
KEY('Edits', 'key.edits'),
KEY('EndLine', 'key.endline'),
KEY('EndColumn', 'key.endcolumn'),
KEY('ArgIndex', 'key.argindex'),
KEY('Text', 'key.text'),
KEY('Category', 'key.category'),
KEY('IsFunctionLike', 'key.is_function_like'),
KEY('IsNonProtocolType', 'key.is_non_protocol_type'),
KEY('RefactorActions', 'key.refactor_actions'),
KEY('RetrieveRefactorActions', 'key.retrieve_refactor_actions'),
KEY('ActionUID', 'key.actionuid'),
KEY('ActionUnavailableReason', 'key.actionunavailablereason'),
KEY('CompileID', 'key.compileid'),
KEY('CompilerArgsString', 'key.compilerargs-string'),
KEY('ImplicitMembers', 'key.implicitmembers'),
KEY('ExpectedTypes', 'key.expectedtypes'),
KEY('Members', 'key.members'),
KEY('TypeBuffer', 'key.printedtypebuffer'),
KEY('ExpressionTypeList', 'key.expression_type_list'),
KEY('ExpressionOffset', 'key.expression_offset'),
KEY('ExpressionLength', 'key.expression_length'),
KEY('ExpressionType', 'key.expression_type'),
KEY('CanonicalizeType', 'key.canonicalize_type'),
KEY('InternalDiagnostic', "key.internal_diagnostic"),
KEY('VFSName', 'key.vfs.name'),
KEY('VFSOptions', 'key.vfs.options'),
KEY('Files', 'key.files'),
]
UID_REQUESTS = [
REQUEST('ProtocolVersion', 'source.request.protocol_version'),
REQUEST('CompilerVersion', 'source.request.compiler_version'),
REQUEST('CrashWithExit', 'source.request.crash_exit'),
REQUEST('Demangle', 'source.request.demangle'),
REQUEST('MangleSimpleClass', 'source.request.mangle_simple_class'),
REQUEST('Index', 'source.request.indexsource'),
REQUEST('DocInfo', 'source.request.docinfo'),
REQUEST('CodeComplete', 'source.request.codecomplete'),
REQUEST('CodeCompleteOpen', 'source.request.codecomplete.open'),
REQUEST('CodeCompleteClose', 'source.request.codecomplete.close'),
REQUEST('CodeCompleteUpdate', 'source.request.codecomplete.update'),
REQUEST('CodeCompleteCacheOnDisk',
'source.request.codecomplete.cache.ondisk'),
REQUEST('CodeCompleteSetPopularAPI',
'source.request.codecomplete.setpopularapi'),
REQUEST('CodeCompleteSetCustom', 'source.request.codecomplete.setcustom'),
REQUEST('TypeContextInfo', 'source.request.typecontextinfo'),
REQUEST('ConformingMethodList', 'source.request.conformingmethods'),
REQUEST('CursorInfo', 'source.request.cursorinfo'),
REQUEST('RangeInfo', 'source.request.rangeinfo'),
REQUEST('RelatedIdents', 'source.request.relatedidents'),
REQUEST('EditorOpen', 'source.request.editor.open'),
REQUEST('EditorOpenInterface', 'source.request.editor.open.interface'),
REQUEST('EditorOpenHeaderInterface',
'source.request.editor.open.interface.header'),
REQUEST('EditorOpenSwiftSourceInterface',
'source.request.editor.open.interface.swiftsource'),
REQUEST('EditorOpenSwiftTypeInterface',
'source.request.editor.open.interface.swifttype'),
REQUEST('EditorExtractTextFromComment',
'source.request.editor.extract.comment'),
REQUEST('EditorClose', 'source.request.editor.close'),
REQUEST('EditorReplaceText', 'source.request.editor.replacetext'),
REQUEST('EditorFormatText', 'source.request.editor.formattext'),
REQUEST('EditorExpandPlaceholder',
'source.request.editor.expand_placeholder'),
REQUEST('EditorFindUSR', 'source.request.editor.find_usr'),
REQUEST('EditorFindInterfaceDoc',
'source.request.editor.find_interface_doc'),
REQUEST('BuildSettingsRegister', 'source.request.buildsettings.register'),
REQUEST('ModuleGroups', 'source.request.module.groups'),
REQUEST('NameTranslation', 'source.request.name.translation'),
REQUEST('MarkupToXML', 'source.request.convert.markup.xml'),
REQUEST('Statistics', 'source.request.statistics'),
REQUEST('SyntacticRename', 'source.request.syntacticrename'),
REQUEST('FindRenameRanges', 'source.request.find-syntactic-rename-ranges'),
REQUEST('FindLocalRenameRanges',
'source.request.find-local-rename-ranges'),
REQUEST('SemanticRefactoring', 'source.request.semantic.refactoring'),
REQUEST('EnableCompileNotifications',
'source.request.enable-compile-notifications'),
REQUEST('TestNotification', 'source.request.test_notification'),
REQUEST('CollectExpressionType', 'source.request.expression.type'),
]
UID_KINDS = [
KIND('DeclFunctionFree', 'source.lang.swift.decl.function.free'),
KIND('RefFunctionFree', 'source.lang.swift.ref.function.free'),
KIND('DeclMethodInstance',
'source.lang.swift.decl.function.method.instance'),
KIND('RefMethodInstance',
'source.lang.swift.ref.function.method.instance'),
KIND('DeclMethodStatic', 'source.lang.swift.decl.function.method.static'),
KIND('RefMethodStatic', 'source.lang.swift.ref.function.method.static'),
KIND('DeclMethodClass', 'source.lang.swift.decl.function.method.class'),
KIND('RefMethodClass', 'source.lang.swift.ref.function.method.class'),
KIND('DeclAccessorGetter',
'source.lang.swift.decl.function.accessor.getter'),
KIND('RefAccessorGetter',
'source.lang.swift.ref.function.accessor.getter'),
KIND('DeclAccessorSetter',
'source.lang.swift.decl.function.accessor.setter'),
KIND('RefAccessorSetter',
'source.lang.swift.ref.function.accessor.setter'),
KIND('DeclAccessorWillSet',
'source.lang.swift.decl.function.accessor.willset'),
KIND('RefAccessorWillSet',
'source.lang.swift.ref.function.accessor.willset'),
KIND('DeclAccessorDidSet',
'source.lang.swift.decl.function.accessor.didset'),
KIND('RefAccessorDidSet',
'source.lang.swift.ref.function.accessor.didset'),
KIND('DeclAccessorAddress',
'source.lang.swift.decl.function.accessor.address'),
KIND('RefAccessorAddress',
'source.lang.swift.ref.function.accessor.address'),
KIND('DeclAccessorMutableAddress',
'source.lang.swift.decl.function.accessor.mutableaddress'),
KIND('RefAccessorMutableAddress',
'source.lang.swift.ref.function.accessor.mutableaddress'),
KIND('DeclAccessorRead',
'source.lang.swift.decl.function.accessor.read'),
KIND('RefAccessorRead',
'source.lang.swift.ref.function.accessor.read'),
KIND('DeclAccessorModify',
'source.lang.swift.decl.function.accessor.modify'),
KIND('RefAccessorModify',
'source.lang.swift.ref.function.accessor.modify'),
KIND('DeclConstructor', 'source.lang.swift.decl.function.constructor'),
KIND('RefConstructor', 'source.lang.swift.ref.function.constructor'),
KIND('DeclDestructor', 'source.lang.swift.decl.function.destructor'),
KIND('RefDestructor', 'source.lang.swift.ref.function.destructor'),
KIND('DeclFunctionPrefixOperator',
'source.lang.swift.decl.function.operator.prefix'),
KIND('DeclFunctionPostfixOperator',
'source.lang.swift.decl.function.operator.postfix'),
KIND('DeclFunctionInfixOperator',
'source.lang.swift.decl.function.operator.infix'),
KIND('RefFunctionPrefixOperator',
'source.lang.swift.ref.function.operator.prefix'),
KIND('RefFunctionPostfixOperator',
'source.lang.swift.ref.function.operator.postfix'),
KIND('RefFunctionInfixOperator',
'source.lang.swift.ref.function.operator.infix'),
KIND('DeclPrecedenceGroup', 'source.lang.swift.decl.precedencegroup'),
KIND('RefPrecedenceGroup', 'source.lang.swift.ref.precedencegroup'),
KIND('DeclSubscript', 'source.lang.swift.decl.function.subscript'),
KIND('RefSubscript', 'source.lang.swift.ref.function.subscript'),
KIND('DeclVarGlobal', 'source.lang.swift.decl.var.global'),
KIND('RefVarGlobal', 'source.lang.swift.ref.var.global'),
KIND('DeclVarInstance', 'source.lang.swift.decl.var.instance'),
KIND('RefVarInstance', 'source.lang.swift.ref.var.instance'),
KIND('DeclVarStatic', 'source.lang.swift.decl.var.static'),
KIND('RefVarStatic', 'source.lang.swift.ref.var.static'),
KIND('DeclVarClass', 'source.lang.swift.decl.var.class'),
KIND('RefVarClass', 'source.lang.swift.ref.var.class'),
KIND('DeclVarLocal', 'source.lang.swift.decl.var.local'),
KIND('RefVarLocal', 'source.lang.swift.ref.var.local'),
KIND('DeclVarParam', 'source.lang.swift.decl.var.parameter'),
KIND('DeclModule', 'source.lang.swift.decl.module'),
KIND('DeclClass', 'source.lang.swift.decl.class'),
KIND('RefClass', 'source.lang.swift.ref.class'),
KIND('DeclStruct', 'source.lang.swift.decl.struct'),
KIND('RefStruct', 'source.lang.swift.ref.struct'),
KIND('DeclEnum', 'source.lang.swift.decl.enum'),
KIND('RefEnum', 'source.lang.swift.ref.enum'),
KIND('DeclEnumCase', 'source.lang.swift.decl.enumcase'),
KIND('DeclEnumElement', 'source.lang.swift.decl.enumelement'),
KIND('RefEnumElement', 'source.lang.swift.ref.enumelement'),
KIND('DeclProtocol', 'source.lang.swift.decl.protocol'),
KIND('RefProtocol', 'source.lang.swift.ref.protocol'),
KIND('DeclExtension', 'source.lang.swift.decl.extension'),
KIND('DeclExtensionStruct', 'source.lang.swift.decl.extension.struct'),
KIND('DeclExtensionClass', 'source.lang.swift.decl.extension.class'),
KIND('DeclExtensionEnum', 'source.lang.swift.decl.extension.enum'),
KIND('DeclExtensionProtocol', 'source.lang.swift.decl.extension.protocol'),
KIND('DeclAssociatedType', 'source.lang.swift.decl.associatedtype'),
KIND('RefAssociatedType', 'source.lang.swift.ref.associatedtype'),
KIND('DeclOpaqueType', 'source.lang.swift.decl.opaquetype'),
KIND('RefOpaqueType', 'source.lang.swift.ref.opaquetype'),
KIND('DeclTypeAlias', 'source.lang.swift.decl.typealias'),
KIND('RefTypeAlias', 'source.lang.swift.ref.typealias'),
KIND('DeclGenericTypeParam', 'source.lang.swift.decl.generic_type_param'),
KIND('RefGenericTypeParam', 'source.lang.swift.ref.generic_type_param'),
KIND('RefModule', 'source.lang.swift.ref.module'),
KIND('StmtForEach', 'source.lang.swift.stmt.foreach'),
KIND('StmtFor', 'source.lang.swift.stmt.for'),
KIND('StmtWhile', 'source.lang.swift.stmt.while'),
KIND('StmtRepeatWhile', 'source.lang.swift.stmt.repeatwhile'),
KIND('StmtIf', 'source.lang.swift.stmt.if'),
KIND('StmtGuard', 'source.lang.swift.stmt.guard'),
KIND('StmtSwitch', 'source.lang.swift.stmt.switch'),
KIND('StmtCase', 'source.lang.swift.stmt.case'),
KIND('StmtBrace', 'source.lang.swift.stmt.brace'),
KIND('ExprCall', 'source.lang.swift.expr.call'),
KIND('ExprArg', 'source.lang.swift.expr.argument'),
KIND('ExprArray', 'source.lang.swift.expr.array'),
KIND('ExprDictionary', 'source.lang.swift.expr.dictionary'),
KIND('ExprObjectLiteral', 'source.lang.swift.expr.object_literal'),
KIND('ExprTuple', 'source.lang.swift.expr.tuple'),
KIND('ExprClosure', 'source.lang.swift.expr.closure'),
KIND('StructureElemId', 'source.lang.swift.structure.elem.id'),
KIND('StructureElemExpr', 'source.lang.swift.structure.elem.expr'),
KIND('StructureElemInitExpr',
'source.lang.swift.structure.elem.init_expr'),
KIND('StructureElemCondExpr',
'source.lang.swift.structure.elem.condition_expr'),
KIND('StructureElemPattern', 'source.lang.swift.structure.elem.pattern'),
KIND('StructureElemTypeRef', 'source.lang.swift.structure.elem.typeref'),
KIND('RangeSingleStatement', 'source.lang.swift.range.singlestatement'),
KIND('RangeSingleExpression', 'source.lang.swift.range.singleexpression'),
KIND('RangeSingleDeclaration',
'source.lang.swift.range.singledeclaration'),
KIND('RangeMultiStatement', 'source.lang.swift.range.multistatement'),
KIND('RangeMultiTypeMemberDeclaration',
'source.lang.swift.range.multitypememberdeclaration'),
KIND('RangeInvalid', 'source.lang.swift.range.invalid'),
KIND('NameObjc', 'source.lang.name.kind.objc'),
KIND('NameSwift', 'source.lang.name.kind.swift'),
KIND('Keyword', 'source.lang.swift.syntaxtype.keyword'),
KIND('Identifier', 'source.lang.swift.syntaxtype.identifier'),
KIND('TypeIdentifier', 'source.lang.swift.syntaxtype.typeidentifier'),
KIND('BuildConfigKeyword',
'source.lang.swift.syntaxtype.buildconfig.keyword'),
KIND('BuildConfigId', 'source.lang.swift.syntaxtype.buildconfig.id'),
KIND('PoundDirectiveKeyword',
'source.lang.swift.syntaxtype.pounddirective.keyword'),
KIND('AttributeId', 'source.lang.swift.syntaxtype.attribute.id'),
KIND('AttributeBuiltin', 'source.lang.swift.syntaxtype.attribute.builtin'),
KIND('Number', 'source.lang.swift.syntaxtype.number'),
KIND('String', 'source.lang.swift.syntaxtype.string'),
KIND('StringInterpolation',
'source.lang.swift.syntaxtype.string_interpolation_anchor'),
KIND('Comment', 'source.lang.swift.syntaxtype.comment'),
KIND('DocComment', 'source.lang.swift.syntaxtype.doccomment'),
KIND('DocCommentField', 'source.lang.swift.syntaxtype.doccomment.field'),
KIND('CommentMarker', 'source.lang.swift.syntaxtype.comment.mark'),
KIND('CommentURL', 'source.lang.swift.syntaxtype.comment.url'),
KIND('Placeholder', 'source.lang.swift.syntaxtype.placeholder'),
KIND('ObjectLiteral', 'source.lang.swift.syntaxtype.objectliteral'),
KIND('Expr', 'source.lang.swift.expr'),
KIND('Stmt', 'source.lang.swift.stmt'),
KIND('Type', 'source.lang.swift.type'),
KIND('ForEachSequence', 'source.lang.swift.foreach.sequence'),
KIND('DiagNote', 'source.diagnostic.severity.note'),
KIND('DiagWarning', 'source.diagnostic.severity.warning'),
KIND('DiagError', 'source.diagnostic.severity.error'),
KIND('CodeCompletionEverything', 'source.codecompletion.everything'),
KIND('CodeCompletionModule', 'source.codecompletion.module'),
KIND('CodeCompletionKeyword', 'source.codecompletion.keyword'),
KIND('CodeCompletionLiteral', 'source.codecompletion.literal'),
KIND('CodeCompletionCustom', 'source.codecompletion.custom'),
KIND('CodeCompletionIdentifier', 'source.codecompletion.identifier'),
KIND('CodeCompletionDescription', 'source.codecompletion.description'),
KIND('EditActive', 'source.edit.kind.active'),
KIND('EditInactive', 'source.edit.kind.inactive'),
KIND('EditSelector', 'source.edit.kind.selector'),
KIND('EditString', 'source.edit.kind.string'),
KIND('EditComment', 'source.edit.kind.comment'),
KIND('EditMismatch', 'source.edit.kind.mismatch'),
KIND('EditUnknown', 'source.edit.kind.unknown'),
KIND('RenameRangeBase', 'source.refactoring.range.kind.basename'),
KIND('RenameRangeKeywordBase',
'source.refactoring.range.kind.keyword-basename'),
KIND('RenameRangeParam',
'source.refactoring.range.kind.parameter-and-whitespace'),
KIND('RenameRangeNoncollapsibleParam',
'source.refactoring.range.kind.noncollapsible-parameter'),
KIND('RenameRangeDeclArgLabel',
'source.refactoring.range.kind.decl-argument-label'),
KIND('RenameRangeCallArgLabel',
'source.refactoring.range.kind.call-argument-label'),
KIND('RenameRangeCallArgColon',
'source.refactoring.range.kind.call-argument-colon'),
KIND('RenameRangeCallArgCombined',
'source.refactoring.range.kind.call-argument-combined'),
KIND('RenameRangeSelectorArgLabel',
'source.refactoring.range.kind.selector-argument-label'),
KIND('Definition', 'source.syntacticrename.definition'),
KIND('Reference', 'source.syntacticrename.reference'),
KIND('Call', 'source.syntacticrename.call'),
KIND('Unknown', 'source.syntacticrename.unknown'),
KIND('StatNumRequests', 'source.statistic.num-requests'),
KIND('StatNumSemaRequests', 'source.statistic.num-semantic-requests'),
KIND('SyntaxTreeOff', 'source.syntaxtree.transfer.off'),
KIND('SyntaxTreeIncremental', 'source.syntaxtree.transfer.incremental'),
KIND('SyntaxTreeFull', 'source.syntaxtree.transfer.full'),
KIND('SyntaxTreeSerializationJSON',
'source.syntaxtree.serialization.format.json'),
KIND('SyntaxTreeSerializationByteTree',
'source.syntaxtree.serialization.format.bytetree'),
]
|
|
import logging, threading, sys, os, time, subprocess, string, tempfile, re, traceback, shutil
from galaxy import util, model, config
from galaxy.model import mapping
from galaxy.model.orm import lazyload
from galaxy.datatypes.tabular import *
from galaxy.datatypes.interval import *
from galaxy.datatypes import metadata
from galaxy.util.bunch import Bunch
from sqlalchemy import or_
import pkg_resources
pkg_resources.require( "PasteDeploy" )
from paste.deploy.converters import asbool
from Queue import Queue, Empty
log = logging.getLogger( __name__ )
uci_states = Bunch(
NEW_UCI = "newUCI",
NEW = "new",
CREATING = "creating",
DELETING_UCI = "deletingUCI",
DELETING = "deleting",
DELETED = "deleted",
SUBMITTED_UCI = "submittedUCI",
SUBMITTED = "submitted",
SHUTTING_DOWN_UCI = "shutting-downUCI",
SHUTTING_DOWN = "shutting-down",
ADD_STORAGE_UCI = "add-storageUCI",
ADD_STORAGE = "add-storage",
AVAILABLE = "available",
RUNNING = "running",
PENDING = "pending",
ERROR = "error",
SNAPSHOT_UCI = "snapshotUCI",
SNAPSHOT = "snapshot"
)
instance_states = Bunch(
TERMINATED = "terminated",
SUBMITTED = "submitted",
RUNNING = "running",
ADDING = "adding-storage",
PENDING = "pending",
SHUTTING_DOWN = "shutting-down",
ERROR = "error"
)
store_status = Bunch(
WAITING = "waiting",
IN_USE = "in-use",
ADDING = "adding",
CREATING = "creating",
DELETED = 'deleted',
ERROR = "error"
)
snapshot_status = Bunch(
SUBMITTED = 'submitted',
PENDING = 'pending',
COMPLETED = 'completed',
DELETE = 'delete',
DELETED= 'deleted',
ERROR = "error"
)
class CloudManager( object ):
"""
Highest level interface to cloud management.
"""
def __init__( self, app ):
self.app = app
self.sa_session = app.model.context
if self.app.config.enable_cloud_execution == True:
# The dispatcher manager for underlying cloud instances - implements and contacts individual cloud providers
self.provider = CloudProvider( app )
# Monitor for updating status of cloud instances
self.cloud_monitor = CloudMonitor( self.app, self.provider )
else:
self.job_queue = self.job_stop_queue = NoopCloudMonitor()
def shutdown( self ):
self.cloud_monitor.shutdown()
class Sleeper( object ):
"""
Provides a 'sleep' method that sleeps for a number of seconds *unless*
the wake method is called (from a different thread).
"""
def __init__( self ):
self.condition = threading.Condition()
def sleep( self, seconds ):
self.condition.acquire()
self.condition.wait( seconds )
self.condition.release()
def wake( self ):
self.condition.acquire()
self.condition.notify()
self.condition.release()
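# Minimal usage sketch for Sleeper (thread roles are illustrative): the monitor
# thread calls sleeper.sleep( 2 ) between polling passes, while another thread
# calls sleeper.wake() to cut the pause short, e.g. after queueing new work or
# when requesting shutdown.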
class CloudMonitor( object ):
"""
Cloud manager, waits for user to instantiate a cloud instance and then invokes a
CloudProvider.
"""
STOP_SIGNAL = object()
def __init__( self, app, provider ):
"""Start the cloud manager"""
self.app = app
# Keep track of the pid that started the cloud manager, only it
# has valid threads
self.parent_pid = os.getpid()
self.sa_session = app.model.context
# Contains requests that are waiting (only use from monitor thread)
self.waiting = []
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.running = True
self.provider = provider
self.monitor_thread = threading.Thread( target=self.__monitor )
self.monitor_thread.start()
log.info( "Cloud manager started" )
def __monitor( self ):
"""
Daemon that continuously monitors cloud instance requests as well as state
of running instances.
"""
# HACK: Delay until after forking, we need a way to do post fork notification!!!
time.sleep( 10 )
cnt = 0 # Run the global update only periodically, so keep a loop counter
while self.running:
try:
self.__monitor_step()
if cnt%30 == 0: # Run global update every 30 iterations (1 minute)
self.provider.update()
cnt = 0
except:
log.exception( "Exception in cloud manager monitor_step" )
# Sleep
cnt += 1
self.sleeper.sleep( 2 )
def __monitor_step( self ):
"""
Called repeatedly by `__monitor` to process cloud instance requests.
Queries the local database for UCIs whose state is one of the transient
'*UCI' request states (new, submitted, shutting-down, deleting, snapshot,
or add-storage), wraps each in a UCIwrapper, and hands it to the cloud
provider via put().
"""
model = self.app.model
new_requests = []
for r in self.sa_session.query( model.UCI ) \
.filter( or_( model.UCI.table.c.state==uci_states.NEW_UCI,
model.UCI.table.c.state==uci_states.SUBMITTED_UCI,
model.UCI.table.c.state==uci_states.SHUTTING_DOWN_UCI,
model.UCI.table.c.state==uci_states.DELETING_UCI,
model.UCI.table.c.state==uci_states.SNAPSHOT_UCI,
model.UCI.table.c.state==uci_states.ADD_STORAGE_UCI ) ) \
.all():
uci_wrapper = UCIwrapper( r, self.app )
new_requests.append( uci_wrapper )
for uci_wrapper in new_requests:
self.sa_session.expunge_all()
self.put( uci_wrapper )
def put( self, uci_wrapper ):
"""Add a request to the queue."""
self.provider.put( uci_wrapper )
self.sleeper.wake()
def shutdown( self ):
"""Attempts to gracefully shut down the worker thread"""
if self.parent_pid != os.getpid():
# We're not the real queue, do nothing
return
else:
log.info( "Sending stop signal to worker thread" )
self.running = False
self.sleeper.wake()
log.info( "cloud manager stopped" )
self.dispatcher.shutdown()
class UCIwrapper( object ):
"""
Wraps 'model.UCI' with convenience methods for state management
"""
def __init__( self, uci, app ):
self.uci_id = uci.id
self.app = app
self.sa_session = self.app.model.context
base_directory = os.path.join( self.app.config.job_working_directory, "cloud" )
self.working_directory = os.path.join( base_directory, str( self.uci_id ) )
# log.debug( "Cloud controller working directory for UCI DB ID '%s': '%s'" % ( self.uci_id, self.working_directory ) )
if not os.path.exists( base_directory ):
os.mkdir( base_directory )
# --------- Setter methods -----------------
def change_state( self, uci_state=None, instance_id=None, i_state=None ):
"""
Sets state for UCI and/or UCI's instance with instance_id as provided by cloud provider and stored in local
Galaxy database.
Need to provide either: (1) state for the UCI, or (2) instance_id and its state, or (3) all arguments.
"""
# log.debug( "Changing state - new uci_state: %s, instance_id: %s, i_state: %s" % ( uci_state, instance_id, i_state ) )
if uci_state is not None:
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
uci.state = uci_state
self.sa_session.flush()
if ( instance_id is not None ) and ( i_state is not None ):
instance = self.sa_session.query( model.CloudInstance ).filter_by( uci_id=self.uci_id, instance_id=instance_id).first()
instance.state = i_state
self.sa_session.add( instance )
self.sa_session.flush()
def set_mi( self, i_index, mi_id ):
"""
Sets Machine Image (MI), e.g., 'ami-66fa190f', for UCI's instance with given index as it
is stored in local Galaxy database.
"""
mi = self.sa_session.query( model.CloudImage ).filter( model.CloudImage.table.c.image_id==mi_id ).first()
instance = self.sa_session.query( model.CloudInstance ).get( i_index )
instance.image = mi
self.sa_session.add( instance )
self.sa_session.flush()
def set_key_pair( self, key_name, key_material=None ):
"""
Sets key pair value for current UCI.
"""
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
uci.key_pair_name = key_name
if key_material is not None:
uci.key_pair_material = key_material
self.sa_session.flush()
def set_instance_launch_time( self, launch_time, i_index=None, i_id=None ):
"""
Stores launch time in local database for instance with specified index - i_index (as it is stored in local
Galaxy database) or with specified instance ID - i_id (as obtained from the cloud provider AND stored
in local Galaxy Database). Either 'i_index' or 'i_id' needs to be provided.
"""
if i_index != None:
instance = self.sa_session.query( model.CloudInstance ).get( i_index )
elif i_id != None:
instance = self.sa_session.query( model.CloudInstance ).filter_by( uci_id=self.uci_id, instance_id=i_id).first()
else:
return None
instance.launch_time = launch_time
self.sa_session.add( instance )
self.sa_session.flush()
def set_uci_launch_time( self, launch_time ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
uci.launch_time = launch_time
self.sa_session.add( uci )
self.sa_session.flush()
def set_stop_time( self, stop_time, i_index=None, i_id=None ):
"""
Stores stop time in local database for instance with specified index - i_index (as it is stored in local
Galaxy database) or with specified instance ID - i_id (as obtained from the cloud provider AND stored
in local Galaxy Database). Either 'i_index' or 'i_id' needs to be provided.
"""
if i_index != None:
instance = self.sa_session.query( model.CloudInstance ).get( i_index )
elif i_id != None:
instance = self.sa_session.query( model.CloudInstance ).filter_by( uci_id=self.uci_id, instance_id=i_id).first()
else:
return None
instance.stop_time = stop_time
self.sa_session.add( instance )
self.sa_session.flush()
def set_security_group_name( self, security_group_name, i_index=None, i_id=None ):
"""
Stores security group name in local database for instance with specified index - i_index (as it is stored in local
Galaxy database) or with specified instance ID - i_id (as obtained from the cloud provider AND stored
in local Galaxy Database). Either 'i_index' or 'i_id' needs to be provided.
"""
if i_index != None:
instance = self.sa_session.query( model.CloudInstance ).get( i_index )
elif i_id != None:
instance = self.sa_session.query( model.CloudInstance ).filter_by( uci_id=self.uci_id, instance_id=i_id).first()
else:
return None
instance.security_group = security_group_name
self.sa_session.add( instance )
self.sa_session.flush()
def set_reservation_id( self, i_index, reservation_id ):
instance = self.sa_session.query( model.CloudInstance ).get( i_index )
instance.reservation_id = reservation_id
self.sa_session.add( instance )
self.sa_session.flush()
def set_instance_id( self, i_index, instance_id ):
"""
i_index refers to UCI's instance ID as stored in local database
instance_id refers to real-world, cloud resource ID (e.g., 'i-78hd823a')
"""
instance = self.sa_session.query( model.CloudInstance ).get( i_index )
instance.instance_id = instance_id
self.sa_session.add( instance )
self.sa_session.flush()
# def set_public_dns( self, instance_id, public_dns ):
# uci = self.sa_session.query( model.UCI ).get( self.uci_id )
# self.sa_session.refresh( uci )
# uci.instance[instance_id].public_dns = public_dns
# uci.instance[instance_id].flush()
#
# def set_private_dns( self, instance_id, private_dns ):
# uci = self.sa_session.query( model.UCI ).get( self.uci_id )
# self.sa_session.refresh( uci )
# uci.instance[instance_id].private_dns = private_dns
# uci.instance[instance_id].flush()
def reset_uci_launch_time( self ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
uci.launch_time = None
self.sa_session.add( uci )
self.sa_session.flush()
def set_error( self, error, set_state=False ):
"""
Sets error field of given UCI in local Galaxy database as well as any instances associated with
this UCI whose state is 'None' or 'SUBMITTED'. If set_state is set to 'true',
the method also sets the state of the given UCI and its corresponding instances to 'error'
"""
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
uci.error = error
if set_state:
uci.state = uci_states.ERROR
# Process all instances associated with this UCI
instances = self.sa_session.query( model.CloudInstance ) \
.filter_by( uci=uci ) \
.filter( or_( model.CloudInstance.table.c.state==None, model.CloudInstance.table.c.state==instance_states.SUBMITTED ) ) \
.all()
for i in instances:
i.error = error
i.state = instance_states.ERROR
self.sa_session.add( i )
self.sa_session.flush()
self.sa_session.add( uci )
self.sa_session.flush()
def set_deleted( self ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
uci.state = uci_states.DELETED # for bookkeeping reasons, mark as deleted but don't actually delete.
uci.deleted = True
self.sa_session.add( uci )
self.sa_session.flush()
# def set_store_device( self, store_id, device ):
# uci = self.sa_session.query( model.UCI ).get( self.uci_id )
# self.sa_session.refresh( uci )
# uci.store[store_id].device = device
# uci.store[store_id].flush()
def set_uci_total_size( self, total_size ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
uci.total_size = total_size
self.sa_session.add( uci )
self.sa_session.flush()
def set_store_error( self, error, store_index=None, store_id=None ):
if store_index != None:
store = self.sa_session.query( model.CloudStore ).get( store_index )
elif store_id != None:
store = self.sa_session.query( model.CloudStore ).filter_by( volume_id = store_id ).first()
else:
return None
store.error = error
store.status = store_status.ERROR
self.sa_session.add( store )
self.sa_session.flush()
def set_store_status( self, vol_id, status ):
vol = self.sa_session.query( model.CloudStore ).filter( model.CloudStore.table.c.volume_id == vol_id ).first()
vol.status = status
self.sa_session.add( vol )
self.sa_session.flush()
def set_store_availability_zone( self, availability_zone, vol_id=None ):
"""
Sets availability zone of storage volumes for either ALL volumes associated with current
UCI or for the volume whose volume ID (e.g., 'vol-39F80512') is provided as argument.
"""
if vol_id is not None:
vol = self.sa_session.query( model.CloudStore ).filter( model.CloudStore.table.c.volume_id == vol_id ).all()
else:
vol = self.sa_session.query( model.CloudStore ).filter( model.CloudStore.table.c.uci_id == self.uci_id ).all()
for v in vol:
v.availability_zone = availability_zone
self.sa_session.add( v )
self.sa_session.flush()
def set_store_volume_id( self, store_index, volume_id ):
"""
Given store index as it is stored in local database, set volume ID as it is registered
on the cloud provider (e.g., vol-39890501)
"""
if store_index != None:
store = self.sa_session.query( model.CloudStore ).get( store_index )
store.volume_id = volume_id
self.sa_session.add( store )
self.sa_session.flush()
else:
return None
# uci = self.sa_session.query( model.UCI ).get( self.uci_id )
# self.sa_session.refresh( uci )
# uci.store[store_index].volume_id = volume_id
# #uci.store[store_index].flush()
# self.sa_session.add( uci )
# self.sa_session.flush()
def set_store_instance( self, vol_id, instance_id ):
"""
        Stores the instance ID that the given storage volume is attached to. The store volume ID
        should be given in the following format: 'vol-78943248'
"""
vol = self.sa_session.query( model.CloudStore ).filter( model.CloudStore.table.c.volume_id == vol_id ).first()
inst = self.sa_session.query( model.CloudInstance ).filter_by( instance_id=instance_id ).first()
vol.inst = inst
self.sa_session.add( vol )
self.sa_session.flush()
def set_store_device( self, vol_id, device ):
"""
        Stores the device that the given storage volume is attached as on the instance. The store
        volume ID should be given in the following format: 'vol-78943248'
"""
vol = self.sa_session.query( model.CloudStore ).filter( model.CloudStore.table.c.volume_id == vol_id ).first()
vol.device = str( device )
self.sa_session.add( vol )
self.sa_session.flush()
def set_store_deleted( self, vol_id, status=None ):
"""
Set storage volume as deleted in local database. Optionally, set the volume status too.
"""
vol = self.sa_session.query( model.CloudStore ).filter( model.CloudStore.table.c.volume_id == vol_id ).first()
vol.deleted = True
if status != None:
vol.status = status
self.sa_session.add( vol )
self.sa_session.flush()
def set_snapshot_id( self, snap_index, id ):
        snap = self.sa_session.query( model.CloudSnapshot ).get( snap_index )
snap.snapshot_id = id
self.sa_session.add( snap )
self.sa_session.flush()
def set_snapshot_status( self, status, snap_index=None, snap_id=None ):
if snap_index != None:
snap = self.sa_session.query( model.CloudSnapshot ).get( snap_index )
elif snap_id != None:
snap = self.sa_session.query( model.CloudSnapshot ).filter_by( snapshot_id = snap_id).first()
else:
return
snap.status = status
self.sa_session.add( snap )
self.sa_session.flush()
def set_snapshot_error( self, error, snap_index=None, snap_id=None, set_status=False ):
if snap_index != None:
snap = self.sa_session.query( model.CloudSnapshot ).get( snap_index )
elif snap_id != None:
snap = self.sa_session.query( model.CloudSnapshot ).filter_by( snapshot_id = snap_id).first()
else:
return
snap.error = error
if set_status:
snap.status = snapshot_status.ERROR
self.sa_session.add( snap )
self.sa_session.flush()
# --------- Getter methods -----------------
def get_provider_type( self ):
""" Returns type of cloud provider associated with given UCI. """
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.credentials.provider.type
def get_provider( self ):
""" Returns database object of cloud provider associated with credentials of given UCI. """
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.credentials.provider
def get_instance_type( self, i_index ):
instance = self.sa_session.query( model.CloudInstance ).get( i_index )
self.sa_session.refresh( instance )
return instance.type
def get_uci_state( self ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.state
def get_instances_indexes( self, state=None ):
"""
        Returns the indexes of instances associated with the given UCI, as they are stored in the local
        Galaxy database, whose state matches the passed argument. The returned values can be used to
        index instances in the local Galaxy database.
"""
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
instances = self.sa_session.query( model.CloudInstance ) \
.filter_by( uci=uci ) \
.filter( model.CloudInstance.table.c.state==state ) \
.all()
il = []
for i in instances:
il.append( i.id )
return il
def get_instance_state( self, instance_id ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.instance[instance_id].state
def get_instaces_in_state( self, state ):
""" Get database objects of all instances associated with this UCI in given state. """
return self.sa_session.query( model.CloudInstance ) \
.filter_by( uci_id=self.uci_id, state = state ) \
.all()
def get_instances_ids( self ):
"""
        Returns a list of IDs of all instances associated with this UCI that are not in the 'terminated'
        or 'error' state but whose state is defined (i.e., state is not None)
        (e.g., return value: ['i-402906D2', 'i-q0290dsD2'] ).
        """
        il = self.sa_session.query( model.CloudInstance ) \
                .filter_by( uci_id=self.uci_id ) \
                .filter( model.CloudInstance.table.c.state != 'terminated' ) \
                .filter( model.CloudInstance.table.c.state != 'error' ) \
                .filter( model.CloudInstance.table.c.state != None ) \
                .all()
instanceList = []
for i in il:
instanceList.append( i.instance_id )
return instanceList
def get_name( self ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.name
def get_key_pair_name( self ):
"""
Returns keypair name associated with given UCI.
"""
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.key_pair_name
def get_key_pair_material( self ):
"""
Returns keypair material (i.e., private key) associated with given UCI.
"""
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.key_pair_material
def get_security_group_name( self, i_index=None, i_id=None ):
"""
Given EITHER instance index as it is stored in local Galaxy database OR instance ID as it is
obtained from cloud provider and stored in local Galaxy database, return security group name associated
with given instance.
"""
if i_index != None:
instance = self.sa_session.query( model.CloudInstance ).get( i_index )
return instance.security_group
elif i_id != None:
instance = self.sa_session.query( model.CloudInstance ).filter_by( uci_id=self.uci_id, instance_id=i_id).first()
return instance.security_group
def get_access_key( self ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.credentials.access_key
def get_secret_key( self ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.credentials.secret_key
def get_mi_id( self, instance_id=0 ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.instance[instance_id].image.image_id
def get_public_dns( self, instance_id=0 ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.instance[instance_id].public_dns
def get_private_dns( self, instance_id=0 ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.instance[instance_id].private_dns
def get_uci_availability_zone( self ):
"""
Returns UCI's availability zone.
        Because all storage volumes associated with a given UCI must be in the same
        availability zone, the availability zone of a UCI is determined by the availability
        zone of any one of its storage volumes.
"""
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.store[0].availability_zone
def get_uci_total_size( self ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.total_size
def get_store_size( self, store_id=0 ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.store[store_id].size
def get_store_volume_id( self, store_id=0 ):
"""
Given store ID associated with this UCI, get volume ID as it is registered
on the cloud provider (e.g., 'vol-39890501')
"""
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.store[store_id].volume_id
def get_all_stores_in_status( self, status ):
"""
Return database objects of all stores associated with this UCI that have their
status set to value passed as parameter.
"""
return self.sa_session.query( model.CloudStore ).filter_by( deleted=False, uci_id=self.uci_id, status=status ).all()
def get_all_stores( self ):
""" Returns all storage volumes' database objects associated with this UCI that have not been marked as 'deleted'. """
return self.sa_session.query( model.CloudStore ) \
.filter_by( deleted=False, uci_id=self.uci_id ) \
.all()
def get_snapshots( self, status=None ):
""" Returns database objects for all snapshots associated with this UCI and in given status."""
return self.sa_session.query( model.CloudSnapshot ).filter_by( uci_id=self.uci_id, status=status ).all()
def get_uci( self ):
""" Returns database object for given UCI. """
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci
def get_uci_working_directory( self ):
return self.working_directory
def uci_launch_time_set( self ):
uci = self.sa_session.query( model.UCI ).get( self.uci_id )
self.sa_session.refresh( uci )
return uci.launch_time
class CloudProvider( object ):
def __init__( self, app ):
import providers.eucalyptus
import providers.ec2
self.app = app
self.cloud_provider = {}
self.cloud_provider["eucalyptus"] = providers.eucalyptus.EucalyptusCloudProvider( app )
self.cloud_provider["ec2"] = providers.ec2.EC2CloudProvider( app )
def put( self, uci_wrapper ):
""" Put given request for UCI manipulation into provider's request queue."""
self.cloud_provider[uci_wrapper.get_provider_type()].put( uci_wrapper )
def update( self ):
"""
        Runs a global status update across all providers for all UCIs in a state other than 'terminated'
        or 'available'. The purpose of this method is to keep the state of the local DB in sync with the
        real-world resources.
"""
for provider in self.cloud_provider.keys():
# log.debug( "Running global update for provider: '%s'" % provider )
self.cloud_provider[provider].update()
def shutdown( self ):
for runner in self.cloud_provider.itervalues():
runner.shutdown()
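# Illustrative sketch (not part of the original module): how a caller might route a UCI
# request through the provider registry above. The `app` and `uci_wrapper` objects are
# assumed to exist elsewhere in Galaxy; the names below are hypothetical.
#
#     cloud_provider = CloudProvider( app )
#     # put() looks up the concrete provider ('eucalyptus' or 'ec2') from the UCI's
#     # credentials and enqueues the request with that provider's worker.
#     cloud_provider.put( uci_wrapper )
#     # update() syncs local DB state against the real cloud resources for every provider.
#     cloud_provider.update()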
class NoopCloudMonitor( object ):
"""
Implements the CloudMonitor interface but does nothing
"""
def put( self, *args ):
return
def shutdown( self ):
return
|
|
from __future__ import division, print_function
from hscom import __common__
(print, print_, print_on, print_off,
rrr, profile) = __common__.init(__name__, '[guitools]')
# Python
from os.path import split
import sys
#import warnings
# Science
import numpy as np
# Qt
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
# HotSpotter
from hscom import fileio as io
from hscom import helpers
from hscom import helpers as util
from hsviz import draw_func2 as df2
IS_INIT = False
QAPP = None
IS_ROOT = False
DISABLE_NODRAW = False
DEBUG = False
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
def configure_matplotlib():
import multiprocessing
import matplotlib
mplbackend = matplotlib.get_backend()
if multiprocessing.current_process().name == 'MainProcess':
print('[*guitools] current mplbackend is: %r' % mplbackend)
print('[*guitools] matplotlib.use(Qt4Agg)')
else:
return
matplotlib.rcParams['toolbar'] = 'toolbar2'
matplotlib.rc('text', usetex=False)
#matplotlib.rcParams['text'].usetex = False
if mplbackend != 'Qt4Agg':
matplotlib.use('Qt4Agg', warn=True, force=True)
mplbackend = matplotlib.get_backend()
if multiprocessing.current_process().name == 'MainProcess':
print('[*guitools] current mplbackend is: %r' % mplbackend)
#matplotlib.rcParams['toolbar'] = 'None'
#matplotlib.rcParams['interactive'] = True
#---------------
# SLOT DECORATORS
def slot_(*types, **kwargs_): # This is called at wrap time to get args
'''
wrapper around pyqtslot decorator
*args = types
kwargs_['initdbg']
kwargs_['rundbg']
'''
initdbg = kwargs_.get('initdbg', DEBUG)
rundbg = kwargs_.get('rundbg', DEBUG)
# Wrap with debug statments
def pyqtSlotWrapper(func):
func_name = func.func_name
if initdbg:
print('[@guitools] Wrapping %r with slot_' % func.func_name)
if rundbg:
@QtCore.pyqtSlot(*types, name=func.func_name)
def slot_wrapper(self, *args, **kwargs):
argstr_list = map(str, args)
kwastr_list = ['%s=%s' % item for item in kwargs.iteritems()]
argstr = ', '.join(argstr_list + kwastr_list)
                print('[**slot_.Beginning] %s(%s)' % (func_name, argstr))
#with helpers.Indenter():
result = func(self, *args, **kwargs)
print('[**slot_.Finished] %s(%s)' % (func_name, argstr))
return result
else:
@QtCore.pyqtSlot(*types, name=func.func_name)
def slot_wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
return result
slot_wrapper.func_name = func_name
return slot_wrapper
return pyqtSlotWrapper
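# A minimal usage sketch for slot_ (illustrative only; ExampleWidget and on_name_changed
# are hypothetical, not part of HotSpotter):
#
#     class ExampleWidget(QtGui.QWidget):
#         @slot_(str, rundbg=True)
#         def on_name_changed(self, new_name):
#             print('name is now %r' % new_name)
#
# The decorator registers the method as a pyqtSlot taking a single str argument and, when
# rundbg is True, prints a begin/finish line with the call arguments around each invocation.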
#/SLOT DECORATOR
#---------------
# BLOCKING DECORATOR
# TODO: This decorator has to be specific to either front or back. Is there a
# way to make it more general?
def backblocking(func):
#printDBG('[@guitools] Wrapping %r with backblocking' % func.func_name)
def block_wrapper(back, *args, **kwargs):
#print('[guitools] BLOCKING')
wasBlocked_ = back.front.blockSignals(True)
try:
result = func(back, *args, **kwargs)
except Exception as ex:
back.front.blockSignals(wasBlocked_)
            print('Block wrapper caught exception in %r' % func.func_name)
print('back = %r' % back)
VERBOSE = False
if VERBOSE:
print('*args = %r' % (args,))
print('**kwargs = %r' % (kwargs,))
#print('ex = %r' % ex)
import traceback
print(traceback.format_exc())
#back.user_info('Error in blocking ex=%r' % ex)
back.user_info('Error while blocking gui:\nex=%r' % ex)
raise
back.front.blockSignals(wasBlocked_)
#print('[guitools] UNBLOCKING')
return result
block_wrapper.func_name = func.func_name
return block_wrapper
def frontblocking(func):
    # HACK: blocking2 is specific to front
#printDBG('[@guitools] Wrapping %r with frontblocking' % func.func_name)
def block_wrapper(front, *args, **kwargs):
#print('[guitools] BLOCKING')
#wasBlocked = self.blockSignals(True)
wasBlocked_ = front.blockSignals(True)
try:
result = func(front, *args, **kwargs)
except Exception as ex:
front.blockSignals(wasBlocked_)
print('Block wrapper caught exception in %r' % func.func_name)
print('front = %r' % front)
VERBOSE = False
if VERBOSE:
print('*args = %r' % (args,))
print('**kwargs = %r' % (kwargs,))
#print('ex = %r' % ex)
front.user_info('Error in blocking ex=%r' % ex)
raise
front.blockSignals(wasBlocked_)
#print('[guitools] UNBLOCKING')
return result
block_wrapper.func_name = func.func_name
return block_wrapper
# DRAWING DECORATOR
def drawing(func):
'Wraps a class function and draws windows on completion'
#printDBG('[@guitools] Wrapping %r with drawing' % func.func_name)
@util.indent_decor('[drawing]')
def drawing_wrapper(self, *args, **kwargs):
#print('[guitools] DRAWING')
result = func(self, *args, **kwargs)
#print('[guitools] DONE DRAWING')
if kwargs.get('dodraw', True) or DISABLE_NODRAW:
df2.draw()
return result
drawing_wrapper.func_name = func.func_name
return drawing_wrapper
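# Illustrative usage of the drawing decorator (the method below is hypothetical, not from
# this codebase):
#
#     class Backend(object):
#         @drawing
#         def show_chip(self, cx, **kwargs):
#             ...  # plotting calls via df2 go here
#
#     back = Backend()
#     back.show_chip(0, dodraw=False)   # skips df2.draw() unless DISABLE_NODRAW is set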
@profile
def select_orientation():
#from matplotlib.backend_bases import mplDeprecation
print('[*guitools] Define an orientation angle by clicking two points')
try:
# Compute an angle from user interaction
sys.stdout.flush()
fig = df2.gcf()
oldcbid, oldcbfn = df2.disconnect_callback(fig, 'button_press_event')
#with warnings.catch_warnings():
#warnings.filterwarnings("ignore", category=mplDeprecation)
pts = np.array(fig.ginput(2))
#print('[*guitools] ginput(2) = %r' % pts)
# Get reference point to origin
refpt = pts[1] - pts[0]
#theta = np.math.atan2(refpt[1], refpt[0])
theta = np.math.atan2(refpt[1], refpt[0])
print('The angle in radians is: %r' % theta)
df2.connect_callback(fig, 'button_press_event', oldcbfn)
return theta
except Exception as ex:
print('Annotate Orientation Failed %r' % ex)
return None
@profile
def select_roi():
#from matplotlib.backend_bases import mplDeprecation
    print('[*guitools] Define a Rectangular ROI by clicking two points.')
try:
sys.stdout.flush()
fig = df2.gcf()
# Disconnect any other button_press events
oldcbid, oldcbfn = df2.disconnect_callback(fig, 'button_press_event')
#with warnings.catch_warnings():
#warnings.filterwarnings("ignore", category=mplDeprecation)
pts = fig.ginput(2)
print('[*guitools] ginput(2) = %r' % (pts,))
[(x1, y1), (x2, y2)] = pts
xm = min(x1, x2)
xM = max(x1, x2)
ym = min(y1, y2)
yM = max(y1, y2)
xywh = map(int, map(round, (xm, ym, xM - xm, yM - ym)))
roi = np.array(xywh, dtype=np.int32)
# Reconnect the old button press events
df2.connect_callback(fig, 'button_press_event', oldcbfn)
print('[*guitools] roi = %r ' % (roi,))
return roi
except Exception as ex:
print('[*guitools] ROI selection Failed:\n%r' % (ex,))
return None
def _addOptions(msgBox, options):
#msgBox.addButton(QtGui.QMessageBox.Close)
for opt in options:
role = QtGui.QMessageBox.ApplyRole
msgBox.addButton(QtGui.QPushButton(opt), role)
def _cacheReply(msgBox):
    dontPrompt = QtGui.QCheckBox("don't ask me again", parent=msgBox)
dontPrompt.blockSignals(True)
msgBox.addButton(dontPrompt, QtGui.QMessageBox.ActionRole)
return dontPrompt
def _newMsgBox(msg='', title='', parent=None, options=None, cache_reply=False):
msgBox = QtGui.QMessageBox(parent)
#msgBox.setAttribute(QtCore.Qt.WA_DeleteOnClose)
#std_buts = QtGui.QMessageBox.Close
#std_buts = QtGui.QMessageBox.NoButton
std_buts = QtGui.QMessageBox.Cancel
msgBox.setStandardButtons(std_buts)
msgBox.setWindowTitle(title)
msgBox.setText(msg)
msgBox.setModal(parent is not None)
return msgBox
@profile
def msgbox(msg, title='msgbox'):
'Make a non modal critical QtGui.QMessageBox.'
msgBox = QtGui.QMessageBox(None)
msgBox.setAttribute(QtCore.Qt.WA_DeleteOnClose)
msgBox.setStandardButtons(QtGui.QMessageBox.Ok)
msgBox.setWindowTitle(title)
msgBox.setText(msg)
msgBox.setModal(False)
msgBox.open(msgBox.close)
msgBox.show()
return msgBox
def user_input(parent, msg, title='input dialog'):
reply, ok = QtGui.QInputDialog.getText(parent, title, msg)
if not ok:
return None
return str(reply)
def user_info(parent, msg, title='info'):
msgBox = _newMsgBox(msg, title, parent)
msgBox.setAttribute(QtCore.Qt.WA_DeleteOnClose)
msgBox.setStandardButtons(QtGui.QMessageBox.Ok)
msgBox.setModal(False)
msgBox.open(msgBox.close)
msgBox.show()
@profile
def _user_option(parent, msg, title='options', options=['No', 'Yes'], use_cache=False):
'Prompts user with several options with ability to save decision'
    print('[*guitools] _user_option:\n %s: %s' % (title, msg))
# Recall decision
print('[*guitools] asking user: %r %r' % (msg, title))
cache_id = helpers.hashstr(title + msg)
if use_cache:
reply = io.global_cache_read(cache_id, default=None)
if reply is not None:
return reply
# Create message box
msgBox = _newMsgBox(msg, title, parent)
_addOptions(msgBox, options)
if use_cache:
dontPrompt = _cacheReply(msgBox)
# Wait for output
optx = msgBox.exec_()
if optx == QtGui.QMessageBox.Cancel:
return None
try:
reply = options[optx]
except Exception as ex:
print('[*guitools] USER OPTION EXCEPTION !')
print('[*guitools] optx = %r' % optx)
print('[*guitools] options = %r' % options)
print('[*guitools] ex = %r' % ex)
raise
# Remember decision
if use_cache and dontPrompt.isChecked():
io.global_cache_write(cache_id, reply)
del msgBox
return reply
def user_question(msg):
    msgBox = QtGui.QMessageBox.question(None, '', msg)
return msgBox
def getQtImageNameFilter():
imgNamePat = ' '.join(['*' + ext for ext in helpers.IMG_EXTENSIONS])
imgNameFilter = 'Images (%s)' % (imgNamePat)
return imgNameFilter
@profile
def select_images(caption='Select images:', directory=None):
name_filter = getQtImageNameFilter()
return select_files(caption, directory, name_filter)
@profile
def select_files(caption='Select Files:', directory=None, name_filter=None):
'Selects one or more files from disk using a qt dialog'
print(caption)
if directory is None:
directory = io.global_cache_read('select_directory')
qdlg = QtGui.QFileDialog()
qfile_list = qdlg.getOpenFileNames(caption=caption, directory=directory, filter=name_filter)
file_list = map(str, qfile_list)
print('Selected %d files' % len(file_list))
io.global_cache_write('select_directory', directory)
return file_list
@profile
def select_directory(caption='Select Directory', directory=None):
print(caption)
if directory is None:
directory = io.global_cache_read('select_directory')
qdlg = QtGui.QFileDialog()
qopt = QtGui.QFileDialog.ShowDirsOnly
qdlg_kwargs = dict(caption=caption, options=qopt, directory=directory)
dpath = str(qdlg.getExistingDirectory(**qdlg_kwargs))
print('Selected Directory: %r' % dpath)
io.global_cache_write('select_directory', split(dpath)[0])
return dpath
@profile
def show_open_db_dlg(parent=None):
# OLD
from _frontend import OpenDatabaseDialog
if not '-nc' in sys.argv and not '--nocache' in sys.argv:
db_dir = io.global_cache_read('db_dir')
if db_dir == '.':
db_dir = None
print('[*guitools] cached db_dir=%r' % db_dir)
if parent is None:
parent = QtGui.QDialog()
opendb_ui = OpenDatabaseDialog.Ui_Dialog()
opendb_ui.setupUi(parent)
#opendb_ui.new_db_but.clicked.connect(create_new_database)
#opendb_ui.open_db_but.clicked.connect(open_old_database)
parent.show()
return opendb_ui, parent
@util.indent_decor('[qt-init]')
@profile
def init_qtapp():
global IS_INIT
global IS_ROOT
global QAPP
if QAPP is not None:
return QAPP, IS_ROOT
app = QtCore.QCoreApplication.instance()
is_root = app is None
if is_root: # if not in qtconsole
print('[*guitools] Initializing QApplication')
app = QtGui.QApplication(sys.argv)
QAPP = app
try:
__IPYTHON__
is_root = False
# You are not root if you are in IPYTHON
except NameError:
pass
IS_INIT = True
return app, is_root
@util.indent_decor('[qt-exit]')
@profile
def exit_application():
print('[*guitools] exiting application')
QtGui.qApp.quit()
@util.indent_decor('[qt-main]')
@profile
def run_main_loop(app, is_root=True, back=None, **kwargs):
if back is not None:
print('[*guitools] setting active window')
app.setActiveWindow(back.front)
back.timer = ping_python_interpreter(**kwargs)
if is_root:
exec_core_app_loop(app)
#exec_core_event_loop(app)
else:
print('[*guitools] using roots main loop')
@profile
def exec_core_event_loop(app):
# This works but does not allow IPython injection
print('[*guitools] running core application loop.')
try:
from IPython.lib.inputhook import enable_qt4
enable_qt4()
from IPython.lib.guisupport import start_event_loop_qt4
print('Starting ipython qt4 hook')
start_event_loop_qt4(app)
except ImportError:
pass
app.exec_()
@profile
def exec_core_app_loop(app):
# This works but does not allow IPython injection
print('[*guitools] running core application loop.')
app.exec_()
#sys.exit(app.exec_())
@profile
def ping_python_interpreter(frequency=4200): # 4200):
'Create a QTimer which lets the python catch ctrl+c'
timer = QtCore.QTimer()
timer.timeout.connect(lambda: None)
timer.start(frequency)
return timer
def make_dummy_main_window():
class DummyBackend(QtCore.QObject):
def __init__(self):
super(DummyBackend, self).__init__()
self.front = QtGui.QMainWindow()
self.front.setWindowTitle('Dummy Main Window')
self.front.show()
back = DummyBackend()
return back
def get_scope(qobj, scope_title='_scope_list'):
if not hasattr(qobj, scope_title):
setattr(qobj, scope_title, [])
return getattr(qobj, scope_title)
def clear_scope(qobj, scope_title='_scope_list'):
setattr(qobj, scope_title, [])
def enfore_scope(qobj, scoped_obj, scope_title='_scope_list'):
get_scope(qobj, scope_title).append(scoped_obj)
@profile
def popup_menu(widget, opt2_callback, parent=None):
def popup_slot(pos):
print(pos)
menu = QtGui.QMenu()
actions = [menu.addAction(opt, func) for opt, func in
iter(opt2_callback)]
#pos=QtGui.QCursor.pos()
selection = menu.exec_(widget.mapToGlobal(pos))
return selection, actions
if parent is not None:
# Make sure popup_slot does not lose scope.
for _slot in get_scope(parent, '_popup_scope'):
parent.customContextMenuRequested.disconnect(_slot)
clear_scope(parent, '_popup_scope')
parent.setContextMenuPolicy(Qt.CustomContextMenu)
parent.customContextMenuRequested.connect(popup_slot)
enfore_scope(parent, popup_slot, '_popup_scope')
return popup_slot
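# Illustrative usage (names are hypothetical): attach a right-click menu to a Qt widget.
#
#     opt2_callback = [('Copy', on_copy), ('Delete', on_delete)]
#     popup_menu(table_widget, opt2_callback, parent=table_widget)
#
# The parent gets the Qt.CustomContextMenu policy, and the returned popup_slot is kept in
# the parent's '_popup_scope' list so the connected slot is not garbage collected.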
@profile
def make_header_lists(tbl_headers, editable_list, prop_keys=[]):
col_headers = tbl_headers[:] + prop_keys
col_editable = [False] * len(tbl_headers) + [True] * len(prop_keys)
for header in editable_list:
col_editable[col_headers.index(header)] = True
return col_headers, col_editable
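# Worked example for make_header_lists (illustrative values, not from the original source):
#
#     col_headers, col_editable = make_header_lists(
#         ['gx', 'gname', 'nChips'], editable_list=['gname'], prop_keys=['aif'])
#     # col_headers  -> ['gx', 'gname', 'nChips', 'aif']
#     # col_editable -> [False, True, False, True]
#
# i.e., property columns are always editable and any base header named in editable_list
# is flipped to editable as well.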
|
|
"""
Timsort implementation. Mostly adapted from CPython's listobject.c.
For more information, see listsort.txt in CPython's source tree.
"""
from __future__ import print_function, absolute_import, division
import collections
from numba import types
TimsortImplementation = collections.namedtuple(
'TimsortImplementation',
(# The compile function itself
'compile',
# All subroutines exercised by test_sort
'count_run', 'binarysort', 'gallop_left', 'gallop_right',
'merge_init', 'merge_append', 'merge_pop',
'merge_compute_minrun', 'merge_lo', 'merge_hi', 'merge_at',
'merge_force_collapse', 'merge_collapse',
# The top-level functions
'run_timsort', 'run_timsort_with_values'
))
# The maximum number of entries in a MergeState's pending-runs stack.
# This is enough to sort arrays of size up to about
# 32 * phi ** MAX_MERGE_PENDING
# where phi ~= 1.618. 85 is ridiculously large enough, good for an array
# with 2**64 elements.
# NOTE this implementation doesn't depend on it (the stack is dynamically
# allocated), but it's still good to check as an invariant.
MAX_MERGE_PENDING = 85
# When we get into galloping mode, we stay there until both runs win less
# often than MIN_GALLOP consecutive times. See listsort.txt for more info.
MIN_GALLOP = 7
# Start size for temp arrays.
MERGESTATE_TEMP_SIZE = 256
# A mergestate is a named tuple with the following members:
# - *min_gallop* is an integer controlling when we get into galloping mode
# - *keys* is a temp list for merging keys
# - *values* is a temp list for merging values, if needed
# - *pending* is a stack of pending runs to be merged
# - *n* is the current stack length of *pending*
MergeState = collections.namedtuple(
'MergeState', ('min_gallop', 'keys', 'values', 'pending', 'n'))
MergeRun = collections.namedtuple('MergeRun', ('start', 'size'))
def make_timsort_impl(wrap, make_temp_area):
make_temp_area = wrap(make_temp_area)
intp = types.intp
zero = intp(0)
@wrap
def has_values(keys, values):
return values is not keys
@wrap
def merge_init(keys):
"""
Initialize a MergeState for a non-keyed sort.
"""
temp_size = min(len(keys) // 2 + 1, MERGESTATE_TEMP_SIZE)
temp_keys = make_temp_area(keys, temp_size)
temp_values = temp_keys
pending = [MergeRun(zero, zero)] * MAX_MERGE_PENDING
return MergeState(intp(MIN_GALLOP), temp_keys, temp_values, pending, zero)
@wrap
def merge_init_with_values(keys, values):
"""
Initialize a MergeState for a keyed sort.
"""
temp_size = min(len(keys) // 2 + 1, MERGESTATE_TEMP_SIZE)
temp_keys = make_temp_area(keys, temp_size)
temp_values = make_temp_area(values, temp_size)
pending = [MergeRun(zero, zero)] * MAX_MERGE_PENDING
return MergeState(intp(MIN_GALLOP), temp_keys, temp_values, pending, zero)
@wrap
def merge_append(ms, run):
"""
Append a run on the merge stack.
"""
n = ms.n
assert n < MAX_MERGE_PENDING
ms.pending[n] = run
return MergeState(ms.min_gallop, ms.keys, ms.values, ms.pending, n + 1)
@wrap
def merge_pop(ms):
"""
Pop the top run from the merge stack.
"""
return MergeState(ms.min_gallop, ms.keys, ms.values, ms.pending, ms.n - 1)
@wrap
def merge_getmem(ms, need):
"""
Ensure enough temp memory for 'need' items is available.
"""
alloced = len(ms.keys)
if need <= alloced:
return ms
# Over-allocate
while alloced < need:
alloced = alloced << 1
# Don't realloc! That can cost cycles to copy the old data, but
# we don't care what's in the block.
temp_keys = make_temp_area(ms.keys, alloced)
if has_values(ms.keys, ms.values):
temp_values = make_temp_area(ms.values, alloced)
else:
temp_values = temp_keys
return MergeState(ms.min_gallop, temp_keys, temp_values, ms.pending, ms.n)
@wrap
def merge_adjust_gallop(ms, new_gallop):
"""
Modify the MergeState's min_gallop.
"""
return MergeState(intp(new_gallop), ms.keys, ms.values, ms.pending, ms.n)
@wrap
def LT(a, b):
"""
Trivial comparison function between two keys. This is factored out to
make it clear where comparisons occur.
"""
return a < b
@wrap
def binarysort(keys, values, lo, hi, start):
"""
binarysort is the best method for sorting small arrays: it does
few compares, but can do data movement quadratic in the number of
elements.
[lo, hi) is a contiguous slice of a list, and is sorted via
binary insertion. This sort is stable.
On entry, must have lo <= start <= hi, and that [lo, start) is already
sorted (pass start == lo if you don't know!).
"""
assert lo <= start and start <= hi
_has_values = has_values(keys, values)
if lo == start:
start += 1
while start < hi:
pivot = keys[start]
# Bisect to find where to insert `pivot`
# NOTE: bisection only wins over linear search if the comparison
# function is much more expensive than simply moving data.
l = lo
r = start
# Invariants:
# pivot >= all in [lo, l).
# pivot < all in [r, start).
# The second is vacuously true at the start.
while l < r:
p = l + ((r - l) >> 1)
if LT(pivot, keys[p]):
r = p
else:
l = p+1
# The invariants still hold, so pivot >= all in [lo, l) and
# pivot < all in [l, start), so pivot belongs at l. Note
# that if there are elements equal to pivot, l points to the
# first slot after them -- that's why this sort is stable.
# Slide over to make room (aka memmove()).
for p in range(start, l, -1):
keys[p] = keys[p - 1]
keys[l] = pivot
if _has_values:
pivot_val = values[start]
for p in range(start, l, -1):
values[p] = values[p - 1]
values[l] = pivot_val
start += 1
@wrap
def count_run(keys, lo, hi):
"""
Return the length of the run beginning at lo, in the slice [lo, hi).
lo < hi is required on entry. "A run" is the longest ascending sequence, with
lo[0] <= lo[1] <= lo[2] <= ...
or the longest descending sequence, with
lo[0] > lo[1] > lo[2] > ...
A tuple (length, descending) is returned, where boolean *descending*
is set to 0 in the former case, or to 1 in the latter.
For its intended use in a stable mergesort, the strictness of the defn of
"descending" is needed so that the caller can safely reverse a descending
sequence without violating stability (strict > ensures there are no equal
elements to get out of order).
"""
assert lo < hi
if lo + 1 == hi:
# Trivial 1-long run
return 1, False
if LT(keys[lo + 1], keys[lo]):
# Descending run
for k in range(lo + 2, hi):
if not LT(keys[k], keys[k - 1]):
return k - lo, True
return hi - lo, True
else:
# Ascending run
for k in range(lo + 2, hi):
if LT(keys[k], keys[k - 1]):
return k - lo, False
return hi - lo, False
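    # Worked example (illustrative): for keys = [3, 1, 2] and [lo, hi) = [0, 3),
    # count_run returns (2, True) -- the strictly descending prefix [3, 1] of length 2 --
    # and the caller is then expected to reverse that slice before extending it with
    # binarysort.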
@wrap
def gallop_left(key, a, start, stop, hint):
"""
Locate the proper position of key in a sorted vector; if the vector contains
an element equal to key, return the position immediately to the left of
the leftmost equal element. [gallop_right() does the same except returns
the position to the right of the rightmost equal element (if any).]
"a" is a sorted vector with stop elements, starting at a[start].
stop must be > start.
"hint" is an index at which to begin the search, start <= hint < stop.
The closer hint is to the final result, the faster this runs.
The return value is the int k in start..stop such that
a[k-1] < key <= a[k]
pretending that a[start-1] is minus infinity and a[stop] is plus infinity.
IOW, key belongs at index k; or, IOW, the first k elements of a should
precede key, and the last stop-start-k should follow key.
See listsort.txt for info on the method.
"""
assert stop > start
assert hint >= start and hint < stop
n = stop - start
# First, gallop from the hint to find a "good" subinterval for bisecting
lastofs = 0
ofs = 1
if LT(a[hint], key):
# a[hint] < key => gallop right, until
# a[hint + lastofs] < key <= a[hint + ofs]
maxofs = stop - hint
while ofs < maxofs:
if LT(a[hint + ofs], key):
lastofs = ofs
ofs = (ofs << 1) + 1
if ofs <= 0:
# Int overflow
ofs = maxofs
else:
# key <= a[hint + ofs]
break
if ofs > maxofs:
ofs = maxofs
# Translate back to offsets relative to a[0]
lastofs += hint
ofs += hint
else:
# key <= a[hint] => gallop left, until
# a[hint - ofs] < key <= a[hint - lastofs]
maxofs = hint - start + 1
while ofs < maxofs:
if LT(a[hint - ofs], key):
break
else:
# key <= a[hint - ofs]
lastofs = ofs
ofs = (ofs << 1) + 1
if ofs <= 0:
# Int overflow
ofs = maxofs
if ofs > maxofs:
ofs = maxofs
# Translate back to positive offsets relative to a[0]
lastofs, ofs = hint - ofs, hint - lastofs
assert start - 1 <= lastofs and lastofs < ofs and ofs <= stop
# Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the
# right of lastofs but no farther right than ofs. Do a binary
# search, with invariant a[lastofs-1] < key <= a[ofs].
lastofs += 1
while lastofs < ofs:
m = lastofs + ((ofs - lastofs) >> 1)
if LT(a[m], key):
# a[m] < key
lastofs = m + 1
else:
# key <= a[m]
ofs = m
# Now lastofs == ofs, so a[ofs - 1] < key <= a[ofs]
return ofs
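    # Worked example (illustrative): with a = [1, 2, 2, 3], key = 2, start = 0, stop = 4
    # and hint = 0, gallop_left returns 1, i.e. the position immediately to the left of
    # the leftmost element equal to the key (so a[0] < key <= a[1]).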
@wrap
def gallop_right(key, a, start, stop, hint):
"""
Exactly like gallop_left(), except that if key already exists in a[start:stop],
finds the position immediately to the right of the rightmost equal value.
The return value is the int k in start..stop such that
a[k-1] <= key < a[k]
The code duplication is massive, but this is enough different given that
we're sticking to "<" comparisons that it's much harder to follow if
written as one routine with yet another "left or right?" flag.
"""
assert stop > start
assert hint >= start and hint < stop
n = stop - start
# First, gallop from the hint to find a "good" subinterval for bisecting
lastofs = 0
ofs = 1
if LT(key, a[hint]):
# key < a[hint] => gallop left, until
# a[hint - ofs] <= key < a[hint - lastofs]
maxofs = hint - start + 1
while ofs < maxofs:
if LT(key, a[hint - ofs]):
lastofs = ofs
ofs = (ofs << 1) + 1
if ofs <= 0:
# Int overflow
ofs = maxofs
else:
# a[hint - ofs] <= key
break
if ofs > maxofs:
ofs = maxofs
# Translate back to positive offsets relative to a[0]
lastofs, ofs = hint - ofs, hint - lastofs
else:
# a[hint] <= key -- gallop right, until
# a[hint + lastofs] <= key < a[hint + ofs]
maxofs = stop - hint
while ofs < maxofs:
if LT(key, a[hint + ofs]):
break
else:
# a[hint + ofs] <= key
lastofs = ofs
ofs = (ofs << 1) + 1
if ofs <= 0:
# Int overflow
ofs = maxofs
if ofs > maxofs:
ofs = maxofs
# Translate back to offsets relative to a[0]
lastofs += hint
ofs += hint
assert start - 1 <= lastofs and lastofs < ofs and ofs <= stop
# Now a[lastofs] <= key < a[ofs], so key belongs somewhere to the
# right of lastofs but no farther right than ofs. Do a binary
# search, with invariant a[lastofs-1] <= key < a[ofs].
lastofs += 1
while lastofs < ofs:
m = lastofs + ((ofs - lastofs) >> 1)
if LT(key, a[m]):
# key < a[m]
ofs = m
else:
# a[m] <= key
lastofs = m + 1
# Now lastofs == ofs, so a[ofs - 1] <= key < a[ofs]
return ofs
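    # Worked example (illustrative): with the same a = [1, 2, 2, 3], key = 2, start = 0,
    # stop = 4 and hint = 0, gallop_right returns 3, i.e. the position just past the
    # rightmost element equal to the key (so a[2] <= key < a[3]).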
@wrap
def merge_compute_minrun(n):
"""
Compute a good value for the minimum run length; natural runs shorter
than this are boosted artificially via binary insertion.
If n < 64, return n (it's too small to bother with fancy stuff).
Else if n is an exact power of 2, return 32.
Else return an int k, 32 <= k <= 64, such that n/k is close to, but
strictly less than, an exact power of 2.
See listsort.txt for more info.
"""
r = 0
assert n >= 0
while n >= 64:
r |= n & 1
n >>= 1
return n + r
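    # Worked examples (illustrative): merge_compute_minrun(63) == 63 (too small to bother),
    # merge_compute_minrun(2048) == 32 (exact power of two), and
    # merge_compute_minrun(2049) == 33 (a dropped low bit sets r, so n/minrun stays just
    # under a power of two).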
@wrap
def sortslice_copy(dest_keys, dest_values, dest_start,
src_keys, src_values, src_start,
nitems):
"""
Upwards memcpy().
"""
assert src_start >= 0
assert dest_start >= 0
for i in range(nitems):
dest_keys[dest_start + i] = src_keys[src_start + i]
if has_values(src_keys, src_values):
for i in range(nitems):
dest_values[dest_start + i] = src_values[src_start + i]
@wrap
def sortslice_copy_down(dest_keys, dest_values, dest_start,
src_keys, src_values, src_start,
nitems):
"""
Downwards memcpy().
"""
assert src_start >= 0
assert dest_start >= 0
for i in range(nitems):
dest_keys[dest_start - i] = src_keys[src_start - i]
if has_values(src_keys, src_values):
for i in range(nitems):
dest_values[dest_start - i] = src_values[src_start - i]
# Disable this for debug or perf comparison
DO_GALLOP = 1
@wrap
def merge_lo(ms, keys, values, ssa, na, ssb, nb):
"""
Merge the na elements starting at ssa with the nb elements starting at
ssb = ssa + na in a stable way, in-place. na and nb must be > 0,
and should have na <= nb. See listsort.txt for more info.
An updated MergeState is returned (with possibly a different min_gallop
or larger temp arrays).
NOTE: compared to CPython's timsort, the requirement that
"Must also have that keys[ssa + na - 1] belongs at the end of the merge"
is removed. This makes the code a bit simpler and easier to reason about.
"""
assert na > 0 and nb > 0 and na <= nb
assert ssb == ssa + na
# First copy [ssa, ssa + na) into the temp space
ms = merge_getmem(ms, na)
sortslice_copy(ms.keys, ms.values, 0,
keys, values, ssa,
na)
a_keys = ms.keys
a_values = ms.values
b_keys = keys
b_values = values
dest = ssa
ssa = 0
_has_values = has_values(a_keys, a_values)
min_gallop = ms.min_gallop
# Now start merging into the space left from [ssa, ...)
while nb > 0 and na > 0:
# Do the straightforward thing until (if ever) one run
# appears to win consistently.
acount = 0
bcount = 0
while True:
if LT(b_keys[ssb], a_keys[ssa]):
keys[dest] = b_keys[ssb]
if _has_values:
values[dest] = b_values[ssb]
dest += 1
ssb += 1
nb -= 1
if nb == 0:
break
# It's a B run
bcount += 1
acount = 0
if bcount >= min_gallop:
break
else:
keys[dest] = a_keys[ssa]
if _has_values:
values[dest] = a_values[ssa]
dest += 1
ssa += 1
na -= 1
if na == 0:
break
                    # It's an A run
acount += 1
bcount = 0
if acount >= min_gallop:
break
# One run is winning so consistently that galloping may
# be a huge win. So try that, and continue galloping until
# (if ever) neither run appears to be winning consistently
# anymore.
if DO_GALLOP and na > 0 and nb > 0:
min_gallop += 1
while acount >= MIN_GALLOP or bcount >= MIN_GALLOP:
# As long as we gallop without leaving this loop, make
# the heuristic more likely
min_gallop -= min_gallop > 1
# Gallop in A to find where keys[ssb] should end up
k = gallop_right(b_keys[ssb], a_keys, ssa, ssa + na, ssa)
# k is an index, make it a size
k -= ssa
acount = k
if k > 0:
# Copy everything from A before k
sortslice_copy(keys, values, dest,
a_keys, a_values, ssa,
k)
dest += k
ssa += k
na -= k
if na == 0:
# Finished merging
break
# Copy keys[ssb]
keys[dest] = b_keys[ssb]
if _has_values:
values[dest] = b_values[ssb]
dest += 1
ssb += 1
nb -= 1
if nb == 0:
# Finished merging
break
# Gallop in B to find where keys[ssa] should end up
k = gallop_left(a_keys[ssa], b_keys, ssb, ssb + nb, ssb)
# k is an index, make it a size
k -= ssb
bcount = k
if k > 0:
# Copy everything from B before k
# NOTE: source and dest are the same buffer, but the
# destination index is below the source index
sortslice_copy(keys, values, dest,
b_keys, b_values, ssb,
k)
dest += k
ssb += k
nb -= k
if nb == 0:
# Finished merging
break
# Copy keys[ssa]
keys[dest] = a_keys[ssa]
if _has_values:
values[dest] = a_values[ssa]
dest += 1
ssa += 1
na -= 1
if na == 0:
# Finished merging
break
# Penalize it for leaving galloping mode
min_gallop += 1
# Merge finished, now handle the remaining areas
if nb == 0:
# Only A remaining to copy at the end of the destination area
sortslice_copy(keys, values, dest,
a_keys, a_values, ssa,
na)
else:
assert na == 0
assert dest == ssb
# B's tail is already at the right place, do nothing
return merge_adjust_gallop(ms, min_gallop)
@wrap
def merge_hi(ms, keys, values, ssa, na, ssb, nb):
"""
Merge the na elements starting at ssa with the nb elements starting at
ssb = ssa + na in a stable way, in-place. na and nb must be > 0,
and should have na >= nb. See listsort.txt for more info.
An updated MergeState is returned (with possibly a different min_gallop
or larger temp arrays).
NOTE: compared to CPython's timsort, the requirement that
"Must also have that keys[ssa + na - 1] belongs at the end of the merge"
is removed. This makes the code a bit simpler and easier to reason about.
"""
assert na > 0 and nb > 0 and na >= nb
assert ssb == ssa + na
# First copy [ssb, ssb + nb) into the temp space
ms = merge_getmem(ms, nb)
sortslice_copy(ms.keys, ms.values, 0,
keys, values, ssb,
nb)
a_keys = keys
a_values = values
b_keys = ms.keys
b_values = ms.values
# Now start merging *in descending order* into the space left
# from [..., ssb + nb).
dest = ssb + nb - 1
ssb = nb - 1
ssa = ssa + na - 1
_has_values = has_values(b_keys, b_values)
min_gallop = ms.min_gallop
while nb > 0 and na > 0:
# Do the straightforward thing until (if ever) one run
# appears to win consistently.
acount = 0
bcount = 0
while True:
if LT(b_keys[ssb], a_keys[ssa]):
# We merge in descending order, so copy the larger value
keys[dest] = a_keys[ssa]
if _has_values:
values[dest] = a_values[ssa]
dest -= 1
ssa -= 1
na -= 1
if na == 0:
break
                    # It's an A run
acount += 1
bcount = 0
if acount >= min_gallop:
break
else:
keys[dest] = b_keys[ssb]
if _has_values:
values[dest] = b_values[ssb]
dest -= 1
ssb -= 1
nb -= 1
if nb == 0:
break
# It's a B run
bcount += 1
acount = 0
if bcount >= min_gallop:
break
# One run is winning so consistently that galloping may
# be a huge win. So try that, and continue galloping until
# (if ever) neither run appears to be winning consistently
# anymore.
if DO_GALLOP and na > 0 and nb > 0:
min_gallop += 1
while acount >= MIN_GALLOP or bcount >= MIN_GALLOP:
# As long as we gallop without leaving this loop, make
# the heuristic more likely
min_gallop -= min_gallop > 1
# Gallop in A to find where keys[ssb] should end up
k = gallop_right(b_keys[ssb], a_keys, ssa - na + 1, ssa + 1, ssa)
# k is an index, make it a size from the end
k = ssa + 1 - k
acount = k
if k > 0:
# Copy everything from A after k.
# Destination and source are the same buffer, and destination
# index is greater, so copy from the end to the start.
sortslice_copy_down(keys, values, dest,
a_keys, a_values, ssa,
k)
dest -= k
ssa -= k
na -= k
if na == 0:
# Finished merging
break
# Copy keys[ssb]
keys[dest] = b_keys[ssb]
if _has_values:
values[dest] = b_values[ssb]
dest -= 1
ssb -= 1
nb -= 1
if nb == 0:
# Finished merging
break
# Gallop in B to find where keys[ssa] should end up
k = gallop_left(a_keys[ssa], b_keys, ssb - nb + 1, ssb + 1, ssb)
# k is an index, make it a size from the end
k = ssb + 1 - k
bcount = k
if k > 0:
# Copy everything from B before k
sortslice_copy_down(keys, values, dest,
b_keys, b_values, ssb,
k)
dest -= k
ssb -= k
nb -= k
if nb == 0:
# Finished merging
break
# Copy keys[ssa]
keys[dest] = a_keys[ssa]
if _has_values:
values[dest] = a_values[ssa]
dest -= 1
ssa -= 1
na -= 1
if na == 0:
# Finished merging
break
# Penalize it for leaving galloping mode
min_gallop += 1
# Merge finished, now handle the remaining areas
if na == 0:
# Only B remaining to copy at the front of the destination area
sortslice_copy(keys, values, dest - nb + 1,
b_keys, b_values, ssb - nb + 1,
nb)
else:
assert nb == 0
assert dest == ssa
# A's front is already at the right place, do nothing
return merge_adjust_gallop(ms, min_gallop)
@wrap
def merge_at(ms, keys, values, i):
"""
Merge the two runs at stack indices i and i+1.
An updated MergeState is returned.
"""
n = ms.n
assert n >= 2
assert i >= 0
assert i == n - 2 or i == n - 3
ssa, na = ms.pending[i]
ssb, nb = ms.pending[i + 1]
assert na > 0 and nb > 0
assert ssa + na == ssb
# Record the length of the combined runs; if i is the 3rd-last
# run now, also slide over the last run (which isn't involved
# in this merge). The current run i+1 goes away in any case.
ms.pending[i] = MergeRun(ssa, na + nb)
if i == n - 3:
ms.pending[i + 1] = ms.pending[i + 2]
ms = merge_pop(ms)
# Where does b start in a? Elements in a before that can be
# ignored (already in place).
k = gallop_right(keys[ssb], keys, ssa, ssa + na, ssa)
# [k, ssa + na) remains to be merged
na -= k - ssa
ssa = k
if na == 0:
return ms
# Where does a end in b? Elements in b after that can be
# ignored (already in place).
k = gallop_left(keys[ssa + na - 1], keys, ssb, ssb + nb, ssb + nb - 1)
# [ssb, k) remains to be merged
nb = k - ssb
# Merge what remains of the runs, using a temp array with
# min(na, nb) elements.
if na <= nb:
return merge_lo(ms, keys, values, ssa, na, ssb, nb)
else:
return merge_hi(ms, keys, values, ssa, na, ssb, nb)
@wrap
def merge_collapse(ms, keys, values):
"""
Examine the stack of runs waiting to be merged, merging adjacent runs
until the stack invariants are re-established:
1. len[-3] > len[-2] + len[-1]
2. len[-2] > len[-1]
An updated MergeState is returned.
See listsort.txt for more info.
"""
while ms.n > 1:
pending = ms.pending
n = ms.n - 2
if ((n > 0 and pending[n-1].size <= pending[n].size + pending[n+1].size) or
(n > 1 and pending[n-2].size <= pending[n-1].size + pending[n].size)):
if pending[n - 1].size < pending[n + 1].size:
# Merge smaller one first
n -= 1
ms = merge_at(ms, keys, values, n)
            elif pending[n].size <= pending[n + 1].size:
ms = merge_at(ms, keys, values, n)
else:
break
return ms
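    # Illustrative trace: with pending run sizes [10, 5, 6] (top of the stack on the right),
    # invariant 1 fails because 10 <= 5 + 6, so the two topmost runs are merged into one of
    # size 11, giving [10, 11]; invariant 2 then fails because 10 <= 11, so those are merged
    # as well, leaving a single run of size 21.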
@wrap
def merge_force_collapse(ms, keys, values):
"""
Regardless of invariants, merge all runs on the stack until only one
remains. This is used at the end of the mergesort.
An updated MergeState is returned.
"""
while ms.n > 1:
pending = ms.pending
n = ms.n - 2
if n > 0:
if pending[n - 1].size < pending[n + 1].size:
# Merge the smaller one first
n -= 1
ms = merge_at(ms, keys, values, n)
return ms
@wrap
def reverse_slice(keys, values, start, stop):
"""
Reverse a slice, in-place.
"""
i = start
j = stop - 1
while i < j:
keys[i], keys[j] = keys[j], keys[i]
i += 1
j -= 1
if has_values(keys, values):
i = start
j = stop - 1
while i < j:
values[i], values[j] = values[j], values[i]
i += 1
j -= 1
@wrap
def run_timsort_with_mergestate(ms, keys, values):
"""
Run timsort with the mergestate.
"""
nremaining = len(keys)
if nremaining < 2:
return
# March over the array once, left to right, finding natural runs,
# and extending short natural runs to minrun elements.
minrun = merge_compute_minrun(nremaining)
lo = zero
while nremaining > 0:
n, desc = count_run(keys, lo, lo + nremaining)
if desc:
# Descending run => reverse
reverse_slice(keys, values, lo, lo + n)
# If short, extend to min(minrun, nremaining)
if n < minrun:
force = min(minrun, nremaining)
binarysort(keys, values, lo, lo + force, lo + n)
n = force
# Push run onto stack, and maybe merge.
ms = merge_append(ms, MergeRun(lo, n))
ms = merge_collapse(ms, keys, values)
# Advance to find next run.
lo += n
nremaining -= n
# All initial runs have been discovered, now finish merging.
ms = merge_force_collapse(ms, keys, values)
assert ms.n == 1
assert ms.pending[0] == (0, len(keys))
@wrap
def run_timsort(keys):
"""
Run timsort over the given keys.
"""
values = keys
run_timsort_with_mergestate(merge_init(keys), keys, values)
@wrap
def run_timsort_with_values(keys, values):
"""
Run timsort over the given keys and values.
"""
run_timsort_with_mergestate(merge_init_with_values(keys, values),
keys, values)
return TimsortImplementation(
wrap,
count_run, binarysort, gallop_left, gallop_right,
merge_init, merge_append, merge_pop,
merge_compute_minrun, merge_lo, merge_hi, merge_at,
merge_force_collapse, merge_collapse,
run_timsort, run_timsort_with_values)
def make_py_timsort(*args):
return make_timsort_impl((lambda f: f), *args)
def make_jit_timsort(*args):
from numba import jit
return make_timsort_impl((lambda f: jit(nopython=True)(f)),
*args)
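# A minimal pure-Python usage sketch (illustrative, not part of this module). The temp-area
# allocator passed to make_py_timsort is an assumption: any callable returning a scratch
# list of the requested size works for the non-jitted path.
#
#     py_timsort = make_py_timsort(lambda keys, n: [keys[0]] * n)
#     data = [3, 1, 2, 2, 0]
#     py_timsort.run_timsort(data)        # sorts in place -> [0, 1, 2, 2, 3]
#
#     keys, values = [2, 1], ['b', 'a']
#     py_timsort.run_timsort_with_values(keys, values)   # -> [1, 2], ['a', 'b']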
|
|
"""Provides the worker thread needed for processing streams."""
from __future__ import annotations
from collections import defaultdict, deque
from collections.abc import Callable, Generator, Iterator, Mapping
import contextlib
import datetime
from io import BytesIO
import logging
from threading import Event
from typing import Any, cast
import av
from homeassistant.core import HomeAssistant
from . import KeyFrameConverter, redact_credentials
from .const import (
ATTR_SETTINGS,
AUDIO_CODECS,
DOMAIN,
HLS_PROVIDER,
MAX_MISSING_DTS,
MAX_TIMESTAMP_GAP,
PACKETS_TO_WAIT_FOR_AUDIO,
SEGMENT_CONTAINER_FORMAT,
SOURCE_TIMEOUT,
)
from .core import Part, Segment, StreamOutput, StreamSettings
from .hls import HlsStreamOutput
_LOGGER = logging.getLogger(__name__)
class StreamWorkerError(Exception):
"""An exception thrown while processing a stream."""
class StreamEndedError(StreamWorkerError):
"""Raised when the stream is complete, exposed for facilitating testing."""
class StreamState:
"""Responsible for trakcing output and playback state for a stream.
Holds state used for playback to interpret a decoded stream. A source stream
may be reset (e.g. reconnecting to an rtsp stream) and this object tracks
the state to inform the player.
"""
def __init__(
self,
hass: HomeAssistant,
outputs_callback: Callable[[], Mapping[str, StreamOutput]],
) -> None:
"""Initialize StreamState."""
self._stream_id: int = 0
self.hass = hass
self._outputs_callback: Callable[
[], Mapping[str, StreamOutput]
] = outputs_callback
# sequence gets incremented before the first segment so the first segment
# has a sequence number of 0.
self._sequence = -1
@property
def sequence(self) -> int:
"""Return the current sequence for the latest segment."""
return self._sequence
def next_sequence(self) -> int:
"""Increment the sequence number."""
self._sequence += 1
return self._sequence
@property
def stream_id(self) -> int:
"""Return the readonly stream_id attribute."""
return self._stream_id
def discontinuity(self) -> None:
"""Mark the stream as having been restarted."""
        # Preserving sequence and stream_id here keeps the HLS playlist logic
# simple to check for discontinuity at output time, and to determine
# the discontinuity sequence number.
self._stream_id += 1
# Call discontinuity to remove incomplete segment from the HLS output
if hls_output := self._outputs_callback().get(HLS_PROVIDER):
cast(HlsStreamOutput, hls_output).discontinuity()
@property
def outputs(self) -> list[StreamOutput]:
"""Return the active stream outputs."""
return list(self._outputs_callback().values())
class StreamMuxer:
"""StreamMuxer re-packages video/audio packets for output."""
def __init__(
self,
hass: HomeAssistant,
video_stream: av.video.VideoStream,
audio_stream: av.audio.stream.AudioStream | None,
stream_state: StreamState,
) -> None:
"""Initialize StreamMuxer."""
self._hass = hass
self._segment_start_dts: int = cast(int, None)
self._memory_file: BytesIO = cast(BytesIO, None)
self._av_output: av.container.OutputContainer = None
self._input_video_stream: av.video.VideoStream = video_stream
self._input_audio_stream: av.audio.stream.AudioStream | None = audio_stream
self._output_video_stream: av.video.VideoStream = None
self._output_audio_stream: av.audio.stream.AudioStream | None = None
self._segment: Segment | None = None
# the following 3 member variables are used for Part formation
self._memory_file_pos: int = cast(int, None)
self._part_start_dts: int = cast(int, None)
self._part_has_keyframe = False
self._stream_settings: StreamSettings = hass.data[DOMAIN][ATTR_SETTINGS]
self._stream_state = stream_state
self._start_time = datetime.datetime.utcnow()
def make_new_av(
self,
memory_file: BytesIO,
sequence: int,
input_vstream: av.video.VideoStream,
input_astream: av.audio.stream.AudioStream | None,
) -> tuple[
av.container.OutputContainer,
av.video.VideoStream,
av.audio.stream.AudioStream | None,
]:
"""Make a new av OutputContainer and add output streams."""
container = av.open(
memory_file,
mode="w",
format=SEGMENT_CONTAINER_FORMAT,
container_options={
**{
# Removed skip_sidx - see https://github.com/home-assistant/core/pull/39970
# "cmaf" flag replaces several of the movflags used, but too recent to use for now
"movflags": "frag_custom+empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer",
# Sometimes the first segment begins with negative timestamps, and this setting just
                    # adjusts the timestamps in the output from that segment to start from 0. This saves
                    # us from having to make some adjustments in test_durations
"avoid_negative_ts": "make_non_negative",
"fragment_index": str(sequence + 1),
"video_track_timescale": str(int(1 / input_vstream.time_base)),
},
# Only do extra fragmenting if we are using ll_hls
# Let ffmpeg do the work using frag_duration
# Fragment durations may exceed the 15% allowed variance but it seems ok
**(
{
"movflags": "empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer",
# Create a fragment every TARGET_PART_DURATION. The data from each fragment is stored in
# a "Part" that can be combined with the data from all the other "Part"s, plus an init
# section, to reconstitute the data in a "Segment".
# The LL-HLS spec allows for a fragment's duration to be within the range [0.85x,1.0x]
# of the part target duration. We use the frag_duration option to tell ffmpeg to try to
# cut the fragments when they reach frag_duration. However, the resulting fragments can
# have variability in their durations and can end up being too short or too long. With a
# video track with no audio, the discrete nature of frames means that the frame at the
# end of a fragment will sometimes extend slightly beyond the desired frag_duration.
# If there are two tracks, as in the case of a video feed with audio, there is an added
# wrinkle as the fragment cut seems to be done on the first track that crosses the desired
# threshold, and cutting on the audio track may also result in a shorter video fragment
# than desired.
# Given this, our approach is to give ffmpeg a frag_duration somewhere in the middle
# of the range, hoping that the parts stay pretty well bounded, and we adjust the part
# durations a bit in the hls metadata so that everything "looks" ok.
"frag_duration": str(
self._stream_settings.part_target_duration * 9e5
),
}
if self._stream_settings.ll_hls
else {}
),
},
)
output_vstream = container.add_stream(template=input_vstream)
# Check if audio is requested
output_astream = None
if input_astream:
output_astream = container.add_stream(template=input_astream)
return container, output_vstream, output_astream
def reset(self, video_dts: int) -> None:
"""Initialize a new stream segment."""
self._part_start_dts = self._segment_start_dts = video_dts
self._segment = None
self._memory_file = BytesIO()
self._memory_file_pos = 0
(
self._av_output,
self._output_video_stream,
self._output_audio_stream,
) = self.make_new_av(
memory_file=self._memory_file,
sequence=self._stream_state.next_sequence(),
input_vstream=self._input_video_stream,
input_astream=self._input_audio_stream,
)
if self._output_video_stream.name == "hevc":
self._output_video_stream.codec_tag = "hvc1"
def mux_packet(self, packet: av.Packet) -> None:
"""Mux a packet to the appropriate output stream."""
# Check for end of segment
if packet.stream == self._input_video_stream:
if (
packet.is_keyframe
and (packet.dts - self._segment_start_dts) * packet.time_base
>= self._stream_settings.min_segment_duration
):
# Flush segment (also flushes the stub part segment)
self.flush(packet, last_part=True)
# Mux the packet
packet.stream = self._output_video_stream
self._av_output.mux(packet)
self.check_flush_part(packet)
self._part_has_keyframe |= packet.is_keyframe
elif packet.stream == self._input_audio_stream:
packet.stream = self._output_audio_stream
self._av_output.mux(packet)
def check_flush_part(self, packet: av.Packet) -> None:
"""Check for and mark a part segment boundary and record its duration."""
if self._memory_file_pos == self._memory_file.tell():
return
if self._segment is None:
# We have our first non-zero byte position. This means the init has just
# been written. Create a Segment and put it to the queue of each output.
self._segment = Segment(
sequence=self._stream_state.sequence,
stream_id=self._stream_state.stream_id,
init=self._memory_file.getvalue(),
# Fetch the latest StreamOutputs, which may have changed since the
# worker started.
stream_outputs=self._stream_state.outputs,
start_time=self._start_time,
)
self._memory_file_pos = self._memory_file.tell()
else: # These are the ends of the part segments
self.flush(packet, last_part=False)
def flush(self, packet: av.Packet, last_part: bool) -> None:
"""Output a part from the most recent bytes in the memory_file.
If last_part is True, also close the segment, give it a duration,
and clean up the av_output and memory_file.
There are two different ways to enter this function, and when
last_part is True, packet has not yet been muxed, while when
last_part is False, the packet has already been muxed. However,
in both cases, packet is the next packet and is not included in
the Part.
This function writes the duration metadata for the Part and
for the Segment. However, as the fragmentation done by ffmpeg
may result in fragment durations which fall outside the
[0.85x,1.0x] tolerance band allowed by LL-HLS, we need to fudge
some durations a bit by reporting them as being within that
range.
Note that repeated adjustments may cause drift between the part
durations in the metadata and those in the media and result in
playback issues in some clients.
"""
# Part durations should not exceed the part target duration
adjusted_dts = min(
packet.dts,
self._part_start_dts
+ self._stream_settings.part_target_duration / packet.time_base,
)
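# Illustrative numbers (assuming a typical 1/90000 time_base): with a 1 s
# part target, packet.dts is capped here at _part_start_dts + 90000 ticks,
# and for ordinary (non-final, non-keyframe) parts it is clamped below to
# _part_start_dts + 76500 ticks (0.85 s) further down.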
if last_part:
# Closing the av_output will write the remaining buffered data to the
# memory_file as a new moof/mdat.
self._av_output.close()
elif not self._part_has_keyframe:
# Parts which are not the last part or an independent part should
# not have durations below 0.85 of the part target duration.
adjusted_dts = max(
adjusted_dts,
self._part_start_dts
+ 0.85 * self._stream_settings.part_target_duration / packet.time_base,
)
assert self._segment
self._memory_file.seek(self._memory_file_pos)
self._hass.loop.call_soon_threadsafe(
self._segment.async_add_part,
Part(
duration=float(
(adjusted_dts - self._part_start_dts) * packet.time_base
),
has_keyframe=self._part_has_keyframe,
data=self._memory_file.read(),
),
(
segment_duration := float(
(adjusted_dts - self._segment_start_dts) * packet.time_base
)
)
if last_part
else 0,
)
if last_part:
# If we've written the last part, we can close the memory_file.
self._memory_file.close() # We don't need the BytesIO object anymore
self._start_time += datetime.timedelta(seconds=segment_duration)
# Reinitialize
self.reset(packet.dts)
else:
# For the last part, these will get set again elsewhere so we can skip
# setting them here.
self._memory_file_pos = self._memory_file.tell()
self._part_start_dts = adjusted_dts
self._part_has_keyframe = False
def close(self) -> None:
"""Close stream buffer."""
self._av_output.close()
self._memory_file.close()
class PeekIterator(Iterator):
"""An Iterator that may allow multiple passes.
It can be consumed like a normal Iterator, but it also supports a
peek() method that buffers the items it consumes from the iterator.
"""
def __init__(self, iterator: Iterator[av.Packet]) -> None:
"""Initialize PeekIterator."""
self._iterator = iterator
self._buffer: deque[av.Packet] = deque()
# A pointer to either _iterator or _buffer
self._next = self._iterator.__next__
def __iter__(self) -> Iterator:
"""Return an iterator."""
return self
def __next__(self) -> av.Packet:
"""Return and consume the next item available."""
return self._next()
def replace_underlying_iterator(self, new_iterator: Iterator) -> None:
"""Replace the underlying iterator while preserving the buffer."""
self._iterator = new_iterator
if not self._buffer:
self._next = self._iterator.__next__
def _pop_buffer(self) -> av.Packet:
"""Consume items from the buffer until exhausted."""
if self._buffer:
return self._buffer.popleft()
# The buffer is empty, so change to consume from the iterator
self._next = self._iterator.__next__
return self._next()
def peek(self) -> Generator[av.Packet, None, None]:
"""Return items without consuming from the iterator."""
# Items consumed are added to a buffer for future calls to __next__
# or peek. First iterate over the buffer from previous calls to peek.
self._next = self._pop_buffer
for packet in self._buffer:
yield packet
for packet in self._iterator:
self._buffer.append(packet)
yield packet
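# Usage sketch (illustrative): peeked packets are buffered, so they are
# yielded again on normal iteration.
#
#   it = PeekIterator(iter(packets))
#   first_peeked = next(it.peek())   # buffered, not consumed
#   assert next(it) is first_peeked  # same packet comes back from the buffer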
class TimestampValidator:
"""Validate ordering of timestamps for packets in a stream."""
def __init__(self) -> None:
"""Initialize the TimestampValidator."""
# Decompression timestamp of last packet in each stream
self._last_dts: dict[av.stream.Stream, int | float] = defaultdict(
lambda: float("-inf")
)
# Number of consecutive missing decompression timestamps
self._missing_dts = 0
def is_valid(self, packet: av.Packet) -> bool:
"""Validate the packet timestamp based on ordering within the stream."""
# Discard packets missing DTS. Terminate if too many are missing.
if packet.dts is None:
if self._missing_dts >= MAX_MISSING_DTS:
raise StreamWorkerError(
f"No dts in {MAX_MISSING_DTS+1} consecutive packets"
)
self._missing_dts += 1
return False
self._missing_dts = 0
# Discard when dts is not monotonic. Terminate if gap is too wide.
prev_dts = self._last_dts[packet.stream]
if packet.dts <= prev_dts:
gap = packet.time_base * (prev_dts - packet.dts)
if gap > MAX_TIMESTAMP_GAP:
raise StreamWorkerError(
f"Timestamp overflow detected: last dts = {prev_dts}, dts = {packet.dts}"
)
return False
self._last_dts[packet.stream] = packet.dts
return True
def is_keyframe(packet: av.Packet) -> Any:
"""Return true if the packet is a keyframe."""
return packet.is_keyframe
def unsupported_audio(packets: Iterator[av.Packet], audio_stream: Any) -> bool:
"""Detect ADTS AAC, which is not supported by pyav."""
if not audio_stream:
return False
for count, packet in enumerate(packets):
if count >= PACKETS_TO_WAIT_FOR_AUDIO:
# Some streams declare an audio stream and never send any packets
_LOGGER.warning("Audio stream not found")
break
if packet.stream == audio_stream:
# detect ADTS AAC and disable audio
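# ADTS frames start with the 12-bit syncword 0xFFF, i.e. the first byte is
# 0xFF and the top nibble of the second byte is 0xF; raw AAC inside an MP4
# container (no ADTS header) will not match this pattern.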
if audio_stream.codec.name == "aac" and packet.size > 2:
with memoryview(packet) as packet_view:
if packet_view[0] == 0xFF and packet_view[1] & 0xF0 == 0xF0:
_LOGGER.warning("ADTS AAC detected - disabling audio stream")
return True
break
return False
def stream_worker(
source: str,
options: dict[str, str],
stream_state: StreamState,
keyframe_converter: KeyFrameConverter,
quit_event: Event,
) -> None:
"""Handle consuming streams."""
try:
container = av.open(source, options=options, timeout=SOURCE_TIMEOUT)
except av.AVError as err:
raise StreamWorkerError(
"Error opening stream %s" % redact_credentials(str(source))
) from err
try:
video_stream = container.streams.video[0]
except (KeyError, IndexError) as ex:
raise StreamWorkerError("Stream has no video") from ex
keyframe_converter.create_codec_context(codec_context=video_stream.codec_context)
try:
audio_stream = container.streams.audio[0]
except (KeyError, IndexError):
audio_stream = None
if audio_stream and audio_stream.name not in AUDIO_CODECS:
audio_stream = None
# These formats need the aac_adtstoasc bitstream filter, but auto_bsf is not
# compatible with empty_moov, and manual bitstream filters are not available in PyAV
if container.format.name in {"hls", "mpegts"}:
audio_stream = None
# Some audio streams do not have a profile and throw errors when remuxing
if audio_stream and audio_stream.profile is None:
audio_stream = None
dts_validator = TimestampValidator()
container_packets = PeekIterator(
filter(dts_validator.is_valid, container.demux((video_stream, audio_stream)))
)
def is_video(packet: av.Packet) -> Any:
"""Return true if the packet is for the video stream."""
return packet.stream.type == "video"
# Have to work around two problems with RTSP feeds in ffmpeg
# 1 - first frame has bad pts/dts https://trac.ffmpeg.org/ticket/5018
# 2 - seeking can be problematic https://trac.ffmpeg.org/ticket/7815
#
# Use a peeking iterator to peek into the start of the stream, ensuring
# everything looks good, then go back to the start when muxing below.
try:
if audio_stream and unsupported_audio(container_packets.peek(), audio_stream):
audio_stream = None
container_packets.replace_underlying_iterator(
filter(dts_validator.is_valid, container.demux(video_stream))
)
# Advance to the first keyframe for muxing, then rewind so the muxing
# loop below can consume.
first_keyframe = next(
filter(lambda pkt: is_keyframe(pkt) and is_video(pkt), container_packets)
)
# Deal with problem #1 above (bad first packet pts/dts) by recalculating
# using pts/dts from second packet. Use the peek iterator to advance
# without consuming from container_packets. Skip over the first keyframe
# then use the duration from the second video packet to adjust dts.
next_video_packet = next(filter(is_video, container_packets.peek()))
# Since the is_valid filter has already been applied before the following
# adjustment, it does not filter out the case where the duration below is
# 0 and both the first_keyframe and next_video_packet end up with the same
# dts. Use "or 1" to deal with this.
start_dts = next_video_packet.dts - (next_video_packet.duration or 1)
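# e.g. if the second video packet arrives with dts=3000 and duration=1500,
# the first keyframe is re-stamped to dts=pts=1500; with a duration of 0 the
# "or 1" fallback still keeps the two timestamps distinct.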
first_keyframe.dts = first_keyframe.pts = start_dts
except StreamWorkerError as ex:
container.close()
raise ex
except StopIteration as ex:
container.close()
raise StreamEndedError("Stream ended; no additional packets") from ex
except av.AVError as ex:
container.close()
raise StreamWorkerError(
"Error demuxing stream while finding first packet: %s" % str(ex)
) from ex
muxer = StreamMuxer(stream_state.hass, video_stream, audio_stream, stream_state)
muxer.reset(start_dts)
# Mux the first keyframe, then proceed through the rest of the packets
muxer.mux_packet(first_keyframe)
with contextlib.closing(container), contextlib.closing(muxer):
while not quit_event.is_set():
try:
packet = next(container_packets)
except StreamWorkerError as ex:
raise ex
except StopIteration as ex:
raise StreamEndedError("Stream ended; no additional packets") from ex
except av.AVError as ex:
raise StreamWorkerError("Error demuxing stream: %s" % str(ex)) from ex
muxer.mux_packet(packet)
if packet.is_keyframe and is_video(packet):
keyframe_converter.packet = packet
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 Sebastian Werner
#
import re
import sys
import json
from jasy.script.tokenize.Lang import keywords
from jasy.script.parse.Lang import expressions, futureReserved
high_unicode = re.compile(r"\\u[2-9A-Fa-f][0-9A-Fa-f]{3}")
ascii_encoder = json.JSONEncoder(ensure_ascii=True)
unicode_encoder = json.JSONEncoder(ensure_ascii=False)
#
# Class
#
class Compressor:
__semicolonSymbol = ";"
__commaSymbol = ","
def __init__(self, format=None):
if format:
if format.has("semicolon"):
self.__semicolonSymbol = ";\n"
if format.has("comma"):
self.__commaSymbol = ",\n"
self.__forcedSemicolon = False
#
# Main
#
def compress(self, node):
type = node.type
result = None
if type in self.__simple:
result = type
elif type in self.__prefixes:
if getattr(node, "postfix", False):
result = self.compress(node[0]) + self.__prefixes[node.type]
else:
result = self.__prefixes[node.type] + self.compress(node[0])
elif type in self.__dividers:
first = self.compress(node[0])
second = self.compress(node[1])
divider = self.__dividers[node.type]
# Fast path
if node.type not in ("plus", "minus"):
result = "%s%s%s" % (first, divider, second)
# Special code for dealing with situations like x + ++y and y-- - x
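# e.g. compressing "x + ++y" must not emit "x+++y", which would parse as
# "(x++) + y"; the guard space keeps the prefix operator intact.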
else:
result = first
if first.endswith(divider):
result += " "
result += divider
if second.startswith(divider):
result += " "
result += second
else:
try:
result = getattr(self, "type_%s" % type)(node)
except AttributeError:
raise Exception("Script compressor does not support type '%s' from line %s in file %s" % (type, node.line, node.getFileName()))
if getattr(node, "parenthesized", None):
return "(%s)" % result
else:
return result
#
# Helpers
#
def __statements(self, node):
result = []
for child in node:
result.append(self.compress(child))
return "".join(result)
def __handleForcedSemicolon(self, node):
if node.type == "semicolon" and not hasattr(node, "expression"):
self.__forcedSemicolon = True
def __addSemicolon(self, result):
if not result.endswith(self.__semicolonSymbol):
if self.__forcedSemicolon:
self.__forcedSemicolon = False
return result + self.__semicolonSymbol
else:
return result
def __removeSemicolon(self, result):
if self.__forcedSemicolon:
self.__forcedSemicolon = False
return result
if result.endswith(self.__semicolonSymbol):
return result[:-len(self.__semicolonSymbol)]
else:
return result
#
# Data
#
__simple_property = re.compile(r"^[a-zA-Z_$][a-zA-Z0-9_$]*$")
__number_property = re.compile(r"^[0-9]+$")
__simple = ["true", "false", "null", "this", "debugger"]
__dividers = {
"plus" : '+',
"minus" : '-',
"mul" : '*',
"div" : '/',
"mod" : '%',
"dot" : '.',
"or" : "||",
"and" : "&&",
"strict_eq" : '===',
"eq" : '==',
"strict_ne" : '!==',
"ne" : '!=',
"lsh" : '<<',
"le" : '<=',
"lt" : '<',
"ursh" : '>>>',
"rsh" : '>>',
"ge" : '>=',
"gt" : '>',
"bitwise_or" : '|',
"bitwise_xor" : '^',
"bitwise_and" : '&'
}
__prefixes = {
"increment" : "++",
"decrement" : "--",
"bitwise_not" : '~',
"not" : "!",
"unary_plus" : "+",
"unary_minus" : "-",
"delete" : "delete ",
"new" : "new ",
"typeof" : "typeof ",
"void" : "void "
}
#
# Script Scope
#
def type_script(self, node):
return self.__statements(node)
#
# Expressions
#
def type_comma(self, node):
return self.__commaSymbol.join(map(self.compress, node))
def type_object_init(self, node):
return "{%s}" % self.__commaSymbol.join(map(self.compress, node))
def type_property_init(self, node):
key = self.compress(node[0])
value = self.compress(node[1])
if type(key) in (int, float):
pass
elif self.__number_property.match(key):
pass
# Protect keywords and special characters
elif key in keywords or key in futureReserved or not self.__simple_property.match(key):
key = self.type_string(node[0])
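# e.g. the keys "for" or "a b" are emitted quoted ({"for":1,"a b":2}), so the
# output stays safe in older engines that reject reserved words or special
# characters as unquoted property names.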
return "%s:%s" % (key, value)
def type_array_init(self, node):
def helper(child):
return self.compress(child) if child is not None else ""
return "[%s]" % ",".join(map(helper, node))
def type_array_comp(self, node):
return "[%s %s]" % (self.compress(node.expression), self.compress(node.tail))
def type_string(self, node):
# Avoid writing real high unicode characters, which are not well supported by browsers
ascii = ascii_encoder.encode(node.value)
if high_unicode.search(ascii):
return ascii
else:
return unicode_encoder.encode(node.value)
def type_number(self, node):
value = node.value
# Special handling for protected float/exponential
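# e.g. "0.5" is shortened to ".5" and "5.0" to "5"; other protected string
# values (exponentials) pass through unchanged.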
if isinstance(value, str):
# Convert zero-prefix
if value.startswith("0.") and len(value) > 2:
value = value[1:]
# Convert zero postfix
elif value.endswith(".0"):
value = value[:-2]
elif int(value) == value and node.parent.type != "dot":
value = int(value)
return "%s" % value
def type_regexp(self, node):
return node.value
def type_identifier(self, node):
return node.value
def type_list(self, node):
return ",".join(map(self.compress, node))
def type_index(self, node):
return "%s[%s]" % (self.compress(node[0]), self.compress(node[1]))
def type_declaration(self, node):
names = getattr(node, "names", None)
if names:
result = self.compress(names)
else:
result = node.name
initializer = getattr(node, "initializer", None)
if initializer:
result += "=%s" % self.compress(node.initializer)
return result
def type_assign(self, node):
assignOp = getattr(node, "assignOp", None)
operator = "=" if not assignOp else self.__dividers[assignOp] + "="
return self.compress(node[0]) + operator + self.compress(node[1])
def type_call(self, node):
return "%s(%s)" % (self.compress(node[0]), self.compress(node[1]))
def type_new_with_args(self, node):
result = "new %s" % self.compress(node[0])
# Compress new Object(); => new Object;
if len(node[1]) > 0:
result += "(%s)" % self.compress(node[1])
else:
parent = getattr(node, "parent", None)
if parent and parent.type == "dot":
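# "new X.y" would be parsed as "new (X.y)", so the parentheses must be kept
# when the no-argument constructor call is the base of a member expression.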
result += "()"
return result
def type_exception(self, node):
return node.value
def type_generator(self, node):
"""Generator Expression."""
result = self.compress(getattr(node, "expression"))
tail = getattr(node, "tail", None)
if tail:
result += " %s" % self.compress(tail)
return result
def type_comp_tail(self, node):
"""Comprehensions Tails."""
result = self.compress(getattr(node, "for"))
guard = getattr(node, "guard", None)
if guard:
result += "if(%s)" % self.compress(guard)
return result
def type_in(self, node):
first = self.compress(node[0])
second = self.compress(node[1])
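# When the left operand ends in a quote the space before "in" can be dropped:
# '"foo"in bar' is valid JS, whereas an identifier needs the separating space.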
if first.endswith("'") or first.endswith('"'):
pattern = "%sin %s"
else:
pattern = "%s in %s"
return pattern % (first, second)
def type_instanceof(self, node):
first = self.compress(node[0])
second = self.compress(node[1])
return "%s instanceof %s" % (first, second)
#
# Statements :: Core
#
def type_block(self, node):
return "{%s}" % self.__removeSemicolon(self.__statements(node))
def type_let_block(self, node):
begin = "let(%s)" % ",".join(map(self.compress, node.variables))
if hasattr(node, "block"):
end = self.compress(node.block)
elif hasattr(node, "expression"):
end = self.compress(node.expression)
return begin + end
def type_const(self, node):
return self.__addSemicolon("const %s" % self.type_list(node))
def type_var(self, node):
return self.__addSemicolon("var %s" % self.type_list(node))
def type_let(self, node):
return self.__addSemicolon("let %s" % self.type_list(node))
def type_semicolon(self, node):
expression = getattr(node, "expression", None)
return self.__addSemicolon(self.compress(expression) if expression else "")
def type_label(self, node):
return self.__addSemicolon("%s:%s" % (node.label, self.compress(node.statement)))
def type_break(self, node):
return self.__addSemicolon("break" if not hasattr(node, "label") else "break %s" % node.label)
def type_continue(self, node):
return self.__addSemicolon("continue" if not hasattr(node, "label") else "continue %s" % node.label)
#
# Statements :: Functions
#
def type_function(self, node):
if node.type == "setter":
result = "set"
elif node.type == "getter":
result = "get"
else:
result = "function"
name = getattr(node, "name", None)
if name:
result += " %s" % name
params = getattr(node, "params", None)
result += "(%s)" % self.compress(params) if params else "()"
# keep expression closure format (may be micro-optimized for other code, too)
if getattr(node, "expressionClosure", False):
result += self.compress(node.body)
else:
result += "{%s}" % self.__removeSemicolon(self.compress(node.body))
return result
def type_getter(self, node):
return self.type_function(node)
def type_setter(self, node):
return self.type_function(node)
def type_return(self, node):
result = "return"
if hasattr(node, "value"):
valueCode = self.compress(node.value)
# Micro optimization: No space is needed when a block/map/array/group/string is returned
if not valueCode.startswith(("(", "[", "{", "'", '"', "!", "-", "/")):
result += " "
result += valueCode
return self.__addSemicolon(result)
#
# Statements :: Exception Handling
#
def type_throw(self, node):
return self.__addSemicolon("throw %s" % self.compress(node.exception))
def type_try(self, node):
result = "try%s" % self.compress(node.tryBlock)
for catch in node:
if catch.type == "catch":
if hasattr(catch, "guard"):
result += "catch(%s if %s)%s" % (self.compress(catch.exception), self.compress(catch.guard), self.compress(catch.block))
else:
result += "catch(%s)%s" % (self.compress(catch.exception), self.compress(catch.block))
if hasattr(node, "finallyBlock"):
result += "finally%s" % self.compress(node.finallyBlock)
return result
#
# Statements :: Loops
#
def type_while(self, node):
result = "while(%s)%s" % (self.compress(node.condition), self.compress(node.body))
self.__handleForcedSemicolon(node.body)
return result
def type_do(self, node):
# Block unwrapping doesn't help to reduce size for this loop type,
# but if it happens (we don't want to modify a global function to fix a local issue),
# we need to fix the body and re-add braces around the statement
body = self.compress(node.body)
if not body.startswith("{"):
body = "{%s}" % body
return self.__addSemicolon("do%swhile(%s)" % (body, self.compress(node.condition)))
def type_for_in(self, node):
# Optional variable declarations
varDecl = getattr(node, "varDecl", None)
# Body is optional - at least in comprehensions tails
body = getattr(node, "body", None)
if body:
body = self.compress(body)
else:
body = ""
result = "for"
if node.isEach:
result += " each"
result += "(%s in %s)%s" % (self.__removeSemicolon(self.compress(node.iterator)), self.compress(node.object), body)
if body:
self.__handleForcedSemicolon(node.body)
return result
def type_for(self, node):
setup = getattr(node, "setup", None)
condition = getattr(node, "condition", None)
update = getattr(node, "update", None)
result = "for("
result += self.__addSemicolon(self.compress(setup) if setup else "")
result += self.__addSemicolon(self.compress(condition) if condition else "")
result += self.compress(update) if update else ""
result += ")%s" % self.compress(node.body)
self.__handleForcedSemicolon(node.body)
return result
#
# Statements :: Conditionals
#
def type_hook(self, node):
"""aka ternary operator."""
condition = node.condition
thenPart = node.thenPart
elsePart = node.elsePart
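# Negated conditions are inverted: "!a ? b : c" is emitted as "a?c:b",
# which is equivalent and saves the "!".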
if condition.type == "not":
[thenPart, elsePart] = [elsePart, thenPart]
condition = condition[0]
return "%s?%s:%s" % (self.compress(condition), self.compress(thenPart), self.compress(elsePart))
def type_if(self, node):
result = "if(%s)%s" % (self.compress(node.condition), self.compress(node.thenPart))
elsePart = getattr(node, "elsePart", None)
if elsePart:
result += "else"
elseCode = self.compress(elsePart)
# Micro optimization: Don't need a space when the child is a block
# At this time the brace could not be part of a map declaration (would be a syntax error)
if not elseCode.startswith(("{", "(", ";")):
result += " "
result += elseCode
self.__handleForcedSemicolon(elsePart)
return result
def type_switch(self, node):
result = "switch(%s){" % self.compress(node.discriminant)
for case in node:
if case.type == "case":
labelCode = self.compress(case.label)
if labelCode.startswith('"'):
result += "case%s:" % labelCode
else:
result += "case %s:" % labelCode
elif case.type == "default":
result += "default:"
else:
continue
for statement in case.statements:
temp = self.compress(statement)
if len(temp) > 0:
result += self.__addSemicolon(temp)
return "%s}" % self.__removeSemicolon(result)