# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from glanceclient.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
_localedir = os.environ.get('glanceclient'.upper() + '_LOCALEDIR')
_t = gettext.translation('glanceclient', localedir=_localedir, fallback=True)
# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
(level, gettext.translation('glanceclient' + '-log-' + level,
localedir=_localedir,
fallback=True))
for level in ['info', 'warning', 'error', 'critical']
)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
global USE_LAZY
USE_LAZY = True
def _(msg):
if USE_LAZY:
return Message(msg, domain='glanceclient')
else:
if six.PY3:
return _t.gettext(msg)
return _t.ugettext(msg)
def _log_translation(msg, level):
"""Build a single translation of a log message
"""
if USE_LAZY:
return Message(msg, domain='glanceclient' + '-log-' + level)
else:
translator = _t_log_levels[level]
if six.PY3:
return translator.gettext(msg)
return translator.ugettext(msg)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
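# Illustrative sketch (not part of the original module): the level-specific
# markers are used exactly like _(), so each message ends up in its own
# per-level catalog. The logger below is a local assumption, not module state.
def _example_log_markers():
    import logging
    log = logging.getLogger(__name__)
    log.info(_LI('service started'))
    log.warning(_LW('quota nearly exceeded'))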
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
# NOTE(mrodden): Lazy gettext functionality.
#
# The following introduces a deferred way to do translations on
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
def _lazy_gettext(msg):
"""Create and return a Message object.
            Lazy gettext function for a given domain; it is a factory method
            for a project/module to get a lazy gettext function for its own
            translation domain (e.g. nova, glance, cinder, etc.).
Message encapsulates a string so that we can translate
it later when needed.
"""
return Message(msg, domain=domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True)
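# Illustrative sketch (not part of the original module): a project calls
# install() once at startup. With lazy=True the '_' injected into builtins
# builds Message objects for the given domain; with lazy=False the standard
# gettext.install() path is used and '_' returns already-translated strings.
def _example_install():
    install('glanceclient', lazy=True)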
class Message(six.text_type):
"""A Message object is a unicode object that can be translated.
Translation of Message is done explicitly using the translate() method.
For all non-translation intents and purposes, a Message is simply unicode,
and can be treated as such.
"""
def __new__(cls, msgid, msgtext=None, params=None,
domain='glanceclient', *args):
"""Create a new Message object.
        In order for translation to work, gettext requires a message ID; this
        msgid will be used as the base unicode text. It is also possible
for the msgid and the base unicode text to be different by passing
the msgtext parameter.
"""
# If the base msgtext is not given, we use the default translation
# of the msgid (which is in English) just in case the system locale is
# not English, so that the base text will be in that locale by default.
if not msgtext:
msgtext = Message._translate_msgid(msgid, domain)
# We want to initialize the parent unicode with the actual object that
# would have been plain unicode if 'Message' was not enabled.
msg = super(Message, cls).__new__(cls, msgtext)
msg.msgid = msgid
msg.domain = domain
msg.params = params
return msg
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message
@staticmethod
def _translate_msgid(msgid, domain, desired_locale=None):
if not desired_locale:
system_locale = locale.getdefaultlocale()
# If the system locale is not available to the runtime use English
if not system_locale[0]:
desired_locale = 'en_US'
else:
desired_locale = system_locale[0]
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
lang = gettext.translation(domain,
localedir=locale_dir,
languages=[desired_locale],
fallback=True)
if six.PY3:
translator = lang.gettext
else:
translator = lang.ugettext
translated_message = translator(msgid)
return translated_message
def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()); the only thing we do here is
        # save the original msgid and the parameters in case a translation
        # is needed later.
params = self._sanitize_mod_params(other)
unicode_mod = super(Message, self).__mod__(params)
modded = Message(self.msgid,
msgtext=unicode_mod,
params=params,
domain=self.domain)
return modded
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
        - Snapshot the object being modded so that, if the message is later
          translated, it is used as it was when the Message was created
"""
if other is None:
params = (other,)
elif isinstance(other, dict):
# Merge the dictionaries
# Copy each item in case one does not support deep copy.
params = {}
if isinstance(self.params, dict):
for key, val in self.params.items():
params[key] = self._copy_param(val)
for key, val in other.items():
params[key] = self._copy_param(val)
else:
params = self._copy_param(other)
return params
def _copy_param(self, param):
try:
return copy.deepcopy(param)
except Exception:
            # Fall back to casting to unicode; this will handle the
            # Python code-like objects that can't be deep-copied
return six.text_type(param)
def __add__(self, other):
msg = _('Message objects do not support addition.')
raise TypeError(msg)
def __radd__(self, other):
return self.__add__(other)
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
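# Illustrative sketch (not part of the original module): a Message behaves
# like unicode but remembers its msgid and parameters, so it can be
# re-translated after formatting. The msgid and locale below are assumptions
# for illustration.
def _example_message_usage():
    msg = Message('Volume %(id)s not found', domain='glanceclient')
    msg = msg % {'id': 'abc123'}    # still a Message; params are snapshotted
    # Rendered against the en_US catalog; falls back to the msgid when no
    # catalog is installed.
    return msg.translate('en_US')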
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
# are perfectly legitimate locales:
# https://github.com/mitsuhiko/babel/issues/37
# In Babel 1.3 they fixed the bug and they support these locales, but
# they are still not explicitly "listed" by locale_identifiers().
# That is why we add the locales here explicitly if necessary so that
# they are listed as supported.
aliases = {'zh': 'zh_CN',
'zh_Hant_HK': 'zh_HK',
'zh_Hant': 'zh_TW',
'fil': 'tl_PH'}
    for (locale_, alias) in six.iteritems(aliases):
        # 'locale_' avoids shadowing the locale module imported above.
        if locale_ in language_list and alias not in language_list:
            language_list.append(alias)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
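# Illustrative sketch (not part of the original module): the returned list
# always starts with 'en_US' and then contains whatever catalogs gettext.find()
# located for the domain, so the exact contents depend on what is installed.
def _example_available_languages():
    return get_available_languages('glanceclient')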
def translate(obj, desired_locale=None):
"""Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
message = obj
if not isinstance(message, Message):
# If the object to translate is not already translatable,
# let's first get its unicode representation
message = six.text_type(obj)
if isinstance(message, Message):
# Even after unicoding() we still need to check if we are
# running with translatable unicode before translating
return message.translate(desired_locale)
return obj
def _translate_args(args, desired_locale=None):
"""Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments which include values of tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original
"""
if isinstance(args, tuple):
return tuple(translate(v, desired_locale) for v in args)
if isinstance(args, dict):
translated_dict = {}
for (k, v) in six.iteritems(args):
translated_v = translate(v, desired_locale)
translated_dict[k] = translated_v
return translated_dict
return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
"""Handler that translates records before logging them.
The TranslationHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating them. This handler
depends on Message objects being logged, instead of regular strings.
The handler can be configured declaratively in the logging.conf as follows:
[handlers]
keys = translatedlog, translator
[handler_translatedlog]
class = handlers.WatchedFileHandler
args = ('/var/log/api-localized.log',)
formatter = context
[handler_translator]
class = openstack.common.log.TranslationHandler
target = translatedlog
args = ('zh_CN',)
If the specified locale is not available in the system, the handler will
log in the default locale.
"""
def __init__(self, locale=None, target=None):
"""Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
# NOTE(luisg): In order to allow this handler to be a wrapper for
# other handlers, such as a FileHandler, and still be able to
# configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandler's logging.conf
        # parsing is implemented such that it accepts a target handler.
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
def setFormatter(self, fmt):
self.target.setFormatter(fmt)
def emit(self, record):
# We save the message from the original record to restore it
# after translation, so other handlers are not affected by this
original_msg = record.msg
original_args = record.args
try:
self._translate_and_log_record(record)
finally:
record.msg = original_msg
record.args = original_args
def _translate_and_log_record(self, record):
record.msg = translate(record.msg, self.locale)
        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message, e.g. log.info(_('Some message %s'), this_one)
record.args = _translate_args(record.args, self.locale)
self.target.emit(record)
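# Illustrative sketch (not part of the original module): the handler can also
# be wired up programmatically; the stream handler and locale below are
# assumptions for illustration. Records whose msg is a Message are translated
# to the configured locale before being forwarded to the target handler.
def _example_translation_handler():
    import logging
    target = logging.StreamHandler()
    handler = TranslationHandler(locale='zh_CN', target=target)
    logging.getLogger('glanceclient').addHandler(handler)
    return handler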
"""Test the cross_validation module"""
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.fixes import unique
from sklearn import cross_validation as cval
from sklearn.base import BaseEstimator
from sklearn.datasets import make_regression
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import explained_variance_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import make_scorer
from sklearn.externals import six
from sklearn.linear_model import Ridge
from sklearn.svm import SVC
class MockListClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation.
Checks that GridSearchCV didn't convert X to array.
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
assert_true(isinstance(X, list))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
class MockClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0):
self.a = a
def fit(self, X, Y=None, sample_weight=None, class_prior=None):
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.arange(10) // 2
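# Illustrative sketch (not part of the original test module): the cval
# splitters exercised below are iterables of (train, test) index (or mask)
# pairs, typically consumed like this with any estimator.
def _example_kfold_usage():
    clf = MockClassifier()
    for train, test in cval.KFold(len(y), 3):
        clf.fit(X[train], y[train])
        clf.score(X[test], y[test])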
##############################################################################
# Tests
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
y = [0, 0, 1, 1, 2]
cval.StratifiedKFold(y, 3)
# checking there was only one warning.
assert_equal(len(w), 1)
# checking it has the right type
assert_equal(w[0].category, Warning)
# checking it's the right warning. This might be a bad test since it's
# a characteristic of the code and not a behavior
assert_true("The least populated class" in str(w[0]))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
all_folds = None
for train, test in kf:
if all_folds is None:
all_folds = test.copy()
else:
all_folds = np.concatenate((all_folds, test))
all_folds.sort()
assert_array_equal(all_folds, np.arange(300))
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
@ignore_warnings
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf1 = cval.KFold(300, 3, shuffle=True, random_state=0, indices=True)
kf2 = cval.KFold(300, 3, shuffle=True, random_state=0, indices=False)
ind = np.arange(300)
for kf in (kf1, kf2):
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0, indices=True)
for train, test in sss:
assert_array_equal(unique(y[train]), unique(y[test]))
            # Check that the folds preserve the class proportions
p_train = (np.bincount(unique(y[train], return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(unique(y[test], return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
@ignore_warnings
def test_stratified_shuffle_split_iter_no_indices():
y = np.asarray([0, 1, 2] * 10)
sss1 = cval.StratifiedShuffleSplit(y, indices=False, random_state=0)
train_mask, test_mask = next(iter(sss1))
sss2 = cval.StratifiedShuffleSplit(y, indices=True, random_state=0)
train_indices, test_indices = next(iter(sss2))
assert_array_equal(sorted(test_indices), np.where(test_mask)[0])
def test_leave_label_out_changing_labels():
"""Check that LeaveOneLabelOut and LeavePLabelOut work normally if
the labels variable is changed before calling __iter__"""
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X as list
clf = MockListClassifier()
scores = cval.cross_val_score(clf, X.tolist(), y)
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
score = cval.cross_val_score(clf, X, y, score_func=score_func)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = range(10)
split = cval.train_test_split(X, X_s, y)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [1., 0.97, 0.90, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [1., 0.97, 0.90, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1", cv=5)
assert_array_almost_equal(f1_scores, [1., 0.97, 0.90, 0.97, 1.], 2)
# also test deprecated old way
with warnings.catch_warnings(record=True):
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
score_func=f1_score, cv=5)
assert_array_almost_equal(f1_scores, [1., 0.97, 0.90, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
with warnings.catch_warnings(record=True):
ev_scores = cval.cross_val_score(reg, X, y, cv=5,
score_func=explained_variance_score)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, cv=cv, scoring="accuracy", labels=np.ones(y.size),
random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
scorer = make_scorer(fbeta_score, beta=2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, scoring=scorer, cv=cv, labels=np.ones(y.size),
random_state=0)
assert_almost_equal(score_label, .95, 2)
assert_almost_equal(pvalue_label, 0.01, 3)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2, indices=True)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(svm, X, y, cv=cv,
scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.4)
# test with deprecated interface
with warnings.catch_warnings(record=True):
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, score_func=accuracy_score, cv=cv)
assert_less(score, 0.5)
assert_greater(pvalue, 0.4)
def test_cross_val_generator_with_mask():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = assert_warns(DeprecationWarning, cval.LeaveOneOut,
4, indices=False)
lpo = assert_warns(DeprecationWarning, cval.LeavePOut,
4, 2, indices=False)
kf = assert_warns(DeprecationWarning, cval.KFold,
4, 2, indices=False)
skf = assert_warns(DeprecationWarning, cval.StratifiedKFold,
y, 2, indices=False)
lolo = assert_warns(DeprecationWarning, cval.LeaveOneLabelOut,
labels, indices=False)
lopo = assert_warns(DeprecationWarning, cval.LeavePLabelOut,
labels, 2, indices=False)
ss = assert_warns(DeprecationWarning, cval.ShuffleSplit,
4, indices=False)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss]:
for train, test in cv:
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=True)
lpo = cval.LeavePOut(4, 2, indices=True)
kf = cval.KFold(4, 2, indices=True)
skf = cval.StratifiedKFold(y, 2, indices=True)
lolo = cval.LeaveOneLabelOut(labels, indices=True)
lopo = cval.LeavePLabelOut(labels, 2, indices=True)
b = cval.Bootstrap(2) # only in index mode
ss = cval.ShuffleSplit(2, indices=True)
for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
for train, test in cv:
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
@ignore_warnings
def test_cross_val_generator_mask_indices_same():
# Test that the cross validation generators return the same results when
# indices=True and when indices=False
y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2])
labels = np.array([1, 1, 2, 3, 3, 3, 4])
loo_mask = cval.LeaveOneOut(5, indices=False)
loo_ind = cval.LeaveOneOut(5, indices=True)
lpo_mask = cval.LeavePOut(10, 2, indices=False)
lpo_ind = cval.LeavePOut(10, 2, indices=True)
kf_mask = cval.KFold(10, 5, indices=False, shuffle=True, random_state=1)
kf_ind = cval.KFold(10, 5, indices=True, shuffle=True, random_state=1)
skf_mask = cval.StratifiedKFold(y, 3, indices=False)
skf_ind = cval.StratifiedKFold(y, 3, indices=True)
lolo_mask = cval.LeaveOneLabelOut(labels, indices=False)
lolo_ind = cval.LeaveOneLabelOut(labels, indices=True)
lopo_mask = cval.LeavePLabelOut(labels, 2, indices=False)
lopo_ind = cval.LeavePLabelOut(labels, 2, indices=True)
for cv_mask, cv_ind in [(loo_mask, loo_ind), (lpo_mask, lpo_ind),
(kf_mask, kf_ind), (skf_mask, skf_ind),
(lolo_mask, lolo_ind), (lopo_mask, lopo_ind)]:
for (train_mask, test_mask), (train_ind, test_ind) in \
zip(cv_mask, cv_ind):
assert_array_equal(np.where(train_mask)[0], train_ind)
assert_array_equal(np.where(test_mask)[0], test_ind)
def test_bootstrap_errors():
assert_raises(ValueError, cval.Bootstrap, 10, train_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, train_size=1.1)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=1.1)
def test_bootstrap_test_sizes():
assert_equal(cval.Bootstrap(10, test_size=0.2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=None).test_size, 5)
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
@ignore_warnings
def test_cross_indices_exception():
X = coo_matrix(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=False)
lpo = cval.LeavePOut(4, 2, indices=False)
kf = cval.KFold(4, 2, indices=False)
skf = cval.StratifiedKFold(y, 2, indices=False)
lolo = cval.LeaveOneLabelOut(labels, indices=False)
lopo = cval.LeavePLabelOut(labels, 2, indices=False)
assert_raises(ValueError, cval.check_cv, loo, X, y)
assert_raises(ValueError, cval.check_cv, lpo, X, y)
assert_raises(ValueError, cval.check_cv, kf, X, y)
assert_raises(ValueError, cval.check_cv, skf, X, y)
assert_raises(ValueError, cval.check_cv, lolo, X, y)
assert_raises(ValueError, cval.check_cv, lopo, X, y)
from functools import wraps
import hashlib
import datetime
import mimetypes
import random
from bson import ObjectId
from flask import jsonify, Response, url_for
from flask_jwt import jwt_required, current_user
from flask_restful import reqparse, abort
import pymongo
import werkzeug
from werkzeug.utils import secure_filename
from flask.ext import restful
import notifier
from core import app, DB, FS, redis_client
api_request_parser = reqparse.RequestParser()
api_request_parser.add_argument('api_key', type=str, required=True, help="Missing api key")
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config.get('ALLOWED_EXTENSIONS')
def get_cam_by_id(camera_id):
if ObjectId.is_valid(camera_id):
return DB.cams.find_one({"_id": ObjectId(camera_id)})
else:
return None
def requires_api_key(f):
@wraps(f)
def decorated_function(*args, **kwargs):
api_args = api_request_parser.parse_args()
input_api_key = api_args['api_key']
if not input_api_key:
restful.abort(401)
else:
valid_cam = DB.cams.find_one({"api_key": input_api_key})
if not valid_cam:
restful.abort(401, description="Valid api key is required")
else:
valid_cam['last_access'] = datetime.datetime.now()
DB.cams.save(valid_cam)
return f(*args, **kwargs)
return decorated_function
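# Illustrative note (not part of the original module): every resource method
# wrapped with @requires_api_key must receive the camera's api_key with the
# request (query string or form field), e.g. something like
#
#     GET /camera/state?api_key=<key issued at registration>
#
# (the exact route is hypothetical; routes are registered elsewhere). Missing
# or unknown keys abort the request with HTTP 401; a valid key also refreshes
# the camera's last_access timestamp.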
class CameraStateController(restful.Resource):
@requires_api_key
def get(self):
args = api_request_parser.parse_args()
valid_cam = DB.cams.find_one({"api_key": args['api_key']})
if valid_cam:
return {'result': 'OK', 'camera_state': valid_cam.get('active')}
return {'result': 'NOK'}, 401
@staticmethod
@jwt_required()
def post(camera_id):
if camera_id:
camera = get_cam_by_id(camera_id)
if camera:
CameraStateController.change_camera_state(camera, not camera.get('active'), current_user.email)
return jsonify(result="OK", new_state=camera.get('active'), id=camera_id)
else:
return jsonify(result="NOK", error="Invalid camera id")
return jsonify(result="NOK")
@staticmethod
def change_camera_state(camera, new_state, user):
camera['active'] = new_state
DB.cams.save(camera)
DB.cams.history.insert({
'action': 'change_state',
'camera': camera.get('name'),
'when': datetime.datetime.now(),
'new_state': camera.get('active'),
'user': user
})
notifier.notify_camera_state_changed(camera)
file_upload_parser = api_request_parser.copy()
file_upload_parser.add_argument('file', type=werkzeug.datastructures.FileStorage, location='files',
required=True)
file_upload_parser.add_argument('date', type=str)
file_upload_parser.add_argument('event', type=str)
class UploadImage(restful.Resource):
@staticmethod
@requires_api_key
def post():
args = file_upload_parser.parse_args()
request_cam = DB.cams.find_one({"api_key": args['api_key']})
in_image_file = args['file']
if in_image_file and allowed_file(in_image_file.filename):
filename = secure_filename(in_image_file.filename)
content_type = in_image_file.content_type \
if in_image_file.content_type else mimetypes.guess_type(in_image_file.filename)[0]
oid = FS.put(in_image_file, content_type=content_type,
filename=filename)
DB.images.save({
"image_id": str(oid),
"date_saved": datetime.datetime.now(),
"date_taken": args.get('date') if 'date' in args else datetime.datetime.now(),
"camera": request_cam.get('name'),
})
notifier.notify_new_image(request_cam, url_for('serve_gridfs_file', oid=str(oid), _external=True))
redis_client.publish(str(request_cam.get("_id")) + ':stream', oid)
return jsonify(status="OK", oid=str(oid), camera_state=request_cam.get('active'))
return jsonify(status="NOK", error="not allowed file")
class CameraController(restful.Resource):
@staticmethod
@jwt_required()
def delete(camera_id):
if camera_id:
cam_by_id = get_cam_by_id(camera_id)
if cam_by_id:
DB.cams.remove({"_id": ObjectId(camera_id)})
DB.cams.history.insert({
'action': 'remove',
'camera': cam_by_id.get('name'),
'when': datetime.datetime.now(),
'user': current_user.email
})
return jsonify(result="OK")
return jsonify(result="NOK")
class StreamController(restful.Resource):
@staticmethod
def get_camera_frame(camera_id):
pubsub = redis_client.get_pubsub(camera_id + ':stream')
if pubsub:
for message in pubsub.listen():
app.logger.debug("Got this %s, data", message)
if ObjectId.is_valid(message.get('data')):
image_file = FS.get(ObjectId(message.get('data')))
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + image_file.read() + b'\r\n')
else:
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n\r\n')
@staticmethod
def get(camera_id):
if camera_id and get_cam_by_id(camera_id):
return Response(StreamController.get_camera_frame(camera_id),
mimetype='multipart/x-mixed-replace; boundary=frame')
else:
abort(404)
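# Illustrative note (not part of the original module): get_camera_frame()
# yields a multipart/x-mixed-replace (MJPEG) stream, so a browser can render
# the live feed with a plain image tag pointing at this resource, e.g.
#
#     <img src="/stream/<camera_id>">
#
# (the URL path is hypothetical; it depends on how the resource is registered
# with the restful Api object elsewhere in the application).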
class StreamingController(restful.Resource):
@staticmethod
@jwt_required()
def get():
cameras = []
for camera in DB.cams.find():
cameras.append({
"id": str(camera.get("_id")),
"name": camera.get('name'),
"active": camera.get('active'),
})
return jsonify(result="OK", cameras=cameras)
class CamerasController(restful.Resource):
def __init__(self):
self.register_parser = reqparse.RequestParser()
self.register_parser.add_argument('cam_name', type=str, required=True, help='Provide camera name')
@staticmethod
@jwt_required()
def get():
cameras = []
for camera in DB.cams.find():
cameras.append({
"id": str(camera.get("_id")),
"name": camera.get('name'),
"api_key": camera.get('api_key'),
"active": camera.get('active'),
"last_access": camera.get('last_access'),
"registered": camera.get('registered'),
})
for camera in cameras:
# get the last history entry of the camera
last_events = DB.cams.history.find({"camera": camera.get('name')}) \
.sort("when", pymongo.DESCENDING) \
.limit(5)
if last_events:
camera['last_events'] = list()
for last_event in last_events:
camera['last_events'].append({
"when": last_event.get("when"),
"user": last_event.get("user"),
"action": last_event.get("action"),
"new_state": last_event.get("new_state")
})
last_image = DB.images.find_one({"camera": camera.get('name')}, sort=[("date_saved", pymongo.DESCENDING)])
if last_image:
camera["last_image_date"] = last_image.get("date_saved")
return jsonify(result="OK", cameras=cameras)
@jwt_required()
def put(self):
args = self.register_parser.parse_args()
input_cam_name = args.get('cam_name')
existing = DB.cams.find_one({"name": input_cam_name})
if existing:
return {'error': "There is already a camera with this name"}, 400
else:
new_cam_api_key = hashlib.sha224(str(random.getrandbits(256))).hexdigest()
DB.cams.insert({
"name": input_cam_name,
"api_key": new_cam_api_key,
"registered": datetime.datetime.now(),
"active": True
})
return {'status': "OK", 'api_key': new_cam_api_key}
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorProcess}.
@var properEnv: A copy of L{os.environ} which has L{bytes} keys/values on POSIX
platforms and native L{str} keys/values on Windows.
"""
from __future__ import division, absolute_import, print_function
import io
import os
import signal
import sys
import threading
import twisted
import subprocess
from twisted.trial.unittest import TestCase
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.python.log import msg, err
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath, _asFilesystemBytes
from twisted.python.compat import (networkString, range, items,
bytesEnviron, unicode)
from twisted.internet import utils
from twisted.internet.interfaces import IReactorProcess, IProcessTransport
from twisted.internet.defer import Deferred, succeed
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.error import ProcessDone, ProcessTerminated
# Get the current Python executable as a bytestring.
pyExe = FilePath(sys.executable)._asBytesPath()
twistedRoot = FilePath(twisted.__file__).parent().parent()
_uidgidSkip = None
if platform.isWindows():
resource = None
process = None
_uidgidSkip = "Cannot change UID/GID on Windows"
properEnv = dict(os.environ)
properEnv["PYTHONPATH"] = os.pathsep.join(sys.path)
else:
import resource
from twisted.internet import process
if os.getuid() != 0:
_uidgidSkip = "Cannot change UID/GID except as root"
properEnv = bytesEnviron()
properEnv[b"PYTHONPATH"] = os.pathsep.join(sys.path).encode(
sys.getfilesystemencoding())
def onlyOnPOSIX(testMethod):
"""
Only run this test on POSIX platforms.
@param testMethod: A test function, being decorated.
@return: the C{testMethod} argument.
"""
if resource is None:
testMethod.skip = "Test only applies to POSIX platforms."
return testMethod
class _ShutdownCallbackProcessProtocol(ProcessProtocol):
"""
An L{IProcessProtocol} which fires a Deferred when the process it is
associated with ends.
@ivar received: A C{dict} mapping file descriptors to lists of bytes
received from the child process on those file descriptors.
"""
def __init__(self, whenFinished):
self.whenFinished = whenFinished
self.received = {}
def childDataReceived(self, fd, bytes):
self.received.setdefault(fd, []).append(bytes)
def processEnded(self, reason):
self.whenFinished.callback(None)
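# Illustrative sketch (not part of the original tests): the protocol above is
# wired up by handing it a Deferred that fires from processEnded, e.g.
#
#     ended = Deferred()
#     protocol = _ShutdownCallbackProcessProtocol(ended)
#     reactor.spawnProcess(protocol, pyExe, [pyExe, b"-c", b""])
#     ended.addCallback(lambda ignored: reactor.stop())
#
# which is exactly the pattern the test methods below follow.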
class ProcessTestsBuilderBase(ReactorBuilder):
"""
Base class for L{IReactorProcess} tests which defines some tests which
can be applied to PTY or non-PTY uses of C{spawnProcess}.
Subclasses are expected to set the C{usePTY} attribute to C{True} or
C{False}.
"""
requiredInterfaces = [IReactorProcess]
def test_processTransportInterface(self):
"""
L{IReactorProcess.spawnProcess} connects the protocol passed to it
to a transport which provides L{IProcessTransport}.
"""
ended = Deferred()
protocol = _ShutdownCallbackProcessProtocol(ended)
reactor = self.buildReactor()
transport = reactor.spawnProcess(
protocol, pyExe, [pyExe, b"-c", b""],
usePTY=self.usePTY)
# The transport is available synchronously, so we can check it right
# away (unlike many transport-based tests). This is convenient even
# though it's probably not how the spawnProcess interface should really
# work.
# We're not using verifyObject here because part of
# IProcessTransport is a lie - there are no getHost or getPeer
# methods. See #1124.
self.assertTrue(IProcessTransport.providedBy(transport))
# Let the process run and exit so we don't leave a zombie around.
ended.addCallback(lambda ignored: reactor.stop())
self.runReactor(reactor)
def _writeTest(self, write):
"""
Helper for testing L{IProcessTransport} write functionality. This
method spawns a child process and gives C{write} a chance to write some
bytes to it. It then verifies that the bytes were actually written to
it (by relying on the child process to echo them back).
@param write: A two-argument callable. This is invoked with a process
transport and some bytes to write to it.
"""
reactor = self.buildReactor()
ended = Deferred()
protocol = _ShutdownCallbackProcessProtocol(ended)
bytesToSend = b"hello, world" + networkString(os.linesep)
program = (
b"import sys\n"
b"sys.stdout.write(sys.stdin.readline())\n"
)
def startup():
transport = reactor.spawnProcess(
protocol, pyExe, [pyExe, b"-c", program])
try:
write(transport, bytesToSend)
except:
err(None, "Unhandled exception while writing")
transport.signalProcess('KILL')
reactor.callWhenRunning(startup)
ended.addCallback(lambda ignored: reactor.stop())
self.runReactor(reactor)
self.assertEqual(bytesToSend, b"".join(protocol.received[1]))
def test_write(self):
"""
L{IProcessTransport.write} writes the specified C{bytes} to the standard
input of the child process.
"""
def write(transport, bytesToSend):
transport.write(bytesToSend)
self._writeTest(write)
def test_writeSequence(self):
"""
L{IProcessTransport.writeSequence} writes the specified C{list} of
C{bytes} to the standard input of the child process.
"""
def write(transport, bytesToSend):
transport.writeSequence([bytesToSend])
self._writeTest(write)
def test_writeToChild(self):
"""
L{IProcessTransport.writeToChild} writes the specified C{bytes} to the
specified file descriptor of the child process.
"""
def write(transport, bytesToSend):
transport.writeToChild(0, bytesToSend)
self._writeTest(write)
def test_writeToChildBadFileDescriptor(self):
"""
L{IProcessTransport.writeToChild} raises L{KeyError} if passed a file
        descriptor which was not set up by L{IReactorProcess.spawnProcess}.
"""
def write(transport, bytesToSend):
try:
self.assertRaises(KeyError, transport.writeToChild, 13, bytesToSend)
finally:
# Just get the process to exit so the test can complete
transport.write(bytesToSend)
self._writeTest(write)
def test_spawnProcessEarlyIsReaped(self):
"""
If, before the reactor is started with L{IReactorCore.run}, a
process is started with L{IReactorProcess.spawnProcess} and
terminates, the process is reaped once the reactor is started.
"""
reactor = self.buildReactor()
# Create the process with no shared file descriptors, so that there
# are no other events for the reactor to notice and "cheat" with.
# We want to be sure it's really dealing with the process exiting,
# not some associated event.
if self.usePTY:
childFDs = None
else:
childFDs = {}
# Arrange to notice the SIGCHLD.
signaled = threading.Event()
def handler(*args):
signaled.set()
signal.signal(signal.SIGCHLD, handler)
# Start a process - before starting the reactor!
ended = Deferred()
reactor.spawnProcess(
_ShutdownCallbackProcessProtocol(ended), pyExe,
[pyExe, b"-c", b""], usePTY=self.usePTY, childFDs=childFDs)
# Wait for the SIGCHLD (which might have been delivered before we got
# here, but that's okay because the signal handler was installed above,
# before we could have gotten it).
signaled.wait(120)
if not signaled.isSet():
self.fail("Timed out waiting for child process to exit.")
# Capture the processEnded callback.
result = []
ended.addCallback(result.append)
if result:
# The synchronous path through spawnProcess / Process.__init__ /
# registerReapProcessHandler was encountered. There's no reason to
# start the reactor, because everything is done already.
return
# Otherwise, though, start the reactor so it can tell us the process
# exited.
ended.addCallback(lambda ignored: reactor.stop())
self.runReactor(reactor)
# Make sure the reactor stopped because the Deferred fired.
self.assertTrue(result)
if getattr(signal, 'SIGCHLD', None) is None:
test_spawnProcessEarlyIsReaped.skip = (
"Platform lacks SIGCHLD, early-spawnProcess test can't work.")
def test_processExitedWithSignal(self):
"""
The C{reason} argument passed to L{IProcessProtocol.processExited} is a
L{ProcessTerminated} instance if the child process exits with a signal.
"""
sigName = 'TERM'
sigNum = getattr(signal, 'SIG' + sigName)
exited = Deferred()
source = (
b"import sys\n"
# Talk so the parent process knows the process is running. This is
# necessary because ProcessProtocol.makeConnection may be called
# before this process is exec'd. It would be unfortunate if we
# SIGTERM'd the Twisted process while it was on its way to doing
# the exec.
b"sys.stdout.write('x')\n"
b"sys.stdout.flush()\n"
b"sys.stdin.read()\n")
class Exiter(ProcessProtocol):
def childDataReceived(self, fd, data):
msg('childDataReceived(%d, %r)' % (fd, data))
self.transport.signalProcess(sigName)
def childConnectionLost(self, fd):
msg('childConnectionLost(%d)' % (fd,))
def processExited(self, reason):
msg('processExited(%r)' % (reason,))
# Protect the Deferred from the failure so that it follows
# the callback chain. This doesn't use the errback chain
# because it wants to make sure reason is a Failure. An
# Exception would also make an errback-based test pass, and
# that would be wrong.
exited.callback([reason])
def processEnded(self, reason):
msg('processEnded(%r)' % (reason,))
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Exiter(), pyExe,
[pyExe, b"-c", source], usePTY=self.usePTY)
def cbExited(args):
failure, = args
# Trapping implicitly verifies that it's a Failure (rather than
# an exception) and explicitly makes sure it's the right type.
failure.trap(ProcessTerminated)
err = failure.value
if platform.isWindows():
# Windows can't really /have/ signals, so it certainly can't
# report them as the reason for termination. Maybe there's
# something better we could be doing here, anyway? Hard to
# say. Anyway, this inconsistency between different platforms
# is extremely unfortunate and I would remove it if I
# could. -exarkun
self.assertIsNone(err.signal)
self.assertEqual(err.exitCode, 1)
else:
self.assertEqual(err.signal, sigNum)
self.assertIsNone(err.exitCode)
exited.addCallback(cbExited)
exited.addErrback(err)
exited.addCallback(lambda ign: reactor.stop())
self.runReactor(reactor)
def test_systemCallUninterruptedByChildExit(self):
"""
If a child process exits while a system call is in progress, the system
call should not be interfered with. In particular, it should not fail
with EINTR.
Older versions of Twisted installed a SIGCHLD handler on POSIX without
using the feature exposed by the SA_RESTART flag to sigaction(2). The
        most noticeable problem this caused was that blocking reads and writes
        would sometimes fail with EINTR.
"""
reactor = self.buildReactor()
result = []
def f():
try:
exe = pyExe.decode(sys.getfilesystemencoding())
subprocess.Popen([exe, "-c", "import time; time.sleep(0.1)"])
f2 = subprocess.Popen([exe, "-c",
("import time; time.sleep(0.5);"
"print(\'Foo\')")],
stdout=subprocess.PIPE)
# The read call below will blow up with an EINTR from the
# SIGCHLD from the first process exiting if we install a
# SIGCHLD handler without SA_RESTART. (which we used to do)
with f2.stdout:
result.append(f2.stdout.read())
finally:
reactor.stop()
reactor.callWhenRunning(f)
self.runReactor(reactor)
self.assertEqual(result, [b"Foo" + os.linesep.encode('ascii')])
@onlyOnPOSIX
def test_openFileDescriptors(self):
"""
Processes spawned with spawnProcess() close all extraneous file
descriptors in the parent. They do have a stdin, stdout, and stderr
open.
"""
# To test this, we are going to open a file descriptor in the parent
# that is unlikely to be opened in the child, then verify that it's not
# open in the child.
source = networkString("""
import sys
sys.path.insert(0, '{0}')
from twisted.internet import process
sys.stdout.write(repr(process._listOpenFDs()))
sys.stdout.flush()""".format(twistedRoot.path))
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
# The call to "os.listdir()" (in _listOpenFDs's implementation) opens a
# file descriptor (with "opendir"), which shows up in _listOpenFDs's
# result. And speaking of "random" file descriptors, the code required
# for _listOpenFDs itself imports logger, which imports random, which
# (depending on your Python version) might leave /dev/urandom open.
# More generally though, even if we were to use an extremely minimal C
# program, the operating system would be within its rights to open file
# descriptors we might not know about in the C library's
# initialization; things like debuggers, profilers, or nsswitch plugins
# might open some and this test should pass in those environments.
# Although some of these file descriptors aren't predictable, we should
# at least be able to select a very large file descriptor which is very
# unlikely to be opened automatically in the subprocess. (Apply a
# fudge factor to avoid hard-coding something too near a limit
# condition like the maximum possible file descriptor, which a library
# might at least hypothetically select.)
fudgeFactor = 17
unlikelyFD = (resource.getrlimit(resource.RLIMIT_NOFILE)[0]
- fudgeFactor)
os.dup2(w, unlikelyFD)
self.addCleanup(os.close, unlikelyFD)
output = io.BytesIO()
class GatheringProtocol(ProcessProtocol):
outReceived = output.write
def processEnded(self, reason):
reactor.stop()
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, GatheringProtocol(), pyExe,
[pyExe, b"-Wignore", b"-c", source], usePTY=self.usePTY)
self.runReactor(reactor)
reportedChildFDs = set(eval(output.getvalue()))
stdFDs = [0, 1, 2]
# Unfortunately this assertion is still not *entirely* deterministic,
# since hypothetically, any library could open any file descriptor at
# any time. See comment above.
self.assertEqual(
reportedChildFDs.intersection(set(stdFDs + [unlikelyFD])),
set(stdFDs)
)
@onlyOnPOSIX
def test_errorDuringExec(self):
"""
When L{os.execvpe} raises an exception, it will format that exception
on stderr as UTF-8, regardless of system encoding information.
"""
def execvpe(*args, **kw):
# Ensure that real traceback formatting has some non-ASCII in it,
# by forcing the filename of the last frame to contain non-ASCII.
filename = u"<\N{SNOWMAN}>"
if not isinstance(filename, str):
filename = filename.encode("utf-8")
codeobj = compile("1/0", filename, "single")
eval(codeobj)
self.patch(os, "execvpe", execvpe)
self.patch(sys, "getfilesystemencoding", lambda: "ascii")
reactor = self.buildReactor()
output = io.BytesIO()
@reactor.callWhenRunning
def whenRunning():
class TracebackCatcher(ProcessProtocol, object):
errReceived = output.write
def processEnded(self, reason):
reactor.stop()
reactor.spawnProcess(TracebackCatcher(), pyExe,
[pyExe, b"-c", b""])
self.runReactor(reactor, timeout=30)
self.assertIn(u"\N{SNOWMAN}".encode("utf-8"), output.getvalue())
def test_timelyProcessExited(self):
"""
If a spawned process exits, C{processExited} will be called in a
timely manner.
"""
reactor = self.buildReactor()
class ExitingProtocol(ProcessProtocol):
exited = False
def processExited(protoSelf, reason):
protoSelf.exited = True
reactor.stop()
self.assertEqual(reason.value.exitCode, 0)
protocol = ExitingProtocol()
reactor.callWhenRunning(
reactor.spawnProcess, protocol, pyExe,
[pyExe, b"-c", b"raise SystemExit(0)"],
usePTY=self.usePTY)
# This will timeout if processExited isn't called:
self.runReactor(reactor, timeout=30)
self.assertTrue(protocol.exited)
def _changeIDTest(self, which):
"""
Launch a child process, using either the C{uid} or C{gid} argument to
L{IReactorProcess.spawnProcess} to change either its UID or GID to a
different value. If the child process reports this hasn't happened,
raise an exception to fail the test.
@param which: Either C{b"uid"} or C{b"gid"}.
"""
program = [
"import os",
"raise SystemExit(os.get%s() != 1)" % (which,)]
container = []
class CaptureExitStatus(ProcessProtocol):
def processEnded(self, reason):
container.append(reason)
reactor.stop()
reactor = self.buildReactor()
protocol = CaptureExitStatus()
reactor.callWhenRunning(
reactor.spawnProcess, protocol, pyExe,
[pyExe, "-c", "\n".join(program)],
**{which: 1})
self.runReactor(reactor)
self.assertEqual(0, container[0].value.exitCode)
def test_changeUID(self):
"""
If a value is passed for L{IReactorProcess.spawnProcess}'s C{uid}, the
child process is run with that UID.
"""
self._changeIDTest("uid")
if _uidgidSkip is not None:
test_changeUID.skip = _uidgidSkip
def test_changeGID(self):
"""
If a value is passed for L{IReactorProcess.spawnProcess}'s C{gid}, the
child process is run with that GID.
"""
self._changeIDTest("gid")
if _uidgidSkip is not None:
test_changeGID.skip = _uidgidSkip
def test_processExitedRaises(self):
"""
If L{IProcessProtocol.processExited} raises an exception, it is logged.
"""
# Ideally we wouldn't need to poke the process module; see
# https://twistedmatrix.com/trac/ticket/6889
reactor = self.buildReactor()
class TestException(Exception):
pass
class Protocol(ProcessProtocol):
def processExited(self, reason):
reactor.stop()
raise TestException("processedExited raised")
protocol = Protocol()
transport = reactor.spawnProcess(
protocol, pyExe, [pyExe, b"-c", b""],
usePTY=self.usePTY)
self.runReactor(reactor)
# Manually clean-up broken process handler.
# Only required if the test fails on systems that support
# the process module.
if process is not None:
for pid, handler in items(process.reapProcessHandlers):
if handler is not transport:
continue
process.unregisterReapProcessHandler(pid, handler)
self.fail("After processExited raised, transport was left in"
" reapProcessHandlers")
self.assertEqual(1, len(self.flushLoggedErrors(TestException)))
class ProcessTestsBuilder(ProcessTestsBuilderBase):
"""
Builder defining tests relating to L{IReactorProcess} for child processes
which do not have a PTY.
"""
usePTY = False
keepStdioOpenProgram = b'twisted.internet.test.process_helper'
if platform.isWindows():
keepStdioOpenArg = b"windows"
else:
# Just a value that doesn't equal "windows"
keepStdioOpenArg = b""
# Define this test here because PTY-using processes only have stdin and
# stdout and the test would need to be different for that to work.
def test_childConnectionLost(self):
"""
L{IProcessProtocol.childConnectionLost} is called each time a file
descriptor associated with a child process is closed.
"""
connected = Deferred()
lost = {0: Deferred(), 1: Deferred(), 2: Deferred()}
class Closer(ProcessProtocol):
def makeConnection(self, transport):
connected.callback(transport)
def childConnectionLost(self, childFD):
lost[childFD].callback(None)
target = b"twisted.internet.test.process_loseconnection"
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Closer(), pyExe,
[pyExe, b"-m", target], env=properEnv, usePTY=self.usePTY)
def cbConnected(transport):
transport.write(b'2\n')
return lost[2].addCallback(lambda ign: transport)
connected.addCallback(cbConnected)
def lostSecond(transport):
transport.write(b'1\n')
return lost[1].addCallback(lambda ign: transport)
connected.addCallback(lostSecond)
def lostFirst(transport):
transport.write(b'\n')
connected.addCallback(lostFirst)
connected.addErrback(err)
def cbEnded(ignored):
reactor.stop()
connected.addCallback(cbEnded)
self.runReactor(reactor)
# This test is here because PTYProcess never delivers childConnectionLost.
def test_processEnded(self):
"""
L{IProcessProtocol.processEnded} is called after the child process
exits and L{IProcessProtocol.childConnectionLost} is called for each of
its file descriptors.
"""
ended = Deferred()
lost = []
class Ender(ProcessProtocol):
def childDataReceived(self, fd, data):
msg('childDataReceived(%d, %r)' % (fd, data))
self.transport.loseConnection()
def childConnectionLost(self, childFD):
msg('childConnectionLost(%d)' % (childFD,))
lost.append(childFD)
def processExited(self, reason):
msg('processExited(%r)' % (reason,))
def processEnded(self, reason):
msg('processEnded(%r)' % (reason,))
ended.callback([reason])
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Ender(), pyExe,
[pyExe, b"-m", self.keepStdioOpenProgram, b"child",
self.keepStdioOpenArg],
env=properEnv, usePTY=self.usePTY)
def cbEnded(args):
failure, = args
failure.trap(ProcessDone)
self.assertEqual(set(lost), set([0, 1, 2]))
ended.addCallback(cbEnded)
ended.addErrback(err)
ended.addCallback(lambda ign: reactor.stop())
self.runReactor(reactor)
# This test is here because PTYProcess.loseConnection does not actually
# close the file descriptors to the child process. This test needs to be
# written fairly differently for PTYProcess.
def test_processExited(self):
"""
L{IProcessProtocol.processExited} is called when the child process
exits, even if file descriptors associated with the child are still
open.
"""
exited = Deferred()
allLost = Deferred()
lost = []
class Waiter(ProcessProtocol):
def childDataReceived(self, fd, data):
msg('childDataReceived(%d, %r)' % (fd, data))
def childConnectionLost(self, childFD):
msg('childConnectionLost(%d)' % (childFD,))
lost.append(childFD)
if len(lost) == 3:
allLost.callback(None)
def processExited(self, reason):
msg('processExited(%r)' % (reason,))
# See test_processExitedWithSignal
exited.callback([reason])
self.transport.loseConnection()
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Waiter(), pyExe,
[pyExe, b"-u", b"-m", self.keepStdioOpenProgram, b"child",
self.keepStdioOpenArg],
env=properEnv, usePTY=self.usePTY)
def cbExited(args):
failure, = args
failure.trap(ProcessDone)
msg('cbExited; lost = %s' % (lost,))
self.assertEqual(lost, [])
return allLost
exited.addCallback(cbExited)
def cbAllLost(ignored):
self.assertEqual(set(lost), set([0, 1, 2]))
exited.addCallback(cbAllLost)
exited.addErrback(err)
exited.addCallback(lambda ign: reactor.stop())
self.runReactor(reactor)
def makeSourceFile(self, sourceLines):
"""
Write the given list of lines to a text file and return the absolute
path to it.
"""
script = _asFilesystemBytes(self.mktemp())
with open(script, 'wt') as scriptFile:
scriptFile.write(os.linesep.join(sourceLines) + os.linesep)
return os.path.abspath(script)
def test_shebang(self):
"""
Spawning a process with an executable which is a script starting
with an interpreter definition line (#!) uses that interpreter to
evaluate the script.
"""
shebangOutput = b'this is the shebang output'
scriptFile = self.makeSourceFile([
"#!%s" % (pyExe.decode('ascii'),),
"import sys",
"sys.stdout.write('%s')" % (shebangOutput.decode('ascii'),),
"sys.stdout.flush()"])
os.chmod(scriptFile, 0o700)
reactor = self.buildReactor()
def cbProcessExited(args):
out, err, code = args
msg("cbProcessExited((%r, %r, %d))" % (out, err, code))
self.assertEqual(out, shebangOutput)
self.assertEqual(err, b"")
self.assertEqual(code, 0)
def shutdown(passthrough):
reactor.stop()
return passthrough
def start():
d = utils.getProcessOutputAndValue(scriptFile, reactor=reactor)
d.addBoth(shutdown)
d.addCallback(cbProcessExited)
d.addErrback(err)
reactor.callWhenRunning(start)
self.runReactor(reactor)
def test_processCommandLineArguments(self):
"""
Arguments given to spawnProcess are passed to the child process as
originally intended.
"""
us = b"twisted.internet.test.process_cli"
args = [b'hello', b'"', b' \t|<>^&', br'"\\"hello\\"', br'"foo\ bar baz\""']
# Ensure that all non-NUL characters can be passed too.
allChars = "".join(map(chr, range(1, 255)))
if isinstance(allChars, unicode):
allChars.encode("utf-8")
reactor = self.buildReactor()
def processFinished(finishedArgs):
output, err, code = finishedArgs
output = output.split(b'\0')
# Drop the trailing \0.
output.pop()
self.assertEqual(args, output)
def shutdown(result):
reactor.stop()
return result
def spawnChild():
d = succeed(None)
d.addCallback(lambda dummy: utils.getProcessOutputAndValue(
pyExe, [b"-m", us] + args, env=properEnv,
reactor=reactor))
d.addCallback(processFinished)
d.addBoth(shutdown)
reactor.callWhenRunning(spawnChild)
self.runReactor(reactor)
globals().update(ProcessTestsBuilder.makeTestCaseClasses())
class PTYProcessTestsBuilder(ProcessTestsBuilderBase):
"""
Builder defining tests relating to L{IReactorProcess} for child processes
which have a PTY.
"""
usePTY = True
if platform.isWindows():
skip = "PTYs are not supported on Windows."
elif platform.isMacOSX():
skip = "PTYs are flaky from a Darwin bug. See #8840."
skippedReactors = {
"twisted.internet.pollreactor.PollReactor":
"OS X's poll() does not support PTYs"}
globals().update(PTYProcessTestsBuilder.makeTestCaseClasses())
class PotentialZombieWarningTests(TestCase):
"""
Tests for L{twisted.internet.error.PotentialZombieWarning}.
"""
def test_deprecated(self):
"""
Accessing L{PotentialZombieWarning} via the
I{PotentialZombieWarning} attribute of L{twisted.internet.error}
results in a deprecation warning being emitted.
"""
from twisted.internet import error
error.PotentialZombieWarning
warnings = self.flushWarnings([self.test_deprecated])
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"twisted.internet.error.PotentialZombieWarning was deprecated in "
"Twisted 10.0.0: There is no longer any potential for zombie "
"process.")
self.assertEqual(len(warnings), 1)
class ProcessIsUnimportableOnUnsupportedPlatformsTests(TestCase):
"""
Tests to ensure that L{twisted.internet.process} is unimportable on
platforms where it does not work (namely Windows).
"""
def test_unimportableOnWindows(self):
"""
L{twisted.internet.process} is unimportable on Windows.
"""
with self.assertRaises(ImportError):
import twisted.internet.process
twisted.internet.process # shh pyflakes
if not platform.isWindows():
test_unimportableOnWindows.skip = "Only relevant on Windows."
|
|
"""
plugins/Paypal.py
Author: Trey Stout
Date Added: Mon Jul 24 15:29:48 CDT 2006
New interface to the same old shit...
"""
## STD LIBS
from md5 import md5
from datetime import date, datetime
from xml.dom import minidom
from pprint import pprint, pformat
from math import floor
import time
## OUR LIBS
from AZTKAPI import AZTKAPI
from decorators import stack
from PaymentPlugin import PaymentPlugin
import errors, aztk_config, validation
## 3RD PARTY LIBS
from twisted.internet.defer import Deferred, DeferredList
from twisted.internet import ssl
from twisted.web import client
import SOAPpy
class Paypal(AZTKAPI, PaymentPlugin):
enable_node = True
def start(self):
self.log.info("start called on paypal plugin")
if aztk_config.setup.get('site', 'environment') in ['sandbox', 'development', 'staging']:
self.log.info("using sandbox info for paypal")
self.host = self._cfg_sandbox_host
self.port = self._cfg_sandbox_port
self.url = self._cfg_sandbox_url
self.api_username = self._cfg_sandbox_username
self.api_password = self._cfg_sandbox_password
self.private_ssl_key = self._cfg_sandbox_private_key_file
self.public_ssl_key = self._cfg_sandbox_public_key_file
else:
self.log.info("using production info for paypal")
self.host = self._cfg_production_host
self.port = self._cfg_production_port
self.url = self._cfg_production_url
self.api_username = self._cfg_production_username
self.api_password = self._cfg_production_password
self.private_ssl_key = self._cfg_production_private_key_file
self.public_ssl_key = self._cfg_production_public_key_file
@stack
def random_txn_id(self):
from random import shuffle
import string
chunks = list(string.ascii_uppercase+string.digits)
shuffle(chunks)
return "FAKE_%s" % ''.join(chunks[0:12])
@stack
def _encode_for_shock_and_awe(self, user_info):
new_dict = {}
for key, value in user_info.items():
if isinstance(value, (str, unicode)):
new_dict[key] = value.encode('utf-8')
else:
new_dict[key] = value
return new_dict
@stack
def authorize_payment(self, user_info):
"""
run a payment across to paypal's SOAP API
"""
self.check_data(user_info)
user_info = self._encode_for_shock_and_awe(user_info)
self.log.info("running payment with this data: %s" % pformat(user_info))
if aztk_config.setup.get('site', 'environment') in ['sandbox', 'development', 'staging']:
## we can't use the sandbox unless someone actually logs into the sandbox website on paypal
## so just return a bunch of bogus crap
self.log.info("Returning dummy data")
return (0, {
'transaction_id': self.random_txn_id(),
'username': user_info['username'],
'amount': '29.95',
'avs_code': 'X',
'cvv2_code': 'M',
'processing_node': self.app.host,
})
def handle_result(result):
r = SOAPpy.parseSOAP(result)
if r.DoDirectPaymentResponse.Ack == "Success":
self.log.info(" - ")
self.log.info("Received success from PayPal for %s" % (user_info['username']) )
self.log.info("%s" % result)
payment_info = {
'transaction_id': r.DoDirectPaymentResponse.TransactionID,
'username': user_info['username'],
'amount': r.DoDirectPaymentResponse.Amount,
'avs_code': r.DoDirectPaymentResponse.AVSCode,
'cvv2_code': r.DoDirectPaymentResponse.CVV2Code,
'processing_node': self.app.host,
}
return (0, payment_info)
else:
self.log.info(" - ")
if isinstance(r.DoDirectPaymentResponse.Errors, (list, tuple)):
error_code = r.DoDirectPaymentResponse.Errors[0].ErrorCode
long_message = r.DoDirectPaymentResponse.Errors[0].LongMessage
else:
error_code = r.DoDirectPaymentResponse.Errors.ErrorCode
long_message = r.DoDirectPaymentResponse.Errors.LongMessage
self.log.info("Did NOT receive success from PayPal: %s" % long_message)
self.log.info("INFO: %s, %s" % (user_info['username'], self.app.host))
self.log.info("%s" % result)
return (1, "%s: %s" % (error_code, long_message))
def handle_fail(fail):
self.log.warning("Failed processing with PayPal: %s" % fail.getErrorMessage())
return (1, fail.getErrorMessage())
user_info['api_username'] = self.api_username
user_info['api_password'] = self.api_password
user_info['client_ip'] = '127.0.0.69'
body = self.direct_payment_body % user_info
self.log.info("BODY\n\n%s\n\n" % body)
try:
context = ssl.DefaultOpenSSLContextFactory(self.private_ssl_key, self.public_ssl_key)
factory = client.HTTPClientFactory('https://%s%s' % (self.host, self.url), "POST", str(body), timeout=self._cfg_timeout, agent='Zoto AZTK2')
except Exception, ex:
self.log.warning("Failed to build SSL connection [%s]" % ex)
d = Deferred()
d.callback((1, 'Failed to build SSL connection [%s]' % ex))
return d
d = factory.deferred
d.addCallback(handle_result)
d.addErrback(handle_fail)
self.app.reactor.connectSSL(self.host, self.port, factory, context)
return d
direct_payment_body = """
<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:ns4="urn:ebay:apis:eBLBaseComponents"
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<SOAP-ENV:Header>
<RequesterCredentials xmlns="urn:ebay:api:PayPalAPI" SOAP-ENV:actor="http://schemas.xmlsoap.org/soap/actor/next" SOAP-ENV:mustUnderstand="1">
<ns4:Credentials xmlns:ebl="urn:ebay:apis:eBLBaseComponents">
<ns4:Username xsi:type="xsd:string">%(api_username)s</ns4:Username>
<ns4:Password xsi:type="xsd:string">%(api_password)s</ns4:Password>
<ns4:Subject xsi:type="xsd:string"></ns4:Subject>
</ns4:Credentials>
</RequesterCredentials>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<DoDirectPaymentReq xmlns="urn:ebay:api:PayPalAPI">
<DoDirectPaymentRequest>
<ns4:Version xsi:type="xsd:string">2.1</ns4:Version>
<ns4:DoDirectPaymentRequestDetails xmlns="urn:ebay:apis:eBLBaseComponents">
<PaymentAction>Sale</PaymentAction>
<PaymentDetails>
<OrderTotal xsi:type="xsd:string" currencyID="USD">%(cost)s</OrderTotal>
<OrderDescription xsi:type="xsd:string">%(product_title)s</OrderDescription>
<Custom xsi:type="xsd:string">%(username)s</Custom>
</PaymentDetails>
<CreditCard>
<CreditCardType>%(card_type)s</CreditCardType>
<CreditCardNumber xsi:type="xsd:string">%(card_number)s</CreditCardNumber>
<ExpMonth xsi:type="xsd:int">%(card_expire_month)s</ExpMonth>
<ExpYear xsi:type="xsd:int">%(card_expire_year)s</ExpYear>
<CVV2 xsi:type="xsd:string">%(cvv2_code)s</CVV2>
<CardOwner>
<Payer>%(email)s</Payer>
<PayerName>
<FirstName>%(first_name)s</FirstName>
<LastName>%(last_name)s</LastName>
</PayerName>
<Address>
<Name xsi:type="xsd:string">Primary</Name>
<Street1 xsi:type="xsd:string">%(address1)s</Street1>
<Street2 xsi:type="xsd:string">%(address2)s</Street2>
<CityName xsi:type="xsd:string">%(city)s</CityName>
<StateOrProvince xsi:type="xsd:string">%(state)s</StateOrProvince>
<Country xsi:type="xsd:string">%(country)s</Country>
<PostalCode xsi:type="xsd:string">%(zip)s</PostalCode>
</Address>
</CardOwner>
</CreditCard>
<IPAddress xsi:type="xsd:string">%(client_ip)s</IPAddress>
</ns4:DoDirectPaymentRequestDetails>
</DoDirectPaymentRequest>
</DoDirectPaymentReq>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
"""
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.engine import clients
from heat.engine import scheduler
from heat.engine.resources import nova_utils
from heat.engine import resource
from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
class Server(resource.Resource):
block_mapping_schema = {
'device_name': {
'Type': 'String',
'Required': True,
'Description': _('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda')},
'volume_id': {
'Type': 'String',
'Description': _('The ID of the volume to boot from. Only one of '
'volume_id or snapshot_id should be provided')},
'snapshot_id': {
'Type': 'String',
'Description': _('The ID of the snapshot to create a volume '
'from')},
'volume_size': {
'Type': 'String',
'Description': _('The size of the volume, in GB. It is safe to '
'leave this blank and have the Compute service '
'infer the size')},
'delete_on_termination': {
'Type': 'Boolean',
'Description': _('Indicate whether the volume should be deleted '
'when the server is terminated')}
}
networks_schema = {
'uuid': {
'Type': 'String',
'Description': _('ID of network to create a port on')},
'fixed_ip': {
'Type': 'String',
'Description': _('Fixed IP address to specify for the port '
'created on the requested network')},
'port': {
'Type': 'String',
'Description': _('ID of an existing port to associate with '
'this server')},
}
properties_schema = {
'name': {
'Type': 'String',
'Description': _('Optional server name')},
'image': {
'Type': 'String',
'Description': _('The ID or name of the image to boot with')},
'block_device_mapping': {
'Type': 'List',
'Description': _('Block device mappings for this server'),
'Schema': {
'Type': 'Map',
'Schema': block_mapping_schema
}
},
'flavor': {
'Type': 'String',
'Description': _('The ID or name of the flavor to boot onto'),
'Required': True},
'flavor_update_policy': {
'Type': 'String',
'Description': _('Policy on how to apply a flavor update; either '
'by requesting a server resize or by replacing '
'the entire server'),
'Default': 'RESIZE',
'AllowedValues': ['RESIZE', 'REPLACE']},
'key_name': {
'Type': 'String',
'Description': _('Name of keypair to inject into the server')},
'availability_zone': {
'Type': 'String',
'Description': _('Name of the availability zone for server '
'placement')},
'security_groups': {
'Type': 'List',
'Description': _('List of security group names')},
'networks': {
'Type': 'List',
'Description': _('An ordered list of nics to be '
'added to this server, with information about '
'connected networks, fixed ips, port etc'),
'Schema': {
'Type': 'Map',
'Schema': networks_schema
}
},
'scheduler_hints': {
'Type': 'Map',
'Description': _('Arbitrary key-value pairs specified by the '
'client to help boot a server')},
'metadata': {
'Type': 'Map',
'Description': _('Arbitrary key/value metadata to store for this '
'server. A maximum of five entries is allowed, '
'and both keys and values must be 255 characters '
'or less')},
'user_data': {
'Type': 'String',
'Description': _('User data script to be executed by cloud-init')},
'reservation_id': {
'Type': 'String',
'Description': _('A UUID for the set of servers being requested')
},
'config_drive': {
'Type': 'String',
'Description': _('Value for config drive, either a boolean or a volume id')
},
# diskConfig translates to API attribute OS-DCF:diskConfig
# hence the camel case instead of underscore to separate the words
'diskConfig': {
'Type': 'String',
'Description': _('Control how the disk is partitioned when the '
'server is created'),
'AllowedValues': ['AUTO', 'MANUAL']}
}
attributes_schema = {
'show': _('A dict of all server details as returned by the API'),
'addresses': _('A dict of all network addresses as returned by '
'the API'),
'networks': _('A dict of assigned network addresses of the form: '
'{"public": [ip1, ip2...], "private": [ip3, ip4]}'),
'first_address': _('Convenience attribute to fetch the first '
'assigned network address, or an '
'empty string if nothing has been assigned '
'at this time. Result may not be predictable '
'if the server has addresses from more than one '
'network.'),
'instance_name': _('AWS compatible instance name'),
'accessIPv4': _('The manually assigned alternative public IPv4 '
'address of the server'),
'accessIPv6': _('The manually assigned alternative public IPv6 '
'address of the server'),
}
update_allowed_keys = ('Metadata', 'Properties')
update_allowed_properties = ('flavor', 'flavor_update_policy')
def __init__(self, name, json_snippet, stack):
super(Server, self).__init__(name, json_snippet, stack)
self.mime_string = None
def get_mime_string(self, userdata):
if not self.mime_string:
self.mime_string = nova_utils.build_userdata(self, userdata)
return self.mime_string
def physical_resource_name(self):
name = self.properties.get('name')
if name:
return name
return super(Server, self).physical_resource_name()
def handle_create(self):
security_groups = self.properties.get('security_groups', [])
userdata = self.properties.get('user_data', '')
flavor = self.properties['flavor']
availability_zone = self.properties['availability_zone']
key_name = self.properties['key_name']
if key_name:
# confirm keypair exists
nova_utils.get_keypair(self.nova(), key_name)
image = self.properties.get('image')
if image:
image = nova_utils.get_image_id(self.nova(), image)
flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)
instance_meta = self.properties.get('metadata')
scheduler_hints = self.properties.get('scheduler_hints')
nics = self._build_nics(self.properties.get('networks'))
block_device_mapping = self._build_block_device_mapping(
self.properties.get('block_device_mapping'))
reservation_id = self.properties.get('reservation_id')
config_drive = self.properties.get('config_drive')
disk_config = self.properties.get('diskConfig')
server = None
try:
server = self.nova().servers.create(
name=self.physical_resource_name(),
image=image,
flavor=flavor_id,
key_name=key_name,
security_groups=security_groups,
userdata=self.get_mime_string(userdata),
meta=instance_meta,
scheduler_hints=scheduler_hints,
nics=nics,
availability_zone=availability_zone,
block_device_mapping=block_device_mapping,
reservation_id=reservation_id,
config_drive=config_drive,
disk_config=disk_config)
finally:
# Avoid a race condition where the thread could be cancelled
# before the ID is stored
if server is not None:
self.resource_id_set(server.id)
return server
def check_create_complete(self, server):
return self._check_active(server)
def _check_active(self, server):
if server.status != 'ACTIVE':
server.get()
# Some clouds append extra (STATUS) strings to the status
short_server_status = server.status.split('(')[0]
if short_server_status in nova_utils.deferred_server_statuses:
return False
elif server.status == 'ACTIVE':
return True
elif server.status == 'ERROR':
exc = exception.Error(_('Creation of server %s failed.') %
server.name)
raise exc
else:
exc = exception.Error(_('Creation of server %(server)s failed '
'with unknown status: %(status)s') %
dict(server=server.name,
status=server.status))
raise exc
@staticmethod
def _build_block_device_mapping(bdm):
if not bdm:
return None
bdm_dict = {}
for mapping in bdm:
mapping_parts = []
if mapping.get('snapshot_id'):
mapping_parts.append(mapping.get('snapshot_id'))
mapping_parts.append('snap')
else:
mapping_parts.append(mapping.get('volume_id'))
mapping_parts.append('')
if (mapping.get('volume_size') or
mapping.get('delete_on_termination')):
mapping_parts.append(mapping.get('volume_size', '0'))
if mapping.get('delete_on_termination'):
mapping_parts.append(str(mapping.get('delete_on_termination')))
bdm_dict[mapping.get('device_name')] = ':'.join(mapping_parts)
return bdm_dict
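# Illustrative note (not part of the original resource): the method above
# flattens each block_device_mapping entry into the legacy "id:type:size:delete"
# string form expected by nova. For hypothetical input
#   [{'device_name': 'vda', 'volume_id': '1234', 'delete_on_termination': True}]
# it returns {'vda': '1234::0:True'}, and for
#   [{'device_name': 'vdb', 'snapshot_id': '5678'}]
# it returns {'vdb': '5678:snap'}.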
@staticmethod
def _build_nics(networks):
if not networks:
return None
nics = []
for net_data in networks:
nic_info = {}
if net_data.get('uuid'):
nic_info['net-id'] = net_data['uuid']
if net_data.get('fixed_ip'):
nic_info['v4-fixed-ip'] = net_data['fixed_ip']
if net_data.get('port'):
nic_info['port-id'] = net_data['port']
nics.append(nic_info)
return nics
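# Illustrative note (not part of the original resource): _build_nics maps each
# entry of the 'networks' property onto the keyword form used by
# python-novaclient, so a hypothetical
#   [{'uuid': 'net-uuid', 'fixed_ip': '10.0.0.4'}, {'port': 'port-uuid'}]
# becomes
#   [{'net-id': 'net-uuid', 'v4-fixed-ip': '10.0.0.4'}, {'port-id': 'port-uuid'}]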
def _resolve_attribute(self, name):
if name == 'first_address':
return nova_utils.server_to_ipaddress(
self.nova(), self.resource_id) or ''
server = self.nova().servers.get(self.resource_id)
if name == 'addresses':
return server.addresses
if name == 'networks':
return server.networks
if name == 'instance_name':
return server._info.get('OS-EXT-SRV-ATTR:instance_name')
if name == 'accessIPv4':
return server.accessIPv4
if name == 'accessIPv6':
return server.accessIPv6
if name == 'show':
return server._info
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if 'Metadata' in tmpl_diff:
self.metadata = tmpl_diff['Metadata']
if 'flavor' in prop_diff:
flavor_update_policy = (
prop_diff.get('flavor_update_policy') or
self.properties.get('flavor_update_policy'))
if flavor_update_policy == 'REPLACE':
raise resource.UpdateReplace(self.name)
flavor = prop_diff['flavor']
flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)
server = self.nova().servers.get(self.resource_id)
server.resize(flavor_id)
checker = scheduler.TaskRunner(nova_utils.check_resize,
server, flavor)
checker.start()
return checker
def check_update_complete(self, checker):
return checker.step() if checker is not None else True
def metadata_update(self, new_metadata=None):
'''
Refresh the metadata if new_metadata is None
'''
if new_metadata is None:
self.metadata = self.parsed_template('Metadata')
def validate(self):
'''
Validate any of the provided params
'''
super(Server, self).validate()
# check validity of key
key_name = self.properties.get('key_name', None)
if key_name:
nova_utils.get_keypair(self.nova(), key_name)
# either volume_id or snapshot_id needs to be specified, but not both
# for block device mapping.
bdm = self.properties.get('block_device_mapping') or []
bootable_vol = False
for mapping in bdm:
if mapping['device_name'] == 'vda':
bootable_vol = True
if mapping.get('volume_id') and mapping.get('snapshot_id'):
raise exception.ResourcePropertyConflict('volume_id',
'snapshot_id')
if not mapping.get('volume_id') and not mapping.get('snapshot_id'):
msg = _('Either volume_id or snapshot_id must be specified for'
' device mapping %s') % mapping['device_name']
raise exception.StackValidationFailed(message=msg)
# make sure the image exists if specified.
image = self.properties.get('image', None)
if image:
nova_utils.get_image_id(self.nova(), image)
elif not image and not bootable_vol:
msg = _('Neither image nor bootable volume is specified for'
' instance %s') % self.name
raise exception.StackValidationFailed(message=msg)
def handle_delete(self):
'''
Delete a server, blocking until it is disposed by OpenStack
'''
if self.resource_id is None:
return
try:
server = self.nova().servers.get(self.resource_id)
except clients.novaclient.exceptions.NotFound:
pass
else:
delete = scheduler.TaskRunner(nova_utils.delete_server, server)
delete(wait_time=0.2)
self.resource_id = None
def handle_suspend(self):
'''
Suspend a server - note we do not wait for the SUSPENDED state,
this is polled for by check_suspend_complete in a similar way to the
create logic so we can take advantage of coroutines
'''
if self.resource_id is None:
raise exception.Error(_('Cannot suspend %s, resource_id not set') %
self.name)
try:
server = self.nova().servers.get(self.resource_id)
except clients.novaclient.exceptions.NotFound:
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
logger.debug('suspending server %s' % self.resource_id)
# We want the server.suspend to happen after the volume
# detachement has finished, so pass both tasks and the server
suspend_runner = scheduler.TaskRunner(server.suspend)
return server, suspend_runner
def check_suspend_complete(self, cookie):
server, suspend_runner = cookie
if not suspend_runner.started():
suspend_runner.start()
if suspend_runner.done():
if server.status == 'SUSPENDED':
return True
server.get()
logger.debug('%s check_suspend_complete status = %s' %
(self.name, server.status))
if server.status in list(nova_utils.deferred_server_statuses +
['ACTIVE']):
return server.status == 'SUSPENDED'
else:
exc = exception.Error(_('Suspend of server %(server)s failed '
'with unknown status: %(status)s') %
dict(server=server.name,
status=server.status))
raise exc
def handle_resume(self):
'''
Resume a server - note we do not wait for the ACTIVE state,
this is polled for by check_resume_complete in a similar way to the
create logic so we can take advantage of coroutines
'''
if self.resource_id is None:
raise exception.Error(_('Cannot resume %s, resource_id not set') %
self.name)
try:
server = self.nova().servers.get(self.resource_id)
except clients.novaclient.exceptions.NotFound:
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
logger.debug('resuming server %s' % self.resource_id)
server.resume()
return server
def check_resume_complete(self, server):
return self._check_active(server)
def resource_mapping():
return {
'OS::Nova::Server': Server,
}
|
|
#! /usr/bin/python
import os
import subprocess
import sys
import time
import json
from shock import Client as ShockClient
from biokbase.userandjobstate.client import UserAndJobState
from biokbase.auth import kb_config
from ConfigParser import ConfigParser
from Bio import SeqIO
# Exception thrown when a command failed
class CommandError(Exception):
pass
# Default URL for production server
DefaultURL = 'https://kbase.us/services/cbd/'
''' Get the URL of the compression based distance (cbd) service endpoint.
    @return Current URL string
'''
def get_url():
# Just return the default URL when running in IRIS.
if 'KB_RUNNING_IN_IRIS' in os.environ:
return DefaultURL
# Get the URL from the config file or use the default if it is not set.
config = get_config(kb_config)
if 'url' in config:
currentURL = config['url']
else:
currentURL = DefaultURL
return currentURL
''' Set the URL of the compression based distance (cbd) service endpoint.
    @param newURL New value for the URL ('default' selects the built-in default)
    @return New URL string
'''
def set_url(newURL):
# Check for special value for the default URL.
if newURL == 'default':
newURL = DefaultURL
# Just return the URL when running in IRIS. There is no place to save it.
if 'KB_RUNNING_IN_IRIS' in os.environ:
return newURL
# Save the new URL to the config file.
config = read_config(kb_config)
config.set('CompressionBasedDistance', 'url', newURL)
with open(kb_config, 'w') as configfile:
config.write(configfile)
return newURL
''' Read a config file and make sure it has a CompressionBasedDistance section.
    @param filename Path to config file (the deployment config is used when None)
    @return ConfigParser object
'''
def read_config(filename=None):
# Use default config file if one is not specified.
if filename is None:
filename = os.path.join(os.environ['KB_TOP'], 'deployment.cfg')
# Read the config file.
config = ConfigParser()
try:
config.read(filename)
except Exception as e:
print "Error while reading config file %s: %s" % (filename, e)
# Make sure there is a CompressionBasedDistance section in the config file.
if not config.has_section('CompressionBasedDistance'):
config.add_section('CompressionBasedDistance')
with open(filename, 'w') as configfile:
config.write(configfile)
return config
''' Get the CompressionBasedDistance section of a config file as a dictionary.
    @param filename Path to config file (the deployment config is used when None)
    @return Dictionary of configuration variables
'''
def get_config(filename=None):
# Read the config file.
config = read_config(filename)
# Extract the CompressionBasedDistance section from the config file.
sectionConfig = dict()
for nameval in config.items('CompressionBasedDistance'):
sectionConfig[nameval[0]] = nameval[1]
return sectionConfig
''' Create a working directory for a job.
    @param workDirectory Path to the work folder
    @param jobID Job identifier
    @return Path to the job directory
'''
def make_job_dir(workDirectory, jobID):
jobDirectory = os.path.join(workDirectory, jobID)
if not os.path.exists(jobDirectory):
os.makedirs(jobDirectory, 0775)
return jobDirectory
''' Extract sequences from a sequence file.
The args dictionary includes the following keys:
sourceFile Path to input sequence file
format Format of input sequence file
destFile Path to output file with raw sequence reads
sequenceLen Minimum length to trim reads to (0 means to not trim)
maxReads Maximum number of reads to include in output file (0 for no maximum)
minReads Minimum number of reads to include in output file (0 for no minimum)
nodeId Node ID of sequence file in Shock (when set file is downloaded from Shock)
shockUrl URL of Shock server endpoint
auth Authorization token for user
@param args Dictionary of argument values
@return 0 when successful
'''
def extract_seq(args):
# Download the file from Shock to the working directory.
if args['nodeId'] is not None:
shockClient = ShockClient(args['shockUrl'], args['auth'])
shockClient.download_to_path(args['nodeId'], args['sourceFile'])
# Extract the sequences from the source file.
numReads = 0
with open(args['destFile'], 'w') as f:
if args['sequenceLen'] > 0: # A length to trim to was specified
for seqRecord in SeqIO.parse(args['sourceFile'], args['format']):
seq = str(seqRecord.seq)
if len(seq) < args['sequenceLen']:
continue
if len(seq) > args['sequenceLen']:
seq = seq[:args['sequenceLen']]
f.write(str(seq) + '\n')
numReads += 1
if numReads == args['maxReads']:
break
elif args['maxReads'] > 0:
for seqRecord in SeqIO.parse(args['sourceFile'], args['format']):
f.write(str(seqRecord.seq) + '\n')
numReads += 1
if numReads == args['maxReads']:
break
else:
for seqRecord in SeqIO.parse(args['sourceFile'], args['format']):
f.write(str(seqRecord.seq) + '\n')
# Delete the file if it does not have enough reads.
if args['minReads'] > 0 and numReads < args['minReads']:
os.remove(args['destFile'])
return 0
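# Illustrative sketch (not part of the original module): a typical argument
# dictionary for extract_seq(). Every path, URL and token below is a
# hypothetical placeholder.
def _example_extract_seq():
    args = {
        'sourceFile': '/tmp/sample1.fasta',  # hypothetical input file
        'format': 'fasta',                   # any format Bio.SeqIO understands
        'destFile': '/tmp/sample1.reads',    # hypothetical output file
        'sequenceLen': 0,                    # 0 means do not trim reads
        'maxReads': 0,                       # 0 means no maximum
        'minReads': 0,                       # 0 means no minimum
        'nodeId': None,                      # set to a Shock node ID to download first
        'shockUrl': 'https://kbase.us/services/shock-api/',  # hypothetical endpoint
        'auth': 'hypothetical-token',
    }
    return extract_seq(args)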
''' Run a command in a new process.
@param args List of arguments for command where the first element is the path to the command
@raise CommandError: Error running command
@raise OSError: Error starting command
@return 0 when successful
'''
def run_command(args):
err = CommandError()
try:
proc = subprocess.Popen(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
(err.stdout, err.stderr) = proc.communicate()
err.retcode = proc.returncode
if err.retcode < 0:
err.cmd = ' '.join(args)
err.message = "'%s' was terminated by signal %d" %(args[0], -err.retcode)
raise err
else:
if err.retcode > 0:
err.cmd = ' '.join(args)
err.message = "'%s' failed with return code %d" %(args[0], err.retcode)
raise err
except OSError as e:
err.cmd = ' '.join(args)
err.message = "Failed to run '%s': %s" %(args[0], e.strerror)
err.stdout = ''
err.stderr = ''
err.retcode = 255
raise err
return 0
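# Illustrative sketch (not part of the original module): run_command() raises
# CommandError when the child exits non-zero or is killed by a signal. The
# command line below is hypothetical.
def _example_run_command():
    try:
        run_command(['/bin/echo', 'hello'])
    except CommandError as e:
        print "'%s' failed with return code %d: %s" % (e.cmd, e.retcode, e.message)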
''' Get a timestamp in the format required by user and job state service.
@param deltaSeconds Number of seconds to add to the current time to get a time in the future
@return Timestamp string
'''
def timestamp(deltaSeconds):
# Use UTC timestamps to avoid timezone issues.
now = time.time() + deltaSeconds
ts = time.gmtime(now)
return time.strftime('%Y-%m-%dT%H:%M:%S+0000', ts)
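# Illustrative note (not part of the original module): timestamp(0) gives the
# current UTC time in the '%Y-%m-%dT%H:%M:%S+0000' form the user and job state
# service expects, and timestamp(3600) gives a point one hour in the future,
# which is how start_job() below supplies a completion estimate for a new job.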
''' Convert a job info tuple into a dictionary. '''
def job_info_dict(infoTuple):
info = dict()
info['id'] = infoTuple[0]
info['service'] = infoTuple[1]
info['stage'] = infoTuple[2]
info['started'] = infoTuple[3]
info['status'] = infoTuple[4]
info['last_update'] = infoTuple[5]
info['total_progress'] = infoTuple[6]
info['max_progress'] = infoTuple[7]
info['progress_type'] = infoTuple[8]
info['est_complete'] = infoTuple[9]
info['complete'] = infoTuple[10]
info['error'] = infoTuple[11]
info['description'] = infoTuple[12]
info['results'] = infoTuple[13]
return info
''' Parse the input file for building a matrix.
@param inputPath Path to input file with list of files
@raise IOError: Error opening input file
@return List of paths to sequence files, dictionary of file extensions, number of missing files
'''
def parse_input_file(inputPath):
# Open the input file with the list of files.
try:
infile = open(inputPath, "r")
except IOError as e:
print "Error opening input list file '%s': %s" %(inputPath, e.strerror)
return None, None, 1
# Make sure all of the files in the list of files exist.
numMissingFiles = 0
fileList = list()
extensions = dict()
for line in infile:
line = line.strip('\n\r')
if line and line[0] != '#': # Skip empty lines and comment lines
fields = line.split('\t')
filename = fields[0]
if os.path.isfile(filename):
fileList.append(filename)
ext = os.path.splitext(filename)[1].split('.')[-1]
extensions[ext] = 1
else:
print "'%s' does not exist" %(filename)
numMissingFiles += 1
infile.close()
if numMissingFiles > 0:
print "%d files are not accessible. Update %s with correct file names" %(numMissingFiles, inputPath)
return fileList, extensions, numMissingFiles
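# Illustrative sketch (not part of the original module): parse_input_file()
# reads a list file where each non-comment line starts with the path to a
# sequence file (any additional tab-separated fields are ignored), e.g. a
# hypothetical /tmp/input.list containing:
#
#   # my samples
#   /data/sample1.fasta
#   /data/sample2.fastq
#
def _example_parse_input_file():
    fileList, extensions, numMissing = parse_input_file('/tmp/input.list')
    if numMissing == 0:
        return fileList, extensions
    return None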
''' Start a job to build a matrix.
@param config Dictionary of configuration variables
@param context Dictionary of current context variables
@param input Dictionary of input variables
@return Job ID
'''
def start_job(config, context, input):
# Create a user and job state client and authenticate as the user.
ujsClient = UserAndJobState(config['userandjobstate_url'], token=context['token'])
# Create a job to track building the distance matrix.
status = 'initializing'
description = 'cbd-buildmatrix with %d files for user %s' %(len(input['node_ids'])+len(input['file_paths']), context['user_id'])
progress = { 'ptype': 'task', 'max': 6 }
job_id = ujsClient.create_and_start_job(context['token'], status, description, progress, timestamp(3600))
# Create working directory for job and build file names.
jobDirectory = make_job_dir(config['work_folder_path'], job_id)
jobDataFilename = os.path.join(jobDirectory, 'jobdata.json')
outputFilename = os.path.join(jobDirectory, 'stdout.log')
errorFilename = os.path.join(jobDirectory, 'stderr.log')
# Save data required for running the job.
# Another option is to create a key of the jobid and store state.
jobData = { 'id': job_id, 'input': input, 'context': context, 'config': config }
json.dump(jobData, open(jobDataFilename, "w"), indent=4)
# Start worker to run the job.
jobScript = os.path.join(os.environ['KB_TOP'], 'bin/cbd-runjob')
cmdline = "nohup %s %s >%s 2>%s &" %(jobScript, jobDataFilename, outputFilename, errorFilename)
status = os.system(cmdline)
return job_id
|
|
"""
Commands that are available from the connect screen.
"""
import re
import time
from collections import defaultdict
from random import getrandbits
from django.conf import settings
from django.contrib.auth import authenticate
from evennia.players.models import PlayerDB
from evennia.objects.models import ObjectDB
from evennia.server.models import ServerConfig
from evennia.comms.models import ChannelDB
from evennia.utils import create, logger, utils
from evennia.commands.cmdhandler import CMD_LOGINSTART
COMMAND_DEFAULT_CLASS = utils.class_from_module(settings.COMMAND_DEFAULT_CLASS)
# limit symbol import for API
__all__ = ("CmdUnconnectedConnect", "CmdUnconnectedCreate",
"CmdUnconnectedQuit", "CmdUnconnectedLook", "CmdUnconnectedHelp")
MULTISESSION_MODE = settings.MULTISESSION_MODE
CONNECTION_SCREEN_MODULE = settings.CONNECTION_SCREEN_MODULE
# Helper function to throttle failed connection attempts.
# This can easily be used to limit player creation too,
# (just supply a different storage dictionary), but this
# would also block dummyrunner, so it's not added as default.
_LATEST_FAILED_LOGINS = defaultdict(list)
def _throttle(session, maxlim=None, timeout=None, storage=_LATEST_FAILED_LOGINS):
"""
This will check the session's address against the
_LATEST_FAILED_LOGINS dictionary to check they haven't
spammed too many fails recently.
Args:
session (Session): Session failing
maxlim (int): max number of attempts to allow
timeout (int): number of timeout seconds after
max number of tries has been reached.
Returns:
throttles (bool): True if throttling is active,
False otherwise.
Notes:
If maxlim and/or timeout are set, the function will
just do the comparison, not append a new datapoint.
"""
address = session.address
if isinstance(address, tuple):
address = address[0]
now = time.time()
if maxlim and timeout:
# checking mode
latest_fails = storage[address]
if latest_fails and len(latest_fails) >= maxlim:
# too many fails recently
if now - latest_fails[-1] < timeout:
# too soon - timeout in play
return True
else:
# timeout has passed. Reset faillist
storage[address] = []
return False
else:
return False
else:
# store the time of the latest fail
storage[address].append(time.time())
return False
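# Illustrative sketch (not part of the original module): the two calling modes
# of _throttle(), mirroring how the connect/create commands below use it.
# Passing maxlim/timeout only checks the address; calling with no limits
# records another failed attempt.
def _example_throttle(session):
    if _throttle(session, maxlim=5, timeout=5 * 60):
        return True  # throttled: too many recent failures from this address
    _throttle(session)  # record a new failed attempt for this address
    return False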
def create_guest_player(session):
"""
Creates a guest player/character for this session, if one is available.
Args:
session (Session): the session which will use the guest player/character.
Returns:
GUEST_ENABLED (boolean), player (Player):
the boolean is whether guest accounts are enabled at all.
the Player which was created from an available guest name.
"""
# check if guests are enabled.
if not settings.GUEST_ENABLED:
return False, None
# Check IP bans.
bans = ServerConfig.objects.conf("server_bans")
if bans and any(tup[2].match(session.address) for tup in bans if tup[2]):
# this is a banned IP!
string = "|rYou have been banned and cannot continue from here." \
"\nIf you feel this ban is in error, please email an admin.|x"
session.msg(string)
session.sessionhandler.disconnect(session, "Good bye! Disconnecting.")
return True, None
try:
# Find an available guest name.
playername = None
for name in settings.GUEST_LIST:
if not PlayerDB.objects.filter(username__iexact=name).count():
playername = name
break
if not playername:
session.msg("All guest accounts are in use. Please try again later.")
return True, None
else:
# build a new player with the found guest playername
password = "%016x" % getrandbits(64)
home = ObjectDB.objects.get_id(settings.GUEST_HOME)
permissions = settings.PERMISSION_GUEST_DEFAULT
typeclass = settings.BASE_CHARACTER_TYPECLASS
ptypeclass = settings.BASE_GUEST_TYPECLASS
new_player = _create_player(session, playername, password, permissions, ptypeclass)
if new_player:
_create_character(session, new_player, typeclass, home, permissions)
return True, new_player
except Exception:
# We are in the middle between logged in and -not, so we have
# to handle tracebacks ourselves at this point. If we don't,
# we won't see any errors at all.
session.msg("An error occurred. Please e-mail an admin if the problem persists.")
logger.log_trace()
raise
def create_normal_player(session, name, password):
"""
Creates a player with the given name and password.
Args:
session (Session): the session which is requesting to create a player.
name (str): the name that the player wants to use for login.
password (str): the password desired by this player, for login.
Returns:
player (Player): the player which was created from the name and password.
"""
# check for too many login errors too quick.
if _throttle(session, maxlim=5, timeout=5*60):
# timeout is 5 minutes.
session.msg("|RYou made too many connection attempts. Try again in a few minutes.|n")
return None
# Match account name and check password
player = authenticate(username=name, password=password)
if not player:
# No playername or password match
session.msg("Incorrect login information given.")
# this just updates the throttle
_throttle(session)
# calls player hook for a failed login if possible.
player = PlayerDB.objects.get_player_from_name(name)
if player:
player.at_failed_login(session)
return None
# Check IP and/or name bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0] == player.name.lower() for tup in bans)
or
any(tup[2].match(session.address) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "|rYou have been banned and cannot continue from here." \
"\nIf you feel this ban is in error, please email an admin.|x"
session.msg(string)
session.sessionhandler.disconnect(session, "Good bye! Disconnecting.")
return None
return player
class CmdUnconnectedConnect(COMMAND_DEFAULT_CLASS):
"""
connect to the game
Usage (at login screen):
connect playername password
connect "player name" "pass word"
Use the create command to first create an account before logging in.
If you have spaces in your name, enclose it in double quotes.
"""
key = "connect"
aliases = ["conn", "con", "co"]
locks = "cmd:all()" # not really needed
arg_regex = r"\s.*?|$"
def func(self):
"""
Uses the Django admin api. Note that unlogged-in commands
have a unique position in that their func() receives
a session object instead of a source_object like all
other types of logged-in commands (this is because
there is no object yet before the player has logged in)
"""
session = self.caller
# check for too many login errors too quick.
if _throttle(session, maxlim=5, timeout=5*60, storage=_LATEST_FAILED_LOGINS):
# timeout is 5 minutes.
session.msg("|RYou made too many connection attempts. Try again in a few minutes.|n")
return
args = self.args
# extract double quote parts
parts = [part.strip() for part in re.split(r"\"", args) if part.strip()]
if len(parts) == 1:
# this was (hopefully) due to no double quotes being found, or a guest login
parts = parts[0].split(None, 1)
# Guest login
if len(parts) == 1 and parts[0].lower() == "guest":
enabled, new_player = create_guest_player(session)
if new_player:
session.sessionhandler.login(session, new_player)
if enabled:
return
if len(parts) != 2:
session.msg("\n\r Usage (without <>): connect <name> <password>")
return
name, password = parts
player = create_normal_player(session, name, password)
if player:
session.sessionhandler.login(session, player)
class CmdUnconnectedCreate(COMMAND_DEFAULT_CLASS):
"""
create a new player account
Usage (at login screen):
create <playername> <password>
create "player name" "pass word"
This creates a new player account.
If you have spaces in your name, enclose it in double quotes.
"""
key = "create"
aliases = ["cre", "cr"]
locks = "cmd:all()"
arg_regex = r"\s.*?|$"
def func(self):
"""Do checks and create account"""
session = self.caller
args = self.args.strip()
# extract double quoted parts
parts = [part.strip() for part in re.split(r"\"", args) if part.strip()]
if len(parts) == 1:
# this was (hopefully) due to no quotes being found
parts = parts[0].split(None, 1)
if len(parts) != 2:
string = "\n Usage (without <>): create <name> <password>" \
"\nIf <name> or <password> contains spaces, enclose it in double quotes."
session.msg(string)
return
playername, password = parts
# sanity checks
if not re.findall(r"^[\w. \+\_\-']+$", playername) or not (0 < len(playername) <= 30):
# this echoes the restrictions made by django's auth
# module (except not allowing spaces, for convenience of
# logging in).
string = "\n\r Playername can max be 30 characters or fewer. Letters, spaces, digits and -/_/+/' only."
session.msg(string)
return
# strip excessive spaces in playername
playername = re.sub(r"\s+", " ", playername).strip()
if PlayerDB.objects.filter(username__iexact=playername):
# player already exists (we also ignore capitalization here)
session.msg("Sorry, there is already a player with the name '%s'." % playername)
return
# Reserve playernames found in GUEST_LIST
if settings.GUEST_LIST and playername.lower() in (guest.lower() for guest in settings.GUEST_LIST):
string = "\n\r That name is reserved. Please choose another Playername."
session.msg(string)
return
if not re.findall(r"^[\w. @+\-']+$", password) or not (3 < len(password)):
string = "\n\r Password should be longer than 3 characers. Letters, spaces, digits and @/./+/-/_/' only." \
"\nFor best security, make it longer than 8 characters. You can also use a phrase of" \
"\nmany words if you enclose the password in double quotes."
session.msg(string)
return
# Check IP and/or name bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0] == playername.lower() for tup in bans)
or
any(tup[2].match(session.address) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "|rYou have been banned and cannot continue from here." \
"\nIf you feel this ban is in error, please email an admin.|x"
session.msg(string)
session.sessionhandler.disconnect(session, "Good bye! Disconnecting.")
return
# everything's ok. Create the new player account.
try:
permissions = settings.PERMISSION_PLAYER_DEFAULT
typeclass = settings.BASE_CHARACTER_TYPECLASS
new_player = _create_player(session, playername, password, permissions)
if new_player:
if MULTISESSION_MODE < 2:
default_home = ObjectDB.objects.get_id(settings.DEFAULT_HOME)
_create_character(session, new_player, typeclass, default_home, permissions)
# tell the caller everything went well.
string = "A new account '%s' was created. Welcome!"
if " " in playername:
string += "\n\nYou can now log in with the command 'connect \"%s\" <your password>'."
else:
string += "\n\nYou can now log with the command 'connect %s <your password>'."
session.msg(string % (playername, playername))
except Exception:
# We are in the middle between logged in and -not, so we have
# to handle tracebacks ourselves at this point. If we don't,
# we won't see any errors at all.
session.msg("An error occurred. Please e-mail an admin if the problem persists.")
logger.log_trace()
class CmdUnconnectedQuit(COMMAND_DEFAULT_CLASS):
"""
quit when in unlogged-in state
Usage:
quit
We maintain a different version of the quit command
here for unconnected players for the sake of simplicity. The logged in
version is a bit more complicated.
"""
key = "quit"
aliases = ["q", "qu"]
locks = "cmd:all()"
def func(self):
"""Simply close the connection."""
session = self.caller
session.sessionhandler.disconnect(session, "Good bye! Disconnecting.")
class CmdUnconnectedLook(COMMAND_DEFAULT_CLASS):
"""
look when in unlogged-in state
Usage:
look
This is an unconnected version of the look command for simplicity.
This is called by the server and kicks everything in gear.
All it does is display the connect screen.
"""
key = CMD_LOGINSTART
aliases = ["look", "l"]
locks = "cmd:all()"
def func(self):
"""Show the connect screen."""
connection_screen = utils.random_string_from_module(CONNECTION_SCREEN_MODULE)
if not connection_screen:
connection_screen = "No connection screen found. Please contact an admin."
self.caller.msg(connection_screen)
class CmdUnconnectedHelp(COMMAND_DEFAULT_CLASS):
"""
get help when in unconnected-in state
Usage:
help
This is an unconnected version of the help command,
for simplicity. It shows a pane of info.
"""
key = "help"
aliases = ["h", "?"]
locks = "cmd:all()"
def func(self):
"""Shows help"""
string = \
"""
You are not yet logged into the game. Commands available at this point:
|wcreate|n - create a new account
|wconnect|n - connect with an existing account
|wlook|n - re-show the connection screen
|whelp|n - show this help
|wencoding|n - change the text encoding to match your client
|wscreenreader|n - make the server more suitable for use with screen readers
|wquit|n - abort the connection
First create an account e.g. with |wcreate Anna c67jHL8p|n
(If you have spaces in your name, use double quotes: |wcreate "Anna the Barbarian" c67jHL8p|n)
Next you can connect to the game: |wconnect Anna c67jHL8p|n
You can use the |wlook|n command if you want to see the connect screen again.
"""
self.caller.msg(string)
class CmdUnconnectedEncoding(COMMAND_DEFAULT_CLASS):
"""
set which text encoding to use in unconnected-in state
Usage:
encoding/switches [<encoding>]
Switches:
clear - clear your custom encoding
This sets the text encoding for communicating with Evennia. This is mostly
an issue only if you want to use non-ASCII characters (i.e. letters/symbols
not found in English). If you see that your characters look strange (or you
get encoding errors), you should use this command to set the server
encoding to be the same used in your client program.
Common encodings are utf-8 (default), latin-1, ISO-8859-1 etc.
If you don't submit an encoding, the current encoding will be displayed
instead.
"""
key = "encoding"
aliases = ("@encoding", "@encode")
locks = "cmd:all()"
def func(self):
"""
Sets the encoding.
"""
if self.session is None:
return
sync = False
if 'clear' in self.switches:
# remove customization
old_encoding = self.session.protocol_flags.get("ENCODING", None)
if old_encoding:
string = "Your custom text encoding ('%s') was cleared." % old_encoding
else:
string = "No custom encoding was set."
self.session.protocol_flags["ENCODING"] = "utf-8"
sync = True
elif not self.args:
# just list the encodings supported
pencoding = self.session.protocol_flags.get("ENCODING", None)
string = ""
if pencoding:
string += "Default encoding: |g%s|n (change with |w@encoding <encoding>|n)" % pencoding
encodings = settings.ENCODINGS
if encodings:
string += "\nServer's alternative encodings (tested in this order):\n |g%s|n" % ", ".join(encodings)
if not string:
string = "No encodings found."
else:
# change encoding
old_encoding = self.session.protocol_flags.get("ENCODING", None)
encoding = self.args
try:
utils.to_str(utils.to_unicode("test-string"), encoding=encoding)
except LookupError:
string = "|rThe encoding '|w%s|r' is invalid. Keeping the previous encoding '|w%s|r'.|n"\
% (encoding, old_encoding)
else:
self.session.protocol_flags["ENCODING"] = encoding
string = "Your custom text encoding was changed from '|w%s|n' to '|w%s|n'." % (old_encoding, encoding)
sync = True
if sync:
self.session.sessionhandler.session_portal_sync(self.session)
self.caller.msg(string.strip())
class CmdUnconnectedScreenreader(COMMAND_DEFAULT_CLASS):
"""
Activate screenreader mode.
Usage:
screenreader
Used to flip screenreader mode on and off before logging in (when
logged in, use @option screenreader on).
"""
key = "screenreader"
aliases = "@screenreader"
def func(self):
"""Flips screenreader setting."""
new_setting = not self.session.protocol_flags.get("SCREENREADER", False)
self.session.protocol_flags["SCREENREADER"] = new_setting
string = "Screenreader mode turned |w%s|n." % ("on" if new_setting else "off")
self.caller.msg(string)
self.session.sessionhandler.session_portal_sync(self.session)
def _create_player(session, playername, password, permissions, typeclass=None, email=None):
"""
Helper function, creates a player of the specified typeclass.
"""
try:
new_player = create.create_player(playername, email, password, permissions=permissions, typeclass=typeclass)
except Exception as e:
session.msg("There was an error creating the Player:\n%s\n If this problem persists, contact an admin." % e)
logger.log_trace()
return False
# This needs to be set so the engine knows this player is
# logging in for the first time. (so it knows to call the right
# hooks during login later)
new_player.db.FIRST_LOGIN = True
# join the new player to the public channel
pchannel = ChannelDB.objects.get_channel(settings.DEFAULT_CHANNELS[0]["key"])
if not pchannel or not pchannel.connect(new_player):
string = "New player '%s' could not connect to public channel!" % new_player.key
logger.log_err(string)
return new_player
def _create_character(session, new_player, typeclass, home, permissions):
"""
Helper function, creates a character based on a player's name.
This is meant for Guest and MULTISESSION_MODE < 2 situations.
"""
try:
new_character = create.create_object(typeclass, key=new_player.key, home=home, permissions=permissions)
# set playable character list
new_player.db._playable_characters.append(new_character)
# allow only the character itself and the player to puppet this character (and Immortals).
new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)" %
(new_character.id, new_player.id))
# If no description is set, set a default description
if not new_character.db.desc:
new_character.db.desc = "This is a Player."
# We need to set this to have @ic auto-connect to this character
new_player.db._last_puppet = new_character
except Exception as e:
session.msg("There was an error creating the Character:\n%s\n If this problem persists, contact an admin." % e)
logger.log_trace()
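# A minimal usage sketch (hypothetical wiring; the real login/creation command
# combines these helpers with more validation). BASE_CHARACTER_TYPECLASS and
# DEFAULT_HOME are assumed to be available in settings:
#
#     player = _create_player(session, "Anna", "s3cret", permissions="Players")
#     if player:
#         _create_character(session, player, settings.BASE_CHARACTER_TYPECLASS,
#                           home=settings.DEFAULT_HOME, permissions="Players")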
|
|
from __future__ import absolute_import
import copy
import six
import pytest
import pytz
from sentry.utils.compat.mock import patch
from datetime import timedelta
from six.moves.urllib.parse import urlencode
from selenium.webdriver.common.keys import Keys
from sentry.discover.models import DiscoverSavedQuery
from sentry.testutils import AcceptanceTestCase, SnubaTestCase
from sentry.utils.samples import load_data
from sentry.testutils.helpers.datetime import iso_format, before_now, timestamp_format
FEATURE_NAMES = [
"organizations:discover-basic",
"organizations:discover-query",
"organizations:performance-view",
]
def all_events_query(**kwargs):
options = {
"sort": ["-timestamp"],
"field": ["title", "event.type", "project", "user.display", "timestamp"],
"name": ["All Events"],
}
options.update(kwargs)
return urlencode(options, doseq=True)
def errors_query(**kwargs):
options = {
"sort": ["-title"],
"name": ["Errors"],
"field": ["title", "count(id)", "count_unique(user)", "project"],
"query": ["event.type:error"],
}
options.update(kwargs)
return urlencode(options, doseq=True)
def transactions_query(**kwargs):
options = {
"sort": ["-count"],
"name": ["Transactions"],
"field": ["transaction", "project", "count()"],
"statsPeriod": ["14d"],
"query": ["event.type:transaction"],
}
options.update(kwargs)
return urlencode(options, doseq=True)
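# For illustration only (hypothetical output; the exact key order depends on
# dict ordering in the running Python version), errors_query(statsPeriod=["24h"])
# yields a URL-encoded string along the lines of:
#   sort=-title&name=Errors&field=title&field=count%28id%29&field=count_unique%28user%29&field=project&query=event.type%3Aerror&statsPeriod=24h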
def generate_transaction(trace=None, span=None):
start_datetime = before_now(minutes=1, milliseconds=500)
end_datetime = before_now(minutes=1)
event_data = load_data(
"transaction",
timestamp=end_datetime,
start_timestamp=start_datetime,
trace=trace,
span=span,
)
event_data.update({"event_id": "a" * 32})
# generate and build up span tree
reference_span = event_data["spans"][0]
parent_span_id = reference_span["parent_span_id"]
span_tree_blueprint = {
"a": {},
"b": {"bb": {"bbb": {"bbbb": "bbbbb"}}},
"c": {},
"d": {},
"e": {},
}
time_offsets = {
"a": (timedelta(), timedelta(milliseconds=10)),
"b": (timedelta(milliseconds=120), timedelta(milliseconds=250)),
"bb": (timedelta(milliseconds=130), timedelta(milliseconds=10)),
"bbb": (timedelta(milliseconds=140), timedelta(milliseconds=10)),
"bbbb": (timedelta(milliseconds=150), timedelta(milliseconds=10)),
"bbbbb": (timedelta(milliseconds=160), timedelta(milliseconds=90)),
"c": (timedelta(milliseconds=260), timedelta(milliseconds=100)),
"d": (timedelta(milliseconds=375), timedelta(milliseconds=50)),
"e": (timedelta(milliseconds=400), timedelta(milliseconds=100)),
}
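    # Each blueprint key becomes a span: the key (padded to 16 chars with "0")
    # is used as the span_id, nesting defines the parent/child links, and the
    # time_offsets table supplies (start offset from the transaction start,
    # span duration) for every span_id.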
def build_span_tree(span_tree, spans, parent_span_id):
for span_id, child in sorted(span_tree.items(), key=lambda item: item[0]):
span = copy.deepcopy(reference_span)
# non-leaf node span
span["parent_span_id"] = parent_span_id.ljust(16, "0")
span["span_id"] = span_id.ljust(16, "0")
(start_delta, span_length) = time_offsets.get(span_id, (timedelta(), timedelta()))
span_start_time = start_datetime + start_delta
span["start_timestamp"] = timestamp_format(span_start_time)
span["timestamp"] = timestamp_format(span_start_time + span_length)
spans.append(span)
if isinstance(child, dict):
spans = build_span_tree(child, spans, span_id)
elif isinstance(child, six.string_types):
parent_span_id = span_id
span_id = child
span = copy.deepcopy(reference_span)
# leaf node span
span["parent_span_id"] = parent_span_id.ljust(16, "0")
span["span_id"] = span_id.ljust(16, "0")
(start_delta, span_length) = time_offsets.get(span_id, (timedelta(), timedelta()))
span_start_time = start_datetime + start_delta
span["start_timestamp"] = timestamp_format(span_start_time)
span["timestamp"] = timestamp_format(span_start_time + span_length)
spans.append(span)
return spans
event_data["spans"] = build_span_tree(span_tree_blueprint, [], parent_span_id)
return event_data
class OrganizationEventsV2Test(AcceptanceTestCase, SnubaTestCase):
def setUp(self):
super(OrganizationEventsV2Test, self).setUp()
self.user = self.create_user("[email protected]", is_superuser=True)
self.org = self.create_organization(name="Rowdy Tiger")
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
self.login_as(self.user)
self.landing_path = u"/organizations/{}/discover/queries/".format(self.org.slug)
self.result_path = u"/organizations/{}/discover/results/".format(self.org.slug)
def wait_until_loaded(self):
self.browser.wait_until_not(".loading-indicator")
self.browser.wait_until_not('[data-test-id="loading-placeholder"]')
def test_events_default_landing(self):
with self.feature(FEATURE_NAMES):
self.browser.get(self.landing_path)
self.wait_until_loaded()
self.browser.snapshot("events-v2 - default landing")
def test_all_events_query_empty_state(self):
with self.feature(FEATURE_NAMES):
self.browser.get(self.result_path + "?" + all_events_query())
self.wait_until_loaded()
self.browser.snapshot("events-v2 - all events query - empty state")
with self.feature(FEATURE_NAMES):
# expect table to expand to the right when no tags are provided
self.browser.get(self.result_path + "?" + all_events_query(tag=[]))
self.wait_until_loaded()
self.browser.snapshot("events-v2 - all events query - empty state - no tags")
@patch("django.utils.timezone.now")
def test_all_events_query(self, mock_now):
mock_now.return_value = before_now().replace(tzinfo=pytz.utc)
min_ago = iso_format(before_now(minutes=1))
two_min_ago = iso_format(before_now(minutes=2))
self.store_event(
data={
"event_id": "a" * 32,
"message": "oh no",
"timestamp": min_ago,
"fingerprint": ["group-1"],
},
project_id=self.project.id,
assert_no_errors=False,
)
self.store_event(
data={
"event_id": "b" * 32,
"message": "this is bad.",
"timestamp": two_min_ago,
"fingerprint": ["group-2"],
"user": {
"id": "123",
"email": "[email protected]",
"username": "haveibeenpwned",
"ip_address": "8.8.8.8",
"name": "Someone",
},
},
project_id=self.project.id,
assert_no_errors=False,
)
with self.feature(FEATURE_NAMES):
self.browser.get(self.result_path + "?" + all_events_query())
self.wait_until_loaded()
            # This test is flaky in that we sometimes load this page before the event is processed;
            # we depend on pytest-retry to reload the page.
self.browser.wait_until_not(
'[data-test-id="grid-editable"] [data-test-id="empty-state"]', timeout=2
)
self.browser.snapshot("events-v2 - all events query - list")
with self.feature(FEATURE_NAMES):
# expect table to expand to the right when no tags are provided
self.browser.get(self.result_path + "?" + all_events_query(tag=[]))
self.wait_until_loaded()
self.browser.snapshot("events-v2 - all events query - list - no tags")
def test_errors_query_empty_state(self):
with self.feature(FEATURE_NAMES):
self.browser.get(self.result_path + "?" + errors_query())
self.wait_until_loaded()
self.browser.snapshot("events-v2 - errors query - empty state")
self.browser.click_when_visible('[data-test-id="grid-edit-enable"]')
self.browser.snapshot(
"events-v2 - errors query - empty state - querybuilder - column edit state"
)
@patch("django.utils.timezone.now")
def test_errors_query(self, mock_now):
mock_now.return_value = before_now().replace(tzinfo=pytz.utc)
min_ago = iso_format(before_now(minutes=1))
self.store_event(
data={
"event_id": "a" * 32,
"message": "oh no",
"timestamp": min_ago,
"fingerprint": ["group-1"],
"type": "error",
},
project_id=self.project.id,
assert_no_errors=False,
)
self.store_event(
data={
"event_id": "b" * 32,
"message": "oh no",
"timestamp": min_ago,
"fingerprint": ["group-1"],
"type": "error",
},
project_id=self.project.id,
assert_no_errors=False,
)
self.store_event(
data={
"event_id": "c" * 32,
"message": "this is bad.",
"timestamp": min_ago,
"fingerprint": ["group-2"],
"type": "error",
},
project_id=self.project.id,
assert_no_errors=False,
)
with self.feature(FEATURE_NAMES):
self.browser.get(self.result_path + "?" + errors_query())
self.wait_until_loaded()
self.browser.snapshot("events-v2 - errors")
def test_transactions_query_empty_state(self):
with self.feature(FEATURE_NAMES):
self.browser.get(self.result_path + "?" + transactions_query())
self.wait_until_loaded()
self.browser.snapshot("events-v2 - transactions query - empty state")
with self.feature(FEATURE_NAMES):
# expect table to expand to the right when no tags are provided
self.browser.get(self.result_path + "?" + transactions_query(tag=[]))
self.wait_until_loaded()
self.browser.snapshot("events-v2 - transactions query - empty state - no tags")
@patch("django.utils.timezone.now")
def test_transactions_query(self, mock_now):
mock_now.return_value = before_now().replace(tzinfo=pytz.utc)
event_data = generate_transaction()
self.store_event(data=event_data, project_id=self.project.id, assert_no_errors=True)
with self.feature(FEATURE_NAMES):
self.browser.get(self.result_path + "?" + transactions_query())
self.wait_until_loaded()
self.browser.wait_until_not(
'[data-test-id="grid-editable"] [data-test-id="empty-state"]', timeout=2
)
self.browser.snapshot("events-v2 - transactions query - list")
@patch("django.utils.timezone.now")
def test_event_detail_view_from_all_events(self, mock_now):
mock_now.return_value = before_now().replace(tzinfo=pytz.utc)
min_ago = iso_format(before_now(minutes=1))
event_data = load_data("python")
event_data.update(
{
"event_id": "a" * 32,
"timestamp": min_ago,
"received": min_ago,
"fingerprint": ["group-1"],
}
)
self.store_event(data=event_data, project_id=self.project.id, assert_no_errors=False)
with self.feature(FEATURE_NAMES):
# Get the list page.
self.browser.get(self.result_path + "?" + all_events_query())
self.wait_until_loaded()
# View Event
self.browser.elements('[data-test-id="view-event"]')[0].click()
self.wait_until_loaded()
header = self.browser.element('[data-test-id="event-header"] span')
assert event_data["message"] in header.text
self.browser.snapshot("events-v2 - single error details view")
@patch("django.utils.timezone.now")
def test_event_detail_view_from_errors_view(self, mock_now):
mock_now.return_value = before_now().replace(tzinfo=pytz.utc)
event_data = load_data("javascript")
event_data.update(
{
"timestamp": iso_format(before_now(minutes=5)),
"event_id": "d" * 32,
"fingerprint": ["group-1"],
}
)
self.store_event(data=event_data, project_id=self.project.id)
with self.feature(FEATURE_NAMES):
# Get the list page
self.browser.get(self.result_path + "?" + errors_query() + "&statsPeriod=24h")
self.wait_until_loaded()
# Open the stack
self.browser.element('[data-test-id="open-stack"]').click()
self.wait_until_loaded()
# View Event
self.browser.elements('[data-test-id="view-event"]')[0].click()
self.wait_until_loaded()
self.browser.snapshot("events-v2 - error event detail view")
@patch("django.utils.timezone.now")
def test_event_detail_view_from_transactions_query(self, mock_now):
mock_now.return_value = before_now().replace(tzinfo=pytz.utc)
event_data = generate_transaction(trace="a" * 32, span="ab" * 8)
self.store_event(data=event_data, project_id=self.project.id, assert_no_errors=True)
# Create a child event that is linked to the parent so we have coverage
# of traversal buttons.
child_event = generate_transaction(
trace=event_data["contexts"]["trace"]["trace_id"], span="bc" * 8
)
child_event["event_id"] = "b" * 32
child_event["contexts"]["trace"]["parent_span_id"] = event_data["spans"][4]["span_id"]
child_event["transaction"] = "z-child-transaction"
child_event["spans"] = child_event["spans"][0:3]
self.store_event(data=child_event, project_id=self.project.id, assert_no_errors=True)
with self.feature(FEATURE_NAMES):
# Get the list page
self.browser.get(self.result_path + "?" + transactions_query())
self.wait_until_loaded()
# Open the stack
self.browser.elements('[data-test-id="open-stack"]')[0].click()
self.wait_until_loaded()
# View Event
self.browser.elements('[data-test-id="view-event"]')[0].click()
self.wait_until_loaded()
# Open a span detail so we can check the search by trace link.
# Click on the 6th one as a missing instrumentation span is inserted.
self.browser.elements('[data-test-id="span-row"]')[6].click()
# Wait until the child event loads.
child_button = '[data-test-id="view-child-transaction"]'
self.browser.wait_until(child_button)
self.browser.snapshot("events-v2 - transactions event detail view")
# Click on the child transaction.
self.browser.click(child_button)
self.wait_until_loaded()
@patch("django.utils.timezone.now")
def test_transaction_event_detail_view_ops_filtering(self, mock_now):
mock_now.return_value = before_now().replace(tzinfo=pytz.utc)
event_data = generate_transaction(trace="a" * 32, span="ab" * 8)
self.store_event(data=event_data, project_id=self.project.id, assert_no_errors=True)
with self.feature(FEATURE_NAMES):
# Get the list page
self.browser.get(self.result_path + "?" + transactions_query())
self.wait_until_loaded()
# Open the stack
self.browser.elements('[data-test-id="open-stack"]')[0].click()
self.wait_until_loaded()
# View Event
self.browser.elements('[data-test-id="view-event"]')[0].click()
self.wait_until_loaded()
# Interact with ops filter dropdown
self.browser.elements('[data-test-id="filter-button"]')[0].click()
# select all ops
self.browser.elements(
'[data-test-id="op-filter-dropdown"] [data-test-id="checkbox-fancy"]'
)[0].click()
# un-select django.middleware
self.browser.elements(
'[data-test-id="op-filter-dropdown"] [data-test-id="checkbox-fancy"]'
)[1].click()
self.browser.snapshot("events-v2 - transactions event detail view - ops filtering")
def test_create_saved_query(self):
# Simulate a custom query
query = {"field": ["project.id", "count()"], "query": "event.type:error"}
query_name = "A new custom query"
with self.feature(FEATURE_NAMES):
# Go directly to the query builder view
self.browser.get(self.result_path + "?" + urlencode(query, doseq=True))
self.wait_until_loaded()
# Open the save as drawer
self.browser.element('[data-test-id="button-save-as"]').click()
# Fill out name and submit form.
self.browser.element('input[name="query_name"]').send_keys(query_name)
self.browser.element('[data-test-id="button-save-query"]').click()
self.browser.wait_until(
'div[name="discover2-query-name"][value="{}"]'.format(query_name)
)
# Page title should update.
title_input = self.browser.element('div[name="discover2-query-name"]')
assert title_input.get_attribute("value") == query_name
# Saved query should exist.
assert DiscoverSavedQuery.objects.filter(name=query_name).exists()
def test_view_and_rename_saved_query(self):
# Create saved query to rename
query = DiscoverSavedQuery.objects.create(
name="Custom query",
organization=self.org,
version=2,
query={"fields": ["title", "project.id", "count()"], "query": "event.type:error"},
)
with self.feature(FEATURE_NAMES):
# View the query list
self.browser.get(self.landing_path)
self.wait_until_loaded()
# Look at the results for our query.
self.browser.element('[data-test-id="card-{}"]'.format(query.name)).click()
self.wait_until_loaded()
input = self.browser.element('div[name="discover2-query-name"]')
input.click()
input.send_keys(Keys.END + "updated!")
# Move focus somewhere else to trigger a blur and update the query
self.browser.element("table").click()
new_name = "Custom queryupdated!"
new_card_selector = 'div[name="discover2-query-name"][value="{}"]'.format(new_name)
self.browser.wait_until(new_card_selector)
# Assert the name was updated.
assert DiscoverSavedQuery.objects.filter(name=new_name).exists()
def test_delete_saved_query(self):
# Create saved query with ORM
query = DiscoverSavedQuery.objects.create(
name="Custom query",
organization=self.org,
version=2,
query={"fields": ["title", "project.id", "count()"], "query": "event.type:error"},
)
with self.feature(FEATURE_NAMES):
# View the query list
self.browser.get(self.landing_path)
self.wait_until_loaded()
# Get the card with the new query
card_selector = '[data-test-id="card-{}"]'.format(query.name)
card = self.browser.element(card_selector)
# Open the context menu
card.find_element_by_css_selector('[data-test-id="context-menu"]').click()
# Delete the query
card.find_element_by_css_selector('[data-test-id="delete-query"]').click()
# Wait for card to clear
self.browser.wait_until_not(card_selector)
assert DiscoverSavedQuery.objects.filter(name=query.name).exists() is False
def test_duplicate_query(self):
# Create saved query with ORM
query = DiscoverSavedQuery.objects.create(
name="Custom query",
organization=self.org,
version=2,
query={"fields": ["title", "project.id", "count()"], "query": "event.type:error"},
)
with self.feature(FEATURE_NAMES):
# View the query list
self.browser.get(self.landing_path)
self.wait_until_loaded()
# Get the card with the new query
card_selector = '[data-test-id="card-{}"]'.format(query.name)
card = self.browser.element(card_selector)
# Open the context menu, and duplicate
card.find_element_by_css_selector('[data-test-id="context-menu"]').click()
card.find_element_by_css_selector('[data-test-id="duplicate-query"]').click()
duplicate_name = "{} copy".format(query.name)
# Wait for new element to show up.
self.browser.element('[data-test-id="card-{}"]'.format(duplicate_name))
# Assert the new query exists and has 'copy' added to the name.
assert DiscoverSavedQuery.objects.filter(name=duplicate_name).exists()
@pytest.mark.skip(reason="causing timeouts in github actions and travis")
@patch("django.utils.timezone.now")
def test_drilldown_result(self, mock_now):
mock_now.return_value = before_now().replace(tzinfo=pytz.utc)
min_ago = iso_format(before_now(minutes=1))
events = (
("a" * 32, "oh no", "group-1"),
("b" * 32, "oh no", "group-1"),
("c" * 32, "this is bad", "group-2"),
)
for event in events:
self.store_event(
data={
"event_id": event[0],
"message": event[1],
"timestamp": min_ago,
"fingerprint": [event[2]],
"type": "error",
},
project_id=self.project.id,
)
query = {"field": ["message", "project", "count()"], "query": "event.type:error"}
with self.feature(FEATURE_NAMES):
# Go directly to the query builder view
self.browser.get(self.result_path + "?" + urlencode(query, doseq=True))
self.wait_until_loaded()
# Click the first drilldown
self.browser.element('[data-test-id="expand-count"]').click()
self.wait_until_loaded()
assert self.browser.element_exists_by_test_id("grid-editable"), "table should exist."
headers = self.browser.elements('[data-test-id="grid-editable"] thead th')
expected = ["", "MESSAGE", "PROJECT", "ID"]
actual = [header.text for header in headers]
assert expected == actual
@pytest.mark.skip(reason="not done")
@patch("django.utils.timezone.now")
def test_usage(self, mock_now):
mock_now.return_value = before_now().replace(tzinfo=pytz.utc)
# TODO: load events
# go to landing
# go to a precanned query
# save query 1
# add environment column
# update query
# add condition from facet map
# delete a column
# click and drag a column
# save as query 2
# load save query 1
# sort column
# update query
# delete save query 1
|
|
"""fips verb to build the oryol samples webpage"""
import os
import yaml
import shutil
import subprocess
import glob
from string import Template
from mod import log, util, project, emscripten, android, nacl
from tools import texexport
GitHubSamplesURL = 'https://github.com/floooh/oryol/tree/master/code/Samples/'
BuildEmscripten = True
BuildPNaCl = True
BuildWasm = False
ExportAssets = True
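# The Build* flags select which sample targets get compiled and which per-sample
# HTML pages are generated below; ExportAssets controls whether sample textures
# and data files are exported into the webpage's data/ directory.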
#-------------------------------------------------------------------------------
def deploy_webpage(fips_dir, proj_dir, webpage_dir) :
"""builds the final webpage under under fips-deploy/oryol-webpage"""
ws_dir = util.get_workspace_dir(fips_dir)
# load the websamples.yml file, should have been created during the last build
with open(webpage_dir + '/websamples.yml', 'r') as f :
        samples = yaml.safe_load(f.read())
# create directories
for platform in ['asmjs', 'wasm', 'pnacl'] :
platform_dir = '{}/{}'.format(webpage_dir, platform)
if not os.path.isdir(platform_dir) :
os.makedirs(platform_dir)
# link to the Extension Samples
content = '<div class="thumb">\n'
content += ' <div class="thumb-title">To Extension Samples...</div>\n'
content += ' <div class="img-frame"><a href="http://floooh.github.com/oryol-samples/index.html"><img class="image" src="ext_samples.jpg"></img></a></div>\n'
content += '</div>\n'
# build the thumbnail gallery
for sample in samples :
if sample['name'] != '__end__' :
log.info('> adding thumbnail for {}'.format(sample['name']))
name = sample['name']
imgPath = sample['image']
types = sample['type']
desc = sample['desc']
head, tail = os.path.split(imgPath)
if tail == 'none' :
imgFileName = 'dummy.jpg'
else :
imgFileName = tail
content += '<div class="thumb">\n'
content += ' <div class="thumb-title">{}</div>\n'.format(name)
content += ' <div class="img-frame"><a href="asmjs/{}.html"><img class="image" src="{}" title="{}"></img></a></div>\n'.format(name,imgFileName,desc)
content += ' <div class="thumb-bar">\n'
content += ' <ul class="thumb-list">\n'
if BuildEmscripten and 'emscripten' in types :
content += ' <li class="thumb-item"><a class="thumb-link" href="asmjs/{}.html">asm.js</a></li>\n'.format(name)
if BuildPNaCl and 'pnacl' in types :
content += ' <li class="thumb-item"><a class="thumb-link" href="pnacl/{}.html">pnacl</a></li>\n'.format(name)
if BuildWasm and 'emscripten' in types :
content += ' <li class="thumb-item"><a class="thumb-link" href="wasm/{}.html">wasm</a></li>\n'.format(name)
content += ' </ul>\n'
content += ' </div>\n'
content += '</div>\n'
# populate the html template, and write to the build directory
with open(proj_dir + '/web/index.html', 'r') as f :
templ = Template(f.read())
html = templ.safe_substitute(samples=content)
with open(webpage_dir + '/index.html', 'w') as f :
f.write(html)
# copy other required files
for name in ['style.css', 'dummy.jpg', 'emsc.js', 'pnacl.js', 'wasm.js', 'about.html', 'favicon.png', 'ext_samples.jpg'] :
log.info('> copy file: {}'.format(name))
shutil.copy(proj_dir + '/web/' + name, webpage_dir + '/' + name)
# generate emscripten HTML pages
if BuildEmscripten and emscripten.check_exists(fips_dir) :
emsc_deploy_dir = '{}/fips-deploy/oryol/emsc-ninja-release'.format(ws_dir)
for sample in samples :
name = sample['name']
if name != '__end__' and 'emscripten' in sample['type'] :
log.info('> generate emscripten HTML page: {}'.format(name))
for ext in ['js', 'html.mem'] :
src_path = '{}/{}.{}'.format(emsc_deploy_dir, name, ext)
if os.path.isfile(src_path) :
shutil.copy(src_path, '{}/asmjs/'.format(webpage_dir))
with open(proj_dir + '/web/emsc.html', 'r') as f :
templ = Template(f.read())
                src_url = GitHubSamplesURL + sample['src']
html = templ.safe_substitute(name=name, source=src_url)
                with open('{}/asmjs/{}.html'.format(webpage_dir, name), 'w') as f :
f.write(html)
# generate WebAssembly HTML pages
if BuildWasm and emscripten.check_exists(fips_dir) :
wasm_deploy_dir = '{}/fips-deploy/oryol/wasm-ninja-release'.format(ws_dir)
for sample in samples :
name = sample['name']
if name != '__end__' and 'emscripten' in sample['type'] :
log.info('> generate wasm HTML page: {}'.format(name))
for ext in ['js', 'wasm.mappedGlobals'] :
src_path = '{}/{}.{}'.format(wasm_deploy_dir, name, ext)
if os.path.isfile(src_path) :
shutil.copy(src_path, '{}/wasm/'.format(webpage_dir))
for ext in ['html.mem', 'wasm'] :
src_path = '{}/{}.{}'.format(wasm_deploy_dir, name, ext)
if os.path.isfile(src_path) :
shutil.copy(src_path, '{}/wasm/{}.{}.txt'.format(webpage_dir, name, ext))
with open(proj_dir + '/web/wasm.html', 'r') as f :
templ = Template(f.read())
                src_url = GitHubSamplesURL + sample['src']
html = templ.safe_substitute(name=name, source=src_url)
with open('{}/wasm/{}.html'.format(webpage_dir, name), 'w') as f :
f.write(html)
# generate PNaCl HTML pages
if BuildPNaCl and nacl.check_exists(fips_dir) :
pnacl_deploy_dir = '{}/fips-deploy/oryol/pnacl-ninja-release'.format(ws_dir)
for sample in samples :
name = sample['name']
if name != '__end__' and 'pnacl' in sample['type'] :
log.info('> generate PNaCl HTML page: {}'.format(name))
for ext in ['nmf', 'pexe'] :
src_path = '{}/{}.{}'.format(pnacl_deploy_dir, name, ext)
if os.path.isfile(src_path) :
shutil.copy(src_path, '{}/pnacl/'.format(webpage_dir))
with open(proj_dir + '/web/pnacl.html', 'r') as f :
templ = Template(f.read())
                src_url = GitHubSamplesURL + sample['src']
html = templ.safe_substitute(name=name, source=src_url)
with open('{}/pnacl/{}.html'.format(webpage_dir, name), 'w') as f :
f.write(html)
# copy the screenshots
for sample in samples :
if sample['name'] != '__end__' :
img_path = sample['image']
head, tail = os.path.split(img_path)
if tail != 'none' :
log.info('> copy screenshot: {}'.format(tail))
shutil.copy(img_path, webpage_dir + '/' + tail)
#-------------------------------------------------------------------------------
def export_assets(fips_dir, proj_dir, webpage_dir) :
tex_srcdir = proj_dir + '/data'
tex_dstdir = webpage_dir + '/data'
texexport.configure(proj_dir, tex_srcdir, tex_dstdir)
texexport.exportSampleTextures()
for ext in ['txt'] :
for dataFile in glob.glob(proj_dir + '/data/*.{}'.format(ext)) :
shutil.copy(dataFile, '{}/data/'.format(webpage_dir))
#-------------------------------------------------------------------------------
def build_deploy_webpage(fips_dir, proj_dir) :
# if webpage dir exists, clear it first
ws_dir = util.get_workspace_dir(fips_dir)
webpage_dir = '{}/fips-deploy/oryol-webpage'.format(ws_dir)
if os.path.isdir(webpage_dir) :
shutil.rmtree(webpage_dir)
os.makedirs(webpage_dir)
# compile samples
if BuildPNaCl and nacl.check_exists(fips_dir) :
project.gen(fips_dir, proj_dir, 'pnacl-ninja-release')
project.build(fips_dir, proj_dir, 'pnacl-ninja-release')
if BuildEmscripten and emscripten.check_exists(fips_dir) :
project.gen(fips_dir, proj_dir, 'emsc-ninja-release')
project.build(fips_dir, proj_dir, 'emsc-ninja-release')
if BuildWasm and emscripten.check_exists(fips_dir) :
project.gen(fips_dir, proj_dir, 'wasm-ninja-release')
project.build(fips_dir, proj_dir, 'wasm-ninja-release')
# export sample assets
if ExportAssets :
export_assets(fips_dir, proj_dir, webpage_dir)
# deploy the webpage
deploy_webpage(fips_dir, proj_dir, webpage_dir)
log.colored(log.GREEN, 'Generated Samples web page under {}.'.format(webpage_dir))
#-------------------------------------------------------------------------------
def serve_webpage(fips_dir, proj_dir) :
ws_dir = util.get_workspace_dir(fips_dir)
webpage_dir = '{}/fips-deploy/oryol-webpage'.format(ws_dir)
p = util.get_host_platform()
if p == 'osx' :
try :
subprocess.call(
'open http://localhost:8000 ; python {}/mod/httpserver.py'.format(fips_dir),
cwd = webpage_dir, shell=True)
except KeyboardInterrupt :
pass
elif p == 'win':
try:
subprocess.call(
'cmd /c start http://localhost:8000 && python {}/mod/httpserver.py'.format(fips_dir),
cwd = webpage_dir, shell=True)
except KeyboardInterrupt:
pass
elif p == 'linux':
try:
subprocess.call(
'xdg-open http://localhost:8000; python {}/mod/httpserver.py'.format(fips_dir),
cwd = webpage_dir, shell=True)
except KeyboardInterrupt:
pass
#-------------------------------------------------------------------------------
def run(fips_dir, proj_dir, args) :
if len(args) > 0 :
if args[0] == 'build' :
build_deploy_webpage(fips_dir, proj_dir)
elif args[0] == 'serve' :
serve_webpage(fips_dir, proj_dir)
else :
log.error("Invalid param '{}', expected 'build' or 'serve'".format(args[0]))
else :
log.error("Param 'build' or 'serve' expected")
#-------------------------------------------------------------------------------
def help() :
log.info(log.YELLOW +
'fips webpage build\n' +
'fips webpage serve\n' +
log.DEF +
' build oryol samples webpage')
|
|
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
import logging
import time
import pytest
import requests
from mocker.endpoints.mesos import AGENT1_ID, AGENT3_ID
from util import GuardedSubprocess, LineBufferFilter, SearchCriteria
log = logging.getLogger(__name__)
@pytest.fixture()
def empty_file(tmp_file):
open(tmp_file, 'w').close()
return tmp_file
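# Common pattern in these tests: start AdminRouter (nginx) via nginx_class with
# a particular configuration, run it inside GuardedSubprocess, and scan its
# stderr with LineBufferFilter against a dict of expected log lines; the tests
# then assert lbf.extra_matches == {} to confirm the log matched the given
# SearchCriteria.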
class TestSecretKeyFilePathEnvVarBehaviour:
@pytest.mark.parametrize('role', ['master', 'agent'])
def test_if_not_defining_the_var_is_handled(self, nginx_class, role):
# Scanning for the exact log entry is bad, but in this case - can't be
# avoided.
filter_regexp = {
'AUTH_TOKEN_VERIFICATION_KEY_FILE_PATH not set.':
SearchCriteria(1, False)
}
ar = nginx_class(role=role, auth_token_verification_key_file_path=None)
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
@pytest.mark.parametrize('role', ['master', 'agent'])
def test_if_var_pointing_to_empty_file_is_handled(
self, nginx_class, role, empty_file):
# Scanning for the exact log entry is bad, but in this case - can't be
# avoided.
filter_regexp = {'Auth token verification key not set': SearchCriteria(1, False)}
ar = nginx_class(role=role, auth_token_verification_key_file_path=empty_file)
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
# TODO: ATM in Agent-Open there are no paths we can test auth with
@pytest.mark.parametrize('role,use_empty',
[('master', False), ('master', True)],
)
def test_if_bad_var_fails_all_requests(
self, nginx_class, role, use_empty, empty_file, valid_user_header):
if use_empty:
ar = nginx_class(role=role, auth_token_verification_key_file_path=empty_file)
else:
ar = nginx_class(role=role, auth_token_verification_key_file_path=None)
url = ar.make_url_from_path()
with GuardedSubprocess(ar):
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 401
class TestDefaultSchemeEnvVarBehaviour:
def test_if_default_scheme_is_honoured_by_agent_endpoint(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {'Default scheme: https://': SearchCriteria(1, False)}
ar = nginx_class(default_scheme="https://")
agent_id = AGENT3_ID
url_good = ar.make_url_from_path('/agent/{}/blah/blah'.format(agent_id))
agent_id = AGENT1_ID
url_bad = ar.make_url_from_path('/agent/{}/blah/blah'.format(agent_id))
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
resp = requests.get(url_bad,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 502
resp = requests.get(url_good,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == 'https://127.0.0.1:15401'
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
    def test_if_default_scheme_is_honoured_by_mleader_endpoint(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {'Default scheme: https://': SearchCriteria(1, False)}
cache_poll_period = 3
ar = nginx_class(cache_poll_period=cache_poll_period,
cache_expiration=cache_poll_period - 1,
default_scheme="https://")
url = ar.make_url_from_path('/system/v1/leader/marathon/foo/bar/baz')
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 502
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='change_leader',
aux_data="127.0.0.4:443")
            # Wait long enough for the cache to be re-polled (2 * cache_poll_period
            # covers at least one full poll cycle) so the new leader is picked up.
time.sleep(cache_poll_period * 2)
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == 'https://127.0.0.4:443'
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
class TestUpstreamsEnvVarBehaviour:
def test_if_marathon_upstream_env_is_honoured(
self, nginx_class, mocker, valid_user_header):
# Stage 0 - setup the environment:
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='record_requests')
mocker.send_command(endpoint_id='http://127.0.0.2:8080',
func_name='record_requests')
# Stage 1 - we set Marathon upstream to http://127.0.0.1:8080 and
# verify that all the requests from cache go there:
filter_regexp = {
'Marathon upstream: http://127.0.0.1:8080': SearchCriteria(1, True),
'< http://127.0.0.1:8080/v2/leader': SearchCriteria(1, True),
(r'< http://127.0.0.1:8080/v2/apps'
r'\?embed=apps\.tasks\&label=DCOS_SERVICE_NAME'): SearchCriteria(1, True),
}
ar = nginx_class(upstream_marathon="http://127.0.0.1:8080")
url = ar.make_url_from_path('/system/v1/leader/marathon/foo/bar/baz')
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
requests.get(url,
allow_redirects=False,
headers=valid_user_header)
lbf.scan_log_buffer()
m1_requests = mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='get_recorded_requests')
assert len(m1_requests) == 2
m2_requests = mocker.send_command(endpoint_id='http://127.0.0.2:8080',
func_name='get_recorded_requests')
assert len(m2_requests) == 0
assert lbf.extra_matches == {}
# Stage 1 ends
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='erase_recorded_requests')
# Stage 2 - we set Marathon upstream to http://127.0.0.2:8080 and
# verify that all the requests go to the new upstream
filter_regexp = {
'Marathon upstream: http://127.0.0.2:8080': SearchCriteria(1, True),
'< http://127.0.0.2:8080/v2/leader': SearchCriteria(1, True),
(r'< http://127.0.0.2:8080/v2/apps'
r'\?embed=apps\.tasks\&label=DCOS_SERVICE_NAME'): SearchCriteria(1, True),
}
ar = nginx_class(upstream_marathon="http://127.0.0.2:8080")
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
requests.get(url,
allow_redirects=False,
headers=valid_user_header)
lbf.scan_log_buffer()
m1_requests = mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='get_recorded_requests')
assert len(m1_requests) == 0
m2_requests = mocker.send_command(endpoint_id='http://127.0.0.2:8080',
func_name='get_recorded_requests')
assert len(m2_requests) == 2
assert lbf.extra_matches == {}
def test_if_mesos_upstream_env_is_honoured(
self, nginx_class, mocker, valid_user_header):
# Stage 0 - setup the environment:
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='record_requests')
mocker.send_command(endpoint_id='http://127.0.0.3:5050',
func_name='record_requests')
# Stage 1 - we set Mesos upstream to http://127.0.0.2:5050 and
# verify that all the requests from cache go there:
filter_regexp = {
'Mesos upstream: http://127.0.0.2:5050': SearchCriteria(1, True),
'< http://127.0.0.2:5050/master/state-summary': SearchCriteria(1, True),
}
ar = nginx_class(upstream_mesos="http://127.0.0.2:5050")
agent_id = AGENT1_ID
url = ar.make_url_from_path('/agent/{}/blah/blah'.format(agent_id))
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
requests.get(url,
allow_redirects=False,
headers=valid_user_header)
lbf.scan_log_buffer()
m1_requests = mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='get_recorded_requests')
assert len(m1_requests) == 1
m2_requests = mocker.send_command(endpoint_id='http://127.0.0.3:5050',
func_name='get_recorded_requests')
assert len(m2_requests) == 0
assert lbf.extra_matches == {}
# Stage 1 ends
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='erase_recorded_requests')
        # Stage 2 - we set Mesos upstream to http://127.0.0.3:5050 and
        # verify that all the requests go to the new upstream
filter_regexp = {
'Mesos upstream: http://127.0.0.3:5050': SearchCriteria(1, True),
'< http://127.0.0.3:5050/master/state-summary': SearchCriteria(1, True),
}
ar = nginx_class(upstream_mesos="http://127.0.0.3:5050")
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
requests.get(url,
allow_redirects=False,
headers=valid_user_header)
lbf.scan_log_buffer()
m1_requests = mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='get_recorded_requests')
assert len(m1_requests) == 0
m2_requests = mocker.send_command(endpoint_id='http://127.0.0.3:5050',
func_name='get_recorded_requests')
assert len(m2_requests) == 1
assert lbf.extra_matches == {}
class TestHostIPVarBehaviour:
def test_if_absent_var_is_handled(self, nginx_class, mocker):
filter_regexp = {
'Local Mesos Master IP: unknown': SearchCriteria(1, True),
}
ar = nginx_class(host_ip=None)
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
@pytest.mark.parametrize(
"invalid_ip",
["not-an-ip", "1,3,4,4", "1.2.3.300", 'aaa.1.2.3.4', '1.2.3.4.bccd'])
def test_if_var_is_verified(self, invalid_ip, nginx_class, mocker):
filter_regexp = {
'Local Mesos Master IP: unknown': SearchCriteria(1, True),
'HOST_IP var is not a valid ipv4: {}'.format(invalid_ip):
SearchCriteria(1, True),
}
ar = nginx_class(host_ip=invalid_ip)
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
@pytest.mark.parametrize("valid_ip", ["1.2.3.4", "255.255.255.255", "0.0.0.1"])
def test_if_var_is_honoured(self, valid_ip, nginx_class, mocker):
filter_regexp = {
'Local Mesos Master IP: {}'.format(valid_ip): SearchCriteria(1, True),
}
ar = nginx_class(host_ip=valid_ip)
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
class TestAuthModuleDisablingMaster:
@pytest.mark.parametrize(
"enable_keyword",
["enabled", "true", "yes", "of_course", "make it so!",
"disabled", "no", "no way", "please no"])
    def test_if_auth_module_is_enabled_unless_false_str_is_provided(
self, nginx_class, mocker, enable_keyword):
filter_regexp = {
'Activate authentication module.': SearchCriteria(1, True),
}
ar = nginx_class(auth_enabled=enable_keyword)
url = ar.make_url_from_path('/exhibitor/foo/bar')
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
resp = requests.get(url,
allow_redirects=False)
assert resp.status_code == 401
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
def test_if_auth_module_can_be_disabled(self, nginx_class, mocker):
filter_regexp = {
("ADMINROUTER_ACTIVATE_AUTH_MODULE set to `false`. "
"Deactivate authentication module."): SearchCriteria(1, True),
}
ar = nginx_class(auth_enabled='false')
url = ar.make_url_from_path('/exhibitor/foo/bar')
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
resp = requests.get(url,
allow_redirects=False)
assert resp.status_code == 200
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
|
|
"""\
NAME
AnalyticDerivatives.py
SYNOPSIS
Workhorse of the force.py module.
DESCRIPTION
AUTHOR
Hatem H. Helal, [email protected]
REPORT BUGS
Report bugs to [email protected]
COPYRIGHT
"""
from NumWrap import array2string
from math import sqrt
from PGBF import PGBF,coulomb
from pyints import grad_nuc_att
def der_Hcore_element(a,bfi,bfj,atoms):
"""
Finds the derivative of the core-Hamiltonian matrix elements, which can
be written as
H_ij = T_ij + VNe_ij
Where T_ij is the kinetic energy integral and VNe_ij is the nuclear
attraction integral.
"""
dTij_dXa,dTij_dYa,dTij_dZa = der_kinetic_integral(a,bfi,bfj)
dVij_dXa,dVij_dYa,dVij_dZa = der_nuc_att(a,bfi,bfj,atoms)
dHij_dXa = dTij_dXa + dVij_dXa
dHij_dYa = dTij_dYa + dVij_dYa
dHij_dZa = dTij_dZa + dVij_dZa
return dHij_dXa,dHij_dYa,dHij_dZa
def der_kinetic_integral(a,bfi,bfj):
"""
The kinetic energy operator does not depend on the atomic position so we only
have to consider differentiating the Gaussian functions. There are 4 possible
cases we have to evaluate
Case 1: Neither of the basis functions depends on the position of atom A which gives:
dT_ij/dXa = 0
    Cases 2 and 3: Only one of the basis functions depends on the position of atom A, which
gives us either of the following possible integrals to evaluate
dT_ij/dXa = integral{dr dg_i/dXa T g_j }
dT_ij/dXa = integral{dr g_i T dg_j/dXa }
Case 4: Both of the basis functions depend on the position of atom A which gives the
following integral to evaluate
dT_ij/dXa = integral{dr dg_i/dXa T g_j + g_i T dg_j/dXa }
"""
dTij_dXa,dTij_dYa,dTij_dZa = 0.0,0.0,0.0
#we use atom ids on the CGBFs to evaluate which of the 4 above case we have
#bfi is centered on atom a
if bfi.atid==a:
for upbf in bfj.prims:
for vpbf in bfi.prims:
alpha = vpbf.exp
l,m,n = vpbf.powers
origin = vpbf.origin
coefs = upbf.coef*vpbf.coef
#x component
v = PGBF(alpha,origin,(l+1,m,n))
terma = sqrt(alpha*(2.0*l+1.0))*coefs*v.kinetic(upbf)
if l>0:
v = PGBF(alpha,origin,(l-1,m,n))
termb = -2*l*sqrt(alpha/(2.0*l-1.0))*coefs*v.kinetic(upbf)
else: termb = 0.0
dTij_dXa += terma + termb
#y component
v = PGBF(alpha,origin,(l,m+1,n))
terma = sqrt(alpha*(2.0*m+1.0))*coefs*v.kinetic(upbf)
if m>0:
v = PGBF(alpha,origin,(l,m-1,n))
termb = -2*m*sqrt(alpha/(2.0*m-1.0))*coefs*v.kinetic(upbf)
else: termb = 0.0
dTij_dYa += terma + termb
#z component
v = PGBF(alpha,origin,(l,m,n+1))
terma = sqrt(alpha*(2.0*n+1.0))*coefs*v.kinetic(upbf)
if n>0:
v = PGBF(alpha,origin,(l,m,n-1))
termb = -2*n*sqrt(alpha/(2.0*n-1.0))*coefs*v.kinetic(upbf)
else: termb = 0.0
dTij_dZa += terma + termb
#bfj is centered on atom a
if bfj.atid==a:
for upbf in bfi.prims:
for vpbf in bfj.prims:
alpha = vpbf.exp
l,m,n = vpbf.powers
origin = vpbf.origin
coefs = upbf.coef*vpbf.coef
#x component
v = PGBF(alpha,origin,(l+1,m,n))
terma = sqrt(alpha*(2.0*l+1.0))*coefs*v.kinetic(upbf)
if l>0:
v = PGBF(alpha,origin,(l-1,m,n))
termb = -2*l*sqrt(alpha/(2.0*l-1.0))*coefs*v.kinetic(upbf)
else: termb = 0.0
dTij_dXa += terma + termb
#y component
v = PGBF(alpha,origin,(l,m+1,n))
terma = sqrt(alpha*(2.0*m+1.0))*coefs*v.kinetic(upbf)
if m>0:
v = PGBF(alpha,origin,(l,m-1,n))
termb = -2*m*sqrt(alpha/(2.0*m-1.0))*coefs*v.kinetic(upbf)
else: termb = 0.0
dTij_dYa += terma + termb
#z component
v = PGBF(alpha,origin,(l,m,n+1))
terma = sqrt(alpha*(2.0*n+1.0))*coefs*v.kinetic(upbf)
if n>0:
v = PGBF(alpha,origin,(l,m,n-1))
termb = -2*n*sqrt(alpha/(2.0*n-1.0))*coefs*v.kinetic(upbf)
else: termb = 0.0
dTij_dZa += terma + termb
return dTij_dXa,dTij_dYa,dTij_dZa
def der_nuc_att(a,bfi,bfj,atoms):
"""
This function finds the atomic gradient of the nuclear attraction integrals. Since the
nuclear attraction operator explicitly depends on the atomic coordinates we find
grad <i|V|j> = <grad i|V|j> + <i|V|grad j> + <i| grad V |j>
The first two terms are straightforward to evaluate using the recursion relation for the
    derivative of a Gaussian basis function. The last term is found through the nuclear_gradient
function in the primitive Gaussian class.
"""
dVij_dXa,dVij_dYa,dVij_dZa = 0.0,0.0,0.0
if bfi.atid==a: #bfi is centered on atom a
for upbf in bfj.prims:
for vpbf in bfi.prims:
alpha = vpbf.exp
l,m,n = vpbf.powers
origin = vpbf.origin
coefs = upbf.coef*vpbf.coef
#x component
v = PGBF(alpha,origin,(l+1,m,n))
terma=0.0
for atom in atoms:
if atom.atid != a:
terma += atom.atno*sqrt(alpha*(2.0*l+1.0))*coefs*v.nuclear(upbf,atom.pos())
if l>0:
v = PGBF(alpha,origin,(l-1,m,n))
termb=0.0
for atom in atoms:
if atom.atid != a:
termb += -2*l*atom.atno*sqrt(alpha/(2.0*l-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dXa += terma + termb
#y component
v = PGBF(alpha,origin,(l,m+1,n))
terma=0.0
for atom in atoms:
if atom.atid != a:
terma += atom.atno*sqrt(alpha*(2.0*m+1.0))*coefs*v.nuclear(upbf,atom.pos())
if m>0:
v = PGBF(alpha,origin,(l,m-1,n))
termb=0.0
for atom in atoms:
if atom.atid != a:
termb += -2*m*atom.atno*sqrt(alpha/(2.0*m-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dYa += terma + termb
#z component
v = PGBF(alpha,origin,(l,m,n+1))
terma=0.0
for atom in atoms:
if atom.atid != a:
terma += atom.atno*sqrt(alpha*(2.0*n+1.0))*coefs*v.nuclear(upbf,atom.pos())
if n>0:
v = PGBF(alpha,origin,(l,m,n-1))
termb=0.0
for atom in atoms:
if atom.atid != a:
termb += -2*n*atom.atno*sqrt(alpha/(2.0*n-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dZa += terma + termb
#bfj is centered on atom a
if bfj.atid==a:
for upbf in bfi.prims:
for vpbf in bfj.prims:
alpha = vpbf.exp
l,m,n = vpbf.powers
origin = vpbf.origin
coefs = upbf.coef*vpbf.coef
#x component
v = PGBF(alpha,origin,(l+1,m,n))
terma=0.0
for atom in atoms:
if atom.atid != a:
terma += atom.atno*sqrt(alpha*(2.0*l+1.0))*coefs*v.nuclear(upbf,atom.pos())
if l>0:
v = PGBF(alpha,origin,(l-1,m,n))
termb=0.0
for atom in atoms:
if atom.atid != a:
termb += -2*l*atom.atno*sqrt(alpha/(2.0*l-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dXa += terma + termb
#y component
v = PGBF(alpha,origin,(l,m+1,n))
terma=0.0
for atom in atoms:
if atom.atid != a:
terma += atom.atno*sqrt(alpha*(2.0*m+1.0))*coefs*v.nuclear(upbf,atom.pos())
if m>0:
v = PGBF(alpha,origin,(l,m-1,n))
termb=0.0
for atom in atoms:
if atom.atid != a:
termb += -2*m*atom.atno*sqrt(alpha/(2.0*m-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dYa += terma + termb
#z component
v = PGBF(alpha,origin,(l,m,n+1))
terma=0.0
for atom in atoms:
if atom.atid != a:
terma += atom.atno*sqrt(alpha*(2.0*n+1.0))*coefs*v.nuclear(upbf,atom.pos())
if n>0:
v = PGBF(alpha,origin,(l,m,n-1))
termb = 0.0
for atom in atoms:
if atom.atid != a:
termb += -2*n*atom.atno*sqrt(alpha/(2.0*n-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dZa += terma + termb
#finally evaluate grad<i| Vatom |j>
for atom in atoms:
if atom.atid==a:
for upbf in bfi.prims:
for vpbf in bfj.prims:
prefactor = upbf.coef*vpbf.coef*atom.atno
                    grad = upbf.nuclear_gradient(bfi.atid,bfj.atid,atom.atid,vpbf,atom.pos())
                    #grad = upbf.nuclear_gradient(vpbf,atom.pos())
                    dVij_dXa += prefactor*grad[0]
                    dVij_dYa += prefactor*grad[1]
                    dVij_dZa += prefactor*grad[2]
return dVij_dXa,dVij_dYa,dVij_dZa
def der_nuc_att_old(a,bfi,bfj,atoms):
"""
This function finds the atomic gradient of the nuclear attraction integrals. Since the
nuclear attraction operator explicitly depends on the atomic coordinates we find
grad <i|V|j> = <grad i|V|j> + <i|V|grad j> + <i| grad V |j>
The first two terms are straightforward to evaluate using the recursion relation for the
    derivative of a Gaussian basis function. The last term is found through the nuclear_gradient
function in the primitive Gaussian class.
"""
dVij_dXa,dVij_dYa,dVij_dZa = 0.0,0.0,0.0
if bfi.atid==a: #bfi is centered on atom a
for upbf in bfj.prims:
for vpbf in bfi.prims:
alpha = vpbf.exp
l,m,n = vpbf.powers
origin = vpbf.origin
coefs = upbf.coef*vpbf.coef
#x component
v = PGBF(alpha,origin,(l+1,m,n))
terma=0.0
for atom in atoms:
terma += atom.atno*sqrt(alpha*(2.0*l+1.0))*coefs*v.nuclear(upbf,atom.pos())
if l>0:
v.reset_powers(l-1,m,n)
#v.normalize()
termb=0.0
for atom in atoms:
termb += -2*l*atom.atno*sqrt(alpha/(2.0*l-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dXa += terma + termb
#y component
v.reset_powers(l,m+1,n)
#v.normalize()
terma=0.0
for atom in atoms:
terma += atom.atno*sqrt(alpha*(2.0*m+1.0))*coefs*v.nuclear(upbf,atom.pos())
if m>0:
v.reset_powers(l,m-1,n)
#v.normalize()
termb=0.0
for atom in atoms:
termb += -2*m*atom.atno*sqrt(alpha/(2.0*m-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dYa += terma + termb
#z component
v.reset_powers(l,m,n+1)
#v.normalize()
terma=0.0
for atom in atoms:
terma += atom.atno*sqrt(alpha*(2.0*n+1.0))*coefs*v.nuclear(upbf,atom.pos())
if n>0:
v.reset_powers(l,m,n-1)
#v.normalize()
termb=0.0
for atom in atoms:
termb += -2*n*atom.atno*sqrt(alpha/(2.0*n-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dZa += terma + termb
#bfj is centered on atom a
if bfj.atid==a:
for upbf in bfi.prims:
for vpbf in bfj.prims:
alpha = vpbf.exp
l,m,n = vpbf.powers
origin = vpbf.origin
coefs = upbf.coef*vpbf.coef
#x component
v = PGBF(alpha,origin,(l+1,m,n))
#v.normalize()
terma=0.0
for atom in atoms:
terma += atom.atno*sqrt(alpha*(2.0*l+1.0))*coefs*v.nuclear(upbf,atom.pos())
if l>0:
v.reset_powers(l-1,m,n)
#v.normalize()
termb=0.0
for atom in atoms:
termb += -2*l*atom.atno*sqrt(alpha/(2.0*l-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dXa += terma + termb
#y component
v.reset_powers(l,m+1,n)
#v.normalize()
terma=0.0
for atom in atoms:
terma += atom.atno*sqrt(alpha*(2.0*m+1.0))*coefs*v.nuclear(upbf,atom.pos())
if m>0:
v.reset_powers(l,m-1,n)
#v.normalize()
termb=0.0
for atom in atoms:
termb += -2*m*atom.atno*sqrt(alpha/(2.0*m-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dYa += terma + termb
#z component
v.reset_powers(l,m,n+1)
#v.normalize()
terma=0.0
for atom in atoms:
terma += atom.atno*sqrt(alpha*(2.0*n+1.0))*coefs*v.nuclear(upbf,atom.pos())
if n>0:
v.reset_powers(l,m,n-1)
#v.normalize()
termb = 0.0
for atom in atoms:
termb += -2*n*atom.atno*sqrt(alpha/(2.0*n-1.0))*coefs*v.nuclear(upbf,atom.pos())
else: termb = 0.0
dVij_dZa += terma + termb
#finally evaluate <i| grad V |j>
for atom in atoms:
if atom.atid==a:
for upbf in bfi.prims:
for vpbf in bfj.prims:
prefactor = upbf.coef*vpbf.coef*atom.atno
                    grad = upbf.nuclear_gradient(vpbf,atom.pos())
                    dVij_dXa += prefactor*grad[0]
                    dVij_dYa += prefactor*grad[1]
                    dVij_dZa += prefactor*grad[2]
return dVij_dXa,dVij_dYa,dVij_dZa
def der_overlap_element(a,bfi, bfj):
"""
finds the derivative of the overlap integral with respect to the
atomic coordinate of atom "a". Note there are four possible cases
for evaluating this integral:
1. Neither of the basis functions depend on the position of atom a
ie. they are centered on atoms other than atom a
2 and 3. One of the basis functions depends on the position of atom a
           so we need to evaluate the derivative of a Gaussian with the
           recursion relation derived on page 442 of Szabo.
4. Both of the basis functions are centered on atom a, which through the
recursion relation for the derivative of a Gaussian basis function will
require the evaluation of 4 overlap integrals...
    This function returns the three derivatives of the overlap integral with
    respect to the atomic coordinates Xa, Ya, Za.
"""
dSij_dXa,dSij_dYa,dSij_dZa = 0.0,0.0,0.0
#we use atom ids on the CGBFs to evaluate which of the 4 above case we have
if bfi.atid==a: #bfi is centered on atom a
for upbf in bfj.prims:
for vpbf in bfi.prims:
alpha = vpbf.exp
l,m,n = vpbf.powers
origin = vpbf.origin
coefs = upbf.coef*vpbf.coef
#x component
v = PGBF(alpha,origin,(l+1,m,n))
terma = sqrt(alpha*(2.0*l+1.0))*coefs*v.overlap(upbf)
if l>0:
v = PGBF(alpha,origin,(l-1,m,n))
termb = -2*l*sqrt(alpha/(2.0*l-1.0))*coefs*v.overlap(upbf)
else: termb = 0.0
dSij_dXa += terma + termb
#y component
v = PGBF(alpha,origin,(l,m+1,n))
terma = sqrt(alpha*(2.0*m+1.0))*coefs*v.overlap(upbf)
if m>0:
v = PGBF(alpha,origin,(l,m-1,n))
termb = -2*m*sqrt(alpha/(2.0*m-1.0))*coefs*v.overlap(upbf)
else: termb = 0.0
dSij_dYa += terma + termb
#z component
v = PGBF(alpha,origin,(l,m,n+1))
terma = sqrt(alpha*(2.0*n+1.0))*coefs*v.overlap(upbf)
if n>0:
v = PGBF(alpha,origin,(l,m,n-1))
termb = -2*n*sqrt(alpha/(2.0*n-1.0))*coefs*v.overlap(upbf)
else: termb = 0.0
dSij_dZa += terma + termb
#bfj is centered on atom a
if bfj.atid==a:
for upbf in bfi.prims:
for vpbf in bfj.prims:
alpha = vpbf.exp
l,m,n = vpbf.powers
origin = vpbf.origin
coefs = upbf.coef*vpbf.coef
#x component
v = PGBF(alpha,origin,(l+1,m,n))
terma = sqrt(alpha*(2.0*l+1.0))*coefs*v.overlap(upbf)
if l>0:
v = PGBF(alpha,origin,(l-1,m,n))
termb = -2*l*sqrt(alpha/(2.0*l-1.0))*coefs*v.overlap(upbf)
else: termb = 0.0
dSij_dXa += terma + termb
#y component
v = PGBF(alpha,origin,(l,m+1,n))
terma = sqrt(alpha*(2.0*m+1.0))*coefs*v.overlap(upbf)
if m>0:
v = PGBF(alpha,origin,(l,m-1,n))
termb = -2*m*sqrt(alpha/(2.0*m-1.0))*coefs*v.overlap(upbf)
else: termb = 0.0
dSij_dYa += terma + termb
#z component
v = PGBF(alpha,origin,(l,m,n+1))
terma = sqrt(alpha*(2.0*n+1.0))*coefs*v.overlap(upbf)
if n>0:
v = PGBF(alpha,origin,(l,m,n-1))
termb = -2*n*sqrt(alpha/(2.0*n-1.0))*coefs*v.overlap(upbf)
else: termb = 0.0
dSij_dZa += terma + termb
return dSij_dXa,dSij_dYa,dSij_dZa
def der_Jints(a, bfi,bfj,bfk,bfl):
"""
    This function will find the atomic gradient of the Coulomb integral over
basis functions i,j,k, and l as in
grad_a <ij|kl> = <gi j|kl> + <i gj|kl> + <ij|gk l> + <ij|k gl>
"""
dJint_dXa,dJint_dYa,dJint_dZa = 0.0,0.0,0.0
if bfi.atid==a: #bfi is centered on atom a
for tpbf in bfi.prims:
for upbf in bfj.prims:
for vpbf in bfk.prims:
for wpbf in bfl.prims:
alpha = tpbf.exp
l,m,n = tpbf.powers
origin = tpbf.origin
coefs = tpbf.coef*upbf.coef*vpbf.coef*wpbf.coef
#x component
tmp = PGBF(alpha, origin,(l+1,m,n)) #temp pgbf
terma = sqrt(alpha*(2.0*l+1.0))*coefs*coulomb(tmp,upbf,vpbf,wpbf)
if l>0:
tmp = PGBF(alpha,origin,(l-1,m,n))
termb = -2*l*sqrt(alpha/(2.*l-1))*coefs*coulomb(tmp,upbf,vpbf,wpbf)
else: termb = 0.0
dJint_dXa += terma+termb
#y component
tmp = PGBF(alpha,origin,(l,m+1,n))
terma = sqrt(alpha*(2.0*m+1.0))*coefs*coulomb(tmp,upbf,vpbf,wpbf)
if m>0:
tmp = PGBF(alpha,origin,(l,m-1,n))
termb = -2*m*sqrt(alpha/(2.*m-1))*coefs*coulomb(tmp,upbf,vpbf,wpbf)
else: termb=0.0
dJint_dYa += terma + termb
#z component
tmp = PGBF(alpha,origin,(l,m,n+1))
terma = sqrt(alpha*(2.0*n+1.0))*coefs*coulomb(tmp,upbf,vpbf,wpbf)
if n>0:
tmp = PGBF(alpha,origin,(l,m,n-1))
termb = -2*n*sqrt(alpha/(2.*n-1))*coefs*coulomb(tmp,upbf,vpbf,wpbf)
else: termb=0.0
dJint_dZa += terma + termb
if bfj.atid==a: #bfj is centered on atom a
for tpbf in bfi.prims:
for upbf in bfj.prims:
for vpbf in bfk.prims:
for wpbf in bfl.prims:
alpha = upbf.exp
l,m,n = upbf.powers
origin = upbf.origin
coefs = tpbf.coef*upbf.coef*vpbf.coef*wpbf.coef
#x component
tmp = PGBF(alpha, origin,(l+1,m,n)) #temp pgbf
terma = sqrt(alpha*(2.0*l+1.0))*coefs*coulomb(tpbf,tmp,vpbf,wpbf)
if l>0:
tmp = PGBF(alpha,origin,(l-1,m,n))
termb = -2*l*sqrt(alpha/(2.*l-1))*coefs*coulomb(tpbf,tmp,vpbf,wpbf)
else: termb = 0.0
dJint_dXa += terma+termb
#y component
tmp = PGBF(alpha,origin,(l,m+1,n))
terma = sqrt(alpha*(2.0*m+1.0))*coefs*coulomb(tpbf,tmp,vpbf,wpbf)
if m>0:
tmp = PGBF(alpha,origin,(l,m-1,n))
termb = -2*m*sqrt(alpha/(2.*m-1))*coefs*coulomb(tpbf,tmp,vpbf,wpbf)
else: termb=0.0
dJint_dYa += terma + termb
#z component
tmp = PGBF(alpha,origin,(l,m,n+1))
terma = sqrt(alpha*(2.0*n+1.0))*coefs*coulomb(tpbf,tmp,vpbf,wpbf)
if n>0:
tmp = PGBF(alpha,origin,(l,m,n-1))
termb = -2*n*sqrt(alpha/(2.*n-1))*coefs*coulomb(tpbf,tmp,vpbf,wpbf)
else: termb=0.0
dJint_dZa += terma + termb
if bfk.atid==a: #bfk is centered on atom a
for tpbf in bfi.prims:
for upbf in bfj.prims:
for vpbf in bfk.prims:
for wpbf in bfl.prims:
alpha = vpbf.exp
l,m,n = vpbf.powers
origin = vpbf.origin
coefs = tpbf.coef*upbf.coef*vpbf.coef*wpbf.coef
#x component
tmp = PGBF(alpha, origin,(l+1,m,n)) #temp pgbf
terma = sqrt(alpha*(2.0*l+1.0))*coefs*coulomb(tpbf,upbf,tmp,wpbf)
if l>0:
tmp = PGBF(alpha,origin,(l-1,m,n))
termb = -2*l*sqrt(alpha/(2.*l-1))*coefs*coulomb(tpbf,upbf,tmp,wpbf)
else: termb = 0.0
dJint_dXa += terma+termb
#y component
tmp = PGBF(alpha,origin,(l,m+1,n))
terma = sqrt(alpha*(2.0*m+1.0))*coefs*coulomb(tpbf,upbf,tmp,wpbf)
if m>0:
tmp = PGBF(alpha,origin,(l,m-1,n))
termb = -2*m*sqrt(alpha/(2.*m-1))*coefs*coulomb(tpbf,upbf,tmp,wpbf)
else: termb=0.0
dJint_dYa += terma + termb
#z component
tmp = PGBF(alpha,origin,(l,m,n+1))
terma = sqrt(alpha*(2.0*n+1.0))*coefs*coulomb(tpbf,upbf,tmp,wpbf)
if n>0:
tmp = PGBF(alpha,origin,(l,m,n-1))
termb = -2*n*sqrt(alpha/(2.*n-1))*coefs*coulomb(tpbf,upbf,tmp,wpbf)
else: termb=0.0
dJint_dZa += terma + termb
if bfl.atid==a: #bfl is centered on atom a
for tpbf in bfi.prims:
for upbf in bfj.prims:
for vpbf in bfk.prims:
for wpbf in bfl.prims:
alpha = wpbf.exp
l,m,n = wpbf.powers
origin = wpbf.origin
coefs = tpbf.coef*upbf.coef*vpbf.coef*wpbf.coef
#x component
tmp = PGBF(alpha, origin,(l+1,m,n)) #temp pgbf
terma = sqrt(alpha*(2.0*l+1.0))*coefs*coulomb(tpbf,upbf,vpbf,tmp)
if l>0:
tmp = PGBF(alpha,origin,(l-1,m,n))
termb = -2*l*sqrt(alpha/(2.*l-1))*coefs*coulomb(tpbf,upbf,vpbf,tmp)
else: termb = 0.0
dJint_dXa += terma+termb
#y component
tmp = PGBF(alpha,origin,(l,m+1,n))
terma = sqrt(alpha*(2.0*m+1.0))*coefs*coulomb(tpbf,upbf,vpbf,tmp)
if m>0:
tmp = PGBF(alpha,origin,(l,m-1,n))
termb = -2*m*sqrt(alpha/(2.*m-1))*coefs*coulomb(tpbf,upbf,vpbf,tmp)
else: termb=0.0
dJint_dYa += terma + termb
#z component
tmp = PGBF(alpha,origin,(l,m,n+1))
terma = sqrt(alpha*(2.0*n+1.0))*coefs*coulomb(tpbf,upbf,vpbf,tmp)
if n>0:
tmp = PGBF(alpha,origin,(l,m,n-1))
termb = -2*n*sqrt(alpha/(2.*n-1))*coefs*coulomb(tpbf,upbf,vpbf,tmp)
else: termb=0.0
dJint_dZa += terma + termb
return dJint_dXa,dJint_dYa,dJint_dZa
|
|
# -*- encoding: utf-8 -*-
#
# Copyright 2015 Hewlett Packard Development Company, LP
# Copyright 2015 Universidade Federal de Campina Grande
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import importutils
from oslo_utils import uuidutils
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules.oneview import common
from ironic.drivers.modules.oneview import deploy_utils
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
oneview_models = importutils.try_import('oneview_client.models')
oneview_exceptions = importutils.try_import('oneview_client.exceptions')
POWER_ON = 'On'
POWER_OFF = 'Off'
ERROR = 'error'
@mock.patch.object(common, 'get_oneview_client', spec_set=True, autospec=True)
class OneViewPowerDriverTestCase(db_base.DbTestCase):
def setUp(self):
super(OneViewPowerDriverTestCase, self).setUp()
self.config(manager_url='https://1.2.3.4', group='oneview')
self.config(username='user', group='oneview')
self.config(password='password', group='oneview')
mgr_utils.mock_the_extension_manager(driver='fake_oneview')
self.driver = driver_factory.get_driver('fake_oneview')
self.node = obj_utils.create_test_node(
self.context, driver='fake_oneview',
properties=db_utils.get_test_oneview_properties(),
driver_info=db_utils.get_test_oneview_driver_info(),
)
self.info = common.get_oneview_info(self.node)
@mock.patch.object(common, 'validate_oneview_resources_compatibility',
                       spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'is_node_in_use_by_oneview',
                       spec_set=True, autospec=True)
def test_power_interface_validate(self, mock_is_node_in_use_by_oneview,
mock_validate, mock_get_ov_client):
mock_is_node_in_use_by_oneview.return_value = False
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.power.validate(task)
self.assertTrue(mock_validate.called)
def test_power_interface_validate_fail(self, mock_get_ov_client):
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
id=999,
driver='fake_oneview')
with task_manager.acquire(self.context, node.uuid) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.power.validate, task)
@mock.patch.object(common, 'validate_oneview_resources_compatibility',
                       spec_set=True, autospec=True)
def test_power_interface_validate_fail_exception(self, mock_validate,
mock_get_ov_client):
mock_validate.side_effect = exception.OneViewError('message')
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.validate,
task)
@mock.patch.object(common, 'validate_oneview_resources_compatibility',
                       spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'is_node_in_use_by_oneview',
                       spec_set=True, autospec=True)
def test_power_validate_fail_node_used_by_oneview(
self, mock_is_node_in_use_by_oneview, mock_validate,
mock_get_ov_client):
mock_validate.return_value = True
mock_is_node_in_use_by_oneview.return_value = True
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.validate,
task)
@mock.patch.object(common, 'validate_oneview_resources_compatibility',
                       spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'is_node_in_use_by_oneview',
                       spec_set=True, autospec=True)
def test_validate_fail_node_in_use_by_oneview(
self, mock_is_node_in_use_by_oneview, mock_validate,
mock_get_ov_client):
mock_validate.return_value = True
mock_is_node_in_use_by_oneview.side_effect = (
exception.OneViewError('message'))
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.validate,
task)
def test_power_interface_get_properties(self, mock_get_ov_client):
expected = common.COMMON_PROPERTIES
self.assertItemsEqual(expected, self.driver.power.get_properties())
def test_get_power_state(self, mock_get_ov_client):
oneview_client = mock_get_ov_client()
oneview_client.get_node_power_state.return_value = POWER_ON
self.driver.power.oneview_client = oneview_client
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.power.get_power_state(task)
oneview_client.get_node_power_state.assert_called_once_with(self.info)
def test_get_power_state_fail(self, mock_get_ov_client):
oneview_client = mock_get_ov_client()
oneview_client.get_node_power_state.side_effect = \
oneview_exceptions.OneViewException()
self.driver.power.oneview_client = oneview_client
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(
exception.OneViewError,
self.driver.power.get_power_state,
task
)
def test_set_power_on(self, mock_get_ov_client):
sp_uri = '/any/server-profile'
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = sp_uri
oneview_client = mock_get_ov_client.return_value
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
oneview_client.power_on.return_value = POWER_ON
self.driver.power.oneview_client = oneview_client
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = sp_uri
task.node.driver_info = driver_info
self.driver.power.set_power_state(task, states.POWER_ON)
self.info['applied_server_profile_uri'] = sp_uri
oneview_client.power_on.assert_called_once_with(self.info)
def test_set_power_off(self, mock_get_ov_client):
sp_uri = '/any/server-profile'
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = sp_uri
oneview_client = mock_get_ov_client.return_value
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
oneview_client.power_off.return_value = POWER_OFF
self.driver.power.oneview_client = oneview_client
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = sp_uri
task.node.driver_info = driver_info
self.driver.power.set_power_state(task, states.POWER_OFF)
self.info['applied_server_profile_uri'] = sp_uri
oneview_client.power_off.assert_called_once_with(self.info)
def test_set_power_on_fail(self, mock_get_ov_client):
sp_uri = '/any/server-profile'
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = sp_uri
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
exc = oneview_exceptions.OneViewException()
oneview_client.power_on.side_effect = exc
self.driver.power.oneview_client = oneview_client
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = sp_uri
task.node.driver_info = driver_info
self.assertRaises(exception.OneViewError,
self.driver.power.set_power_state, task,
states.POWER_ON)
self.info['applied_server_profile_uri'] = sp_uri
oneview_client.power_on.assert_called_once_with(self.info)
def test_set_power_off_fail(self, mock_get_ov_client):
sp_uri = '/any/server-profile'
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = sp_uri
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
exc = oneview_exceptions.OneViewException()
oneview_client.power_off.side_effect = exc
self.driver.power.oneview_client = oneview_client
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = sp_uri
task.node.driver_info = driver_info
self.assertRaises(exception.OneViewError,
self.driver.power.set_power_state, task,
states.POWER_OFF)
self.info['applied_server_profile_uri'] = sp_uri
oneview_client.power_off.assert_called_once_with(self.info)
def test_set_power_invalid_state(self, mock_get_ov_client):
sp_uri = '/any/server-profile'
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = sp_uri
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
exc = oneview_exceptions.OneViewException()
oneview_client.power_off.side_effect = exc
self.driver.power.oneview_client = oneview_client
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = sp_uri
task.node.driver_info = driver_info
self.assertRaises(exception.InvalidParameterValue,
self.driver.power.set_power_state, task,
'fake state')
def test_set_power_reboot(self, mock_get_ov_client):
sp_uri = '/any/server-profile'
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = sp_uri
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
oneview_client.power_off.return_value = POWER_OFF
oneview_client.power_on.return_value = POWER_ON
self.driver.power.oneview_client = oneview_client
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = sp_uri
task.node.driver_info = driver_info
self.driver.power.set_power_state(task, states.REBOOT)
self.info['applied_server_profile_uri'] = sp_uri
            oneview_client.power_off.assert_called_once_with(self.info)
oneview_client.power_on.assert_called_once_with(self.info)
def test_reboot(self, mock_get_ov_client):
sp_uri = '/any/server-profile'
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = sp_uri
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
oneview_client.power_off.return_value = POWER_OFF
oneview_client.power_on.return_value = POWER_ON
self.driver.power.oneview_client = oneview_client
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = sp_uri
task.node.driver_info = driver_info
self.driver.power.reboot(task)
self.info['applied_server_profile_uri'] = sp_uri
oneview_client.power_off.assert_called_once_with(self.info)
oneview_client.power_on.assert_called_once_with(self.info)
def test_reboot_fail(self, mock_get_ov_client):
sp_uri = '/any/server-profile'
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = sp_uri
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
exc = oneview_exceptions.OneViewException()
oneview_client.power_off.side_effect = exc
self.driver.power.oneview_client = oneview_client
with task_manager.acquire(self.context,
self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = sp_uri
task.node.driver_info = driver_info
self.assertRaises(exception.OneViewError,
self.driver.power.reboot, task)
self.info['applied_server_profile_uri'] = sp_uri
oneview_client.power_off.assert_called_once_with(self.info)
self.assertFalse(oneview_client.power_on.called)
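# Standalone sketch (illustrative only, not used by the test case above) of
# the strict-mocking flags these tests pass to mock.patch.object: with
# autospec=True the patched method keeps its real signature (so the instance
# is recorded as the first call argument), and spec_set=True forbids setting
# attributes the real object does not have. The _FakePower class below is a
# made-up stand-in, not an Ironic interface.
if __name__ == '__main__':
    class _FakePower(object):
        def validate(self, task):
            return True
    with mock.patch.object(_FakePower, 'validate',
                           spec_set=True, autospec=True) as fake_validate:
        _FakePower().validate('task')
        fake_validate.assert_called_once_with(mock.ANY, 'task')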
|
|
import datetime
import hashlib
import random
import re
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db import transaction
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.timezone import now as datetime_now
except ImportError:
datetime_now = datetime.datetime.now
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
def create_inactive_user(self, new_user,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.commit_on_success(create_inactive_user)
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
username = user.username
if isinstance(username, unicode):
username = username.encode('utf-8')
activation_key = hashlib.sha1(salt + username).hexdigest()
profile = self.create(user=user, activation_key=activation_key)
return profile
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
        1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
try:
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
user.delete()
profile.delete()
except User.DoesNotExist:
profile.delete()
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.ForeignKey(User, unique=True, verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __unicode__(self):
return u"Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return self.activation_key == self.ACTIVATED or \
(self.user.date_joined + expiration_date <= datetime_now())
activation_key_expired.boolean = True
def send_activation_email(self, site):
"""
Send an activation email to the user associated with this
``RegistrationProfile``.
The activation email will make use of two templates:
``registration/activation_email_subject.txt``
This template will be used for the subject line of the
email. Because it is used as the subject line of an email,
this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined
into only a single line.
``registration/activation_email.txt``
This template will be used for the body of the email.
These templates will each receive the following context
variables:
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may
be activated.
``site``
An object representing the site on which the user
registered; depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
"""
ctx_dict = {'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site}
subject = render_to_string('activation_email_subject.txt',
ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('activation_email.txt',
ctx_dict)
self.user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
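# Standalone sketch (illustrative only, not used by the models above, and
# written in the Python 2 idiom this module uses) of the activation-key
# scheme ``create_profile`` implements: a SHA1 hex digest over a short random
# salt concatenated with the username, which is exactly what ``SHA1_RE``
# validates in ``activate_user``. The username below is made up.
if __name__ == '__main__':
    demo_salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
    demo_key = hashlib.sha1(demo_salt + 'alice').hexdigest()
    assert SHA1_RE.search(demo_key)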
|
|
from os import path as op
import numpy as np
from numpy.polynomial import legendre
from numpy.testing import (assert_allclose, assert_array_equal, assert_equal,
assert_array_almost_equal)
from nose.tools import assert_raises, assert_true
from mne.forward import _make_surface_mapping, make_field_map
from mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg,
_get_legen_table,
_get_legen_lut_fast,
_get_legen_lut_accurate,
_do_cross_dots)
from mne.forward._make_forward import _create_meg_coils
from mne.forward._field_interpolation import _setup_dots
from mne.surface import get_meg_helmet_surf, get_head_surf
from mne.datasets import testing
from mne import read_evokeds, pick_types
from mne.fixes import partial
from mne.externals.six.moves import zip
from mne.utils import run_tests_if_main, slow_test
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
data_path = testing.data_path(download=False)
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = op.join(data_path, 'subjects')
def test_legendre_val():
"""Test Legendre polynomial (derivative) equivalence
"""
rng = np.random.RandomState(0)
# check table equiv
xs = np.linspace(-1., 1., 1000)
n_terms = 100
# True, numpy
vals_np = legendre.legvander(xs, n_terms - 1)
# Table approximation
for fun, nc in zip([_get_legen_lut_fast, _get_legen_lut_accurate],
[100, 50]):
lut, n_fact = _get_legen_table('eeg', n_coeff=nc, force_calc=True)
vals_i = fun(xs, lut)
# Need a "1:" here because we omit the first coefficient in our table!
assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i,
rtol=1e-2, atol=5e-3)
# Now let's look at our sums
ctheta = rng.rand(20, 30) * 2.0 - 1.0
beta = rng.rand(20, 30) * 0.8
lut_fun = partial(fun, lut=lut)
c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact)
c1.shape = beta.shape
# compare to numpy
n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]
coeffs = np.zeros((n_terms,) + beta.shape)
coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0) *
(2.0 * n + 1.0) * (2.0 * n + 1.0) / n)
# can't use tensor=False here b/c it isn't in old numpy
c2 = np.empty((20, 30))
for ci1 in range(20):
for ci2 in range(30):
c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2],
coeffs[:, ci1, ci2])
assert_allclose(c1, c2, 1e-2, 1e-3) # close enough...
# compare fast and slow for MEG
ctheta = rng.rand(20 * 30) * 2.0 - 1.0
beta = rng.rand(20 * 30) * 0.8
lut, n_fact = _get_legen_table('meg', n_coeff=10, force_calc=True)
fun = partial(_get_legen_lut_fast, lut=lut)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
lut, n_fact = _get_legen_table('meg', n_coeff=20, force_calc=True)
fun = partial(_get_legen_lut_accurate, lut=lut)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
def test_legendre_table():
"""Test Legendre table calculation
"""
# double-check our table generation
n = 10
for ch_type in ['eeg', 'meg']:
lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=25, force_calc=True)
lut1 = lut1[:, :n - 1].copy()
n_fact1 = n_fact1[:n - 1].copy()
lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n, force_calc=True)
assert_allclose(lut1, lut2)
assert_allclose(n_fact1, n_fact2)
@testing.requires_testing_data
def test_make_field_map_eeg():
"""Test interpolation of EEG field onto head
"""
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
evoked.info['bads'] = ['MEG 2443', 'EEG 053'] # add some bads
surf = get_head_surf('sample', subjects_dir=subjects_dir)
# we must have trans if surface is in MRI coords
assert_raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')
evoked.pick_types(meg=False, eeg=True)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
# trans is necessary for EEG only
assert_raises(RuntimeError, make_field_map, evoked, None,
subject='sample', subjects_dir=subjects_dir)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
assert_true(len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (642, 59)) # maps data onto surf
    assert_equal(len(fmd[0]['ch_names']), 59)
@testing.requires_testing_data
@slow_test
def test_make_field_map_meg():
"""Test interpolation of MEG field onto helmet | head
"""
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
info = evoked.info
surf = get_meg_helmet_surf(info)
# let's reduce the number of channels by a bunch to speed it up
info['bads'] = info['ch_names'][:200]
# bad ch_type
assert_raises(ValueError, _make_surface_mapping, info, surf, 'foo')
# bad mode
assert_raises(ValueError, _make_surface_mapping, info, surf, 'meg',
mode='foo')
# no picks
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
assert_raises(RuntimeError, _make_surface_mapping, evoked_eeg.info,
surf, 'meg')
# bad surface def
nn = surf['nn']
del surf['nn']
assert_raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['nn'] = nn
cf = surf['coord_frame']
del surf['coord_frame']
assert_raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['coord_frame'] = cf
# now do it with make_field_map
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj() # avoid projection warnings
fmd = make_field_map(evoked, None,
subject='sample', subjects_dir=subjects_dir)
assert_true(len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (304, 106)) # maps data onto surf
    assert_equal(len(fmd[0]['ch_names']), 106)
assert_raises(ValueError, make_field_map, evoked, ch_type='foobar')
# now test the make_field_map on head surf for MEG
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj()
fmd = make_field_map(evoked, trans_fname, meg_surf='head',
subject='sample', subjects_dir=subjects_dir)
assert_true(len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (642, 106)) # maps data onto surf
    assert_equal(len(fmd[0]['ch_names']), 106)
assert_raises(ValueError, make_field_map, evoked, meg_surf='foobar',
subjects_dir=subjects_dir, trans=trans_fname)
@testing.requires_testing_data
def test_make_field_map_meeg():
"""Test making a M/EEG field map onto helmet & head"""
evoked = read_evokeds(evoked_fname, baseline=(-0.2, 0.0))[0]
picks = pick_types(evoked.info, meg=True, eeg=True)
picks = picks[::10]
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.info.normalize_proj()
maps = make_field_map(evoked, trans_fname, subject='sample',
subjects_dir=subjects_dir, n_jobs=1)
assert_equal(maps[0]['data'].shape, (642, 6)) # EEG->Head
assert_equal(maps[1]['data'].shape, (304, 31)) # MEG->Helmet
# reasonable ranges
for map_ in maps:
assert_true(0.5 < map_['data'].max() < 2)
assert_true(-2 < map_['data'].min() < -0.5)
# calculated from correct looking mapping on 2015/12/26
assert_allclose(np.sqrt(np.sum(maps[0]['data'] ** 2)), 16.6088,
atol=1e-3, rtol=1e-3)
assert_allclose(np.sqrt(np.sum(maps[1]['data'] ** 2)), 20.1245,
atol=1e-3, rtol=1e-3)
def _setup_args(info):
"""Helper to test_as_meg_type_evoked."""
coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'])
int_rad, noise, lut_fun, n_fact = _setup_dots('fast', coils, 'meg')
my_origin = np.array([0., 0., 0.04])
args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin,
ch_type='meg', lut=lut_fun, n_fact=n_fact)
return args_dict
@testing.requires_testing_data
def test_as_meg_type_evoked():
"""Test interpolation of data on to virtual channels."""
# validation tests
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
assert_raises(ValueError, evoked.as_type, 'meg')
assert_raises(ValueError, evoked.copy().pick_types(meg='grad').as_type,
'meg')
# channel names
ch_names = evoked.info['ch_names']
virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1])
virt_evoked.info.normalize_proj()
virt_evoked = virt_evoked.as_type('mag')
assert_true(all('_virtual' in ch for ch in virt_evoked.info['ch_names']))
# pick from and to channels
evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3])
evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3])
info_from, info_to = evoked_from.info, evoked_to.info
# set up things
args1, args2 = _setup_args(info_from), _setup_args(info_to)
args1.update(coils2=args2['coils1'])
args2.update(coils2=args1['coils1'])
# test cross dots
cross_dots1 = _do_cross_dots(**args1)
cross_dots2 = _do_cross_dots(**args2)
assert_array_almost_equal(cross_dots1, cross_dots2.T)
# correlation test
evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy()
data1 = evoked.pick_types(meg='grad').data.ravel()
data2 = evoked.as_type('grad').data.ravel()
assert_true(np.corrcoef(data1, data2)[0, 1] > 0.95)
run_tests_if_main()
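# Standalone sketch (illustrative, not collected as a test): the numpy
# reference that the table-based Legendre evaluators are compared against in
# test_legendre_val(). legvander(x, deg) returns the pseudo-Vandermonde
# matrix whose columns are P_0..P_deg evaluated at x, so dotting it with a
# coefficient vector must agree with legval(). Values below are arbitrary.
def _legendre_vander_demo():
    xs = np.linspace(-1., 1., 11)
    coeffs = np.array([0., 1., 0.5, 0.25])
    vander = legendre.legvander(xs, len(coeffs) - 1)
    assert_allclose(vander.dot(coeffs), legendre.legval(xs, coeffs))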
|
|
import os.path
import socket
import logging
import time
import xmltodict
from runfolder import __version__ as version
from arteria.web.state import State
from arteria.web.state import validate_state
from runfolder.lib.instrument import InstrumentFactory
class RunfolderInfo:
"""
    Information about a runfolder. The state must be one of the values
    defined in RunfolderState.
"""
def __init__(self, host, path, state, metadata):
"""
Initializes the object
:param host: The host where the runfolder exists
:param path: The file system path to the runfolder on the host
        :param state: The state of the runfolder (see RunfolderState)
        :param metadata: Metadata (such as reagent kit and library tube
                         barcodes) extracted from the run parameters
"""
self.host = host
self.path = path
self.state = state
self.service_version = version
self.metadata = metadata
def __repr__(self):
return "{0}: {1}@{2}".format(self.state, self.path, self.host)
class RunfolderService:
"""Watches a set of directories on the server and reacts when one of them
has a runfolder that's ready for processing"""
def __init__(self, configuration_svc, logger=None):
self._configuration_svc = configuration_svc
self._logger = logger or logging.getLogger(__name__)
# NOTE: These methods were added so that they could be easily mocked out.
# It would probably be nicer to move them inline and mock the system calls
# or have them in a separate provider class required in the constructor
@staticmethod
def _host():
return socket.gethostname()
@staticmethod
def _file_exists(path):
return os.path.isfile(path)
@staticmethod
def _file_exists_and_is_older_than(path, minutes):
if not os.path.isfile(path):
return False
modification_time = os.path.getmtime(path)
return (time.time() - modification_time) >= minutes * 60
@staticmethod
def _dir_exists(path):
return os.path.isdir(path)
@staticmethod
def _subdirectories(path):
return os.listdir(path)
def _validate_is_being_monitored(self, path):
"""
Validate that this is a subdirectory (potentially non-existing)
of a monitored path
:raises PathNotMonitored
"""
def is_parent_dir(parent_dir, child_dir):
actual_parent = os.path.split(child_dir)[0]
return os.path.normpath(parent_dir) == actual_parent
monitored = list(self._monitored_directories())
is_monitored = any([is_parent_dir(mon, path) for mon in monitored])
if not is_monitored:
self._logger.warn("Validation error: {} is not monitored {}".format(path, monitored))
raise PathNotMonitored(
"The path '{}' is not being monitored.".format(path))
def create_runfolder(self, path):
"""
Creates a runfolder at the path.
Provided for integration tests only and only available if the
config value can_create_runfolder is True.
:raises PathNotMonitored
:raises DirectoryAlreadyExists
"""
self._requires_enabled("can_create_runfolder")
self._validate_is_being_monitored(path)
if os.path.exists(path):
raise DirectoryAlreadyExists("The path {0} already exists and can't be overridden".format(path))
os.makedirs(path)
self._logger.info(
"Created a runfolder at {0} - intended for tests only".format(path))
runparameters_path = os.path.join(path, "runParameters.xml")
if os.path.isfile(runparameters_path):
raise CannotOverrideFile("runParameters.xml already exists at {0}".format(runparameters_path))
runparameters_dict = {
'RunParameters': {
'ReagentKitBarcode': 'AB1234567-123V1',
'RfidsInfo': {
'LibraryTubeSerialBarcode': 'NV0012345-LIB'
},
'ScannerID': 'M04499'
}
}
output_xml = xmltodict.unparse(runparameters_dict, pretty=True)
with open(runparameters_path, 'a') as f:
f.write(output_xml)
self._logger.info(
"Added 'runParameters.xml' to '{0}' - intended for tests only".format(runparameters_path))
def add_sequencing_finished_marker(self, path):
"""
Adds the marker that sets the `ready` state of a runfolder.
This marker is generally added by the sequencer
Provided for integration tests only and only available if the config value
can_create_runfolder is set to True.
:raises DirectoryDoesNotExist
:raises CannotOverrideFile
"""
self._requires_enabled("can_create_runfolder")
if not os.path.isdir(path):
raise DirectoryDoesNotExist(
"The path '{0}' is not an existing directory".format(path))
full_path = os.path.join(path, "RTAComplete.txt")
if os.path.isfile(full_path):
raise CannotOverrideFile("The complete marker already exists at {0}".format(full_path))
open(full_path, 'a').close()
self._logger.info(
"Added the 'RTAComplete.txt' marker to '{0}' - intended for tests only".format(full_path))
def get_runfolder_by_path(self, path):
"""
Returns a RunfolderInfo by its Linux file path
:raises PathNotMonitored
:raises DirectoryDoesNotExist
"""
self._logger.debug("get_runfolder_by_path({0})".format(path))
self._validate_is_being_monitored(path)
if not self._dir_exists(path):
raise DirectoryDoesNotExist("Directory does not exist: '{0}'".format(path))
info = RunfolderInfo(self._host(), path, self.get_runfolder_state(path),
self.get_metadata(path))
return info
def _get_runfolder_state_from_state_file(self, runfolder):
"""
Reads the state in the state file at .arteria/state, returns
State.NONE if nothing is available
"""
state_file = os.path.join(runfolder, ".arteria", "state")
if self._file_exists(state_file):
with open(state_file, 'r') as f:
state = f.read()
state = state.strip()
return state
else:
return State.NONE
def get_runfolder_state(self, runfolder):
"""
        Returns the state of a runfolder. The possible states are defined in
        State.
        If the file .arteria/state exists, it determines the state. Otherwise
        the runfolder is READY once the instrument's completed marker file
        (e.g. RTAComplete.txt) exists and is older than the configured grace
        period.
"""
instrument = InstrumentFactory.get_instrument(self.read_run_parameters(runfolder))
completed_marker_file = instrument.completed_marker_file()
completed_grace_minutes = None
try:
completed_grace_minutes = self._configuration_svc["completed_marker_grace_minutes"]
except KeyError:
pass
if completed_grace_minutes is None:
completed_grace_minutes = 0
state = self._get_runfolder_state_from_state_file(runfolder)
if state == State.NONE:
ready = True
completed_marker = os.path.join(runfolder, completed_marker_file)
if not self._file_exists_and_is_older_than(completed_marker, completed_grace_minutes):
ready = False
if ready:
state = State.READY
return state
@staticmethod
def set_runfolder_state(runfolder, state):
"""
Sets the state of a runfolder
:raises DirectoryDoesNotExist
"""
validate_state(state)
arteria_dir = os.path.join(runfolder, ".arteria")
state_file = os.path.join(arteria_dir, "state")
if not os.path.exists(runfolder):
raise DirectoryDoesNotExist(
"Directory does not exist: '{0}'".format(runfolder))
if not os.path.exists(arteria_dir):
os.makedirs(arteria_dir)
with open(state_file, 'w') as f:
f.write(state)
def is_runfolder_ready(self, directory):
"""Returns True if the runfolder is ready"""
state = self.get_runfolder_state(directory)
return state == State.READY
def _monitored_directories(self):
"""Lists all directories monitored for new runfolders"""
monitored = self._configuration_svc["monitored_directories"]
if (monitored is not None) and (type(monitored) is not list):
raise ConfigurationError("monitored_directories must be a list")
for directory in monitored:
yield os.path.abspath(directory)
def next_runfolder(self):
"""Returns the next available runfolder. Returns None if there is none available."""
available = self.list_runfolders(state=State.READY)
try:
first = next(available)
except StopIteration:
first = None
self._logger.info(
"Searching for next available runfolder, found: {0}".format(first))
return first
def list_available_runfolders(self):
return self.list_runfolders(State.READY)
def list_runfolders(self, state):
"""
Lists all the runfolders on the host, filtered by state. State
can be any of the values in RunfolderState. Specify None for no filtering.
"""
runfolders = self._enumerate_runfolders()
if state:
validate_state(state)
return (runfolder for runfolder in runfolders if runfolder.state == state)
else:
return runfolders
def _enumerate_runfolders(self):
"""Enumerates all runfolders in any monitored directory"""
for monitored_root in self._monitored_directories():
self._logger.debug("Checking subdirectories of {0}".format(monitored_root))
for subdir in self._subdirectories(monitored_root):
directory = os.path.join(monitored_root, subdir)
self._logger.debug("Found potential runfolder {0}".format(directory))
state = self.get_runfolder_state(directory)
info = RunfolderInfo(self._host(), directory, state,
self.get_metadata(directory))
yield info
def _requires_enabled(self, config_key):
"""Raises an ActionNotEnabled exception if the specified config value is false"""
if not self._configuration_svc[config_key]:
raise ActionNotEnabled("The action {0} is not enabled".format(config_key))
def get_metadata(self, path):
run_parameters = self.read_run_parameters(path)
reagent_kit_barcode = self.get_reagent_kit_barcode(path, run_parameters)
library_tube_barcode = self.get_library_tube_barcode(path, run_parameters)
metadata = {}
if reagent_kit_barcode:
metadata['reagent_kit_barcode'] = reagent_kit_barcode
if library_tube_barcode:
metadata['library_tube_barcode'] = library_tube_barcode
return metadata
def get_reagent_kit_barcode(self, path, run_parameters):
try:
barcode = run_parameters['RunParameters']['ReagentKitBarcode']
except KeyError:
# Reagent kit barcode is not available for all run types,
# it is therefore expected to not be found in all cases
self._logger.debug("Reagent kit barcode not found")
return None
except TypeError:
self._logger.debug("[Rr]unParameters.xml not found")
return None
return barcode
def get_library_tube_barcode(self, path, run_parameters):
try:
barcode = run_parameters['RunParameters']['RfidsInfo']['LibraryTubeSerialBarcode']
except KeyError:
# Library tube barcode is not available for all run types,
# it is therefore expected to not be found in all cases
self._logger.debug("Library tube barcode not found")
return None
except TypeError:
self._logger.debug("[Rr]unParameters.xml not found")
return None
return barcode
def read_run_parameters(self, path):
alt_1 = os.path.join(path, "runParameters.xml")
alt_2 = os.path.join(path, "RunParameters.xml")
if os.path.exists(alt_1):
with open(alt_1) as f:
return xmltodict.parse(f.read())
elif os.path.exists(alt_2):
with open(alt_2) as f:
return xmltodict.parse(f.read())
else:
return None
class CannotOverrideFile(Exception):
pass
class DirectoryDoesNotExist(Exception):
pass
class PathNotMonitored(Exception):
pass
class DirectoryAlreadyExists(Exception):
pass
class ActionNotEnabled(Exception):
pass
class InvalidRunfolderState(Exception):
pass
class ConfigurationError(Exception):
pass
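# Illustrative usage sketch (assumptions: a plain dict can stand in for the
# configuration service, since RunfolderService only indexes it, and the
# directory below is a throwaway temp dir). It round-trips the
# ``.arteria/state`` marker written by set_runfolder_state() through
# _get_runfolder_state_from_state_file().
if __name__ == '__main__':
    import tempfile
    demo_dir = tempfile.mkdtemp()
    RunfolderService.set_runfolder_state(demo_dir, State.READY)
    demo_service = RunfolderService({"monitored_directories": [],
                                     "can_create_runfolder": False})
    assert demo_service._get_runfolder_state_from_state_file(demo_dir) == State.READY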
|
|
"""
Updated on 19.12.2009
@author: alen, pinda
Inspired by:
http://github.com/leah/python-oauth/blob/master/oauth/example/client.py
http://github.com/facebook/tornado/blob/master/tornado/auth.py
"""
import time
import base64
import urllib
import urllib2
# parse_qsl was moved from the cgi namespace to urlparse in Python2.6.
# this allows backwards compatibility
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from xml.dom import minidom
import oauth2 as oauth
from openid.consumer import consumer as openid
from openid.consumer.discover import DiscoveryFailure
from openid.store.interface import OpenIDStore as OIDStore
from openid.association import Association as OIDAssociation
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils.translation import gettext as _
from django.conf import settings
from django.utils import simplejson
from django.contrib.sites.models import Site
from socialregistration.models import OpenIDStore as OpenIDStoreModel, OpenIDNonce
from urlparse import urlparse
USE_HTTPS = bool(getattr(settings, 'SOCIALREGISTRATION_USE_HTTPS', False))
def _https():
if USE_HTTPS:
return 's'
else:
return ''
class OpenIDStore(OIDStore):
max_nonce_age = 6 * 60 * 60
def storeAssociation(self, server_url, assoc=None):
stored_assoc = OpenIDStoreModel.objects.create(
server_url=server_url,
handle=assoc.handle,
secret=base64.encodestring(assoc.secret),
issued=assoc.issued,
            lifetime=assoc.lifetime,
assoc_type=assoc.assoc_type
)
def getAssociation(self, server_url, handle=None):
stored_assocs = OpenIDStoreModel.objects.filter(
server_url=server_url
)
if handle:
stored_assocs = stored_assocs.filter(handle=handle)
        stored_assocs = stored_assocs.order_by('-issued')
if stored_assocs.count() == 0:
return None
return_val = None
for stored_assoc in stored_assocs:
assoc = OIDAssociation(
stored_assoc.handle, base64.decodestring(stored_assoc.secret),
stored_assoc.issued, stored_assoc.lifetime, stored_assoc.assoc_type
)
if assoc.getExpiresIn() == 0:
stored_assoc.delete()
else:
if return_val is None:
return_val = assoc
return return_val
def removeAssociation(self, server_url, handle):
stored_assocs = OpenIDStoreModel.objects.filter(
server_url=server_url
)
if handle:
stored_assocs = stored_assocs.filter(handle=handle)
stored_assocs.delete()
def useNonce(self, server_url, timestamp, salt):
try:
nonce = OpenIDNonce.objects.get(
server_url=server_url,
timestamp=timestamp,
salt=salt
)
except OpenIDNonce.DoesNotExist:
nonce = OpenIDNonce.objects.create(
server_url=server_url,
timestamp=timestamp,
salt=salt
)
return True
return False
class OpenID(object):
def __init__(self, request, return_to, endpoint):
"""
@param request: : django.http.HttpRequest object
@param return_to: URL to redirect back to once the user authenticated
the application on the OpenID provider
@param endpoint: URL to the OpenID provider we're connecting to
"""
self.request = request
self.return_to = return_to
self.endpoint = endpoint
self.store = OpenIDStore()
self.consumer = openid.Consumer(self.request.session, self.store)
self.result = None
def get_redirect(self):
auth_request = self.consumer.begin(self.endpoint)
redirect_url = auth_request.redirectURL(
'http%s://%s/' % (_https(), Site.objects.get_current().domain),
self.return_to
)
return HttpResponseRedirect(redirect_url)
def complete(self):
self.result = self.consumer.complete(
dict(self.request.GET.items()),
'http%s://%s%s' % (_https(), Site.objects.get_current(),
self.request.path)
)
def is_valid(self):
if self.result is None:
self.complete()
return self.result.status == openid.SUCCESS
def get_token_prefix(url):
"""
    Returns a prefix for the token to store in the session so we can hold
    more than one oauth provider's access key in the session.
Example:
The request token url ``http://twitter.com/oauth/request_token``
returns ``twitter.com``
"""
return urllib2.urlparse.urlparse(url).netloc
class OAuthError(Exception):
pass
class OAuthClient(object):
def __init__(self, request, consumer_key, consumer_secret, request_token_url,
access_token_url, authorization_url, callback_url, parameters=None):
self.request = request
self.request_token_url = request_token_url
self.access_token_url = access_token_url
self.authorization_url = authorization_url
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.consumer = oauth.Consumer(consumer_key, consumer_secret)
self.client = oauth.Client(self.consumer)
self.signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.parameters = parameters
self.callback_url = callback_url
self.errors = []
self.request_token = None
self.access_token = None
def _get_request_token(self):
"""
Obtain a temporary request token to authorize an access token and to
sign the request to obtain the access token
"""
if self.request_token is None:
response, content = self.client.request(self.request_token_url, "GET")
if response['status'] != '200':
raise OAuthError(
_('Invalid response while obtaining request token from "%s".') % get_token_prefix(self.request_token_url))
self.request_token = dict(parse_qsl(content))
self.request.session['oauth_%s_request_token' % get_token_prefix(self.request_token_url)] = self.request_token
return self.request_token
def _get_access_token(self):
"""
Obtain the access token to access private resources at the API endpoint.
"""
if self.access_token is None:
request_token = self._get_rt_from_session()
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
self.client = oauth.Client(self.consumer, token)
response, content = self.client.request(self.access_token_url, "GET")
if response['status'] != '200':
raise OAuthError(
_('Invalid response while obtaining access token from "%s".') % get_token_prefix(self.request_token_url))
self.access_token = dict(parse_qsl(content))
self.request.session['oauth_%s_access_token' % get_token_prefix(self.request_token_url)] = self.access_token
return self.access_token
def _get_rt_from_session(self):
"""
Returns the request token cached in the session by ``_get_request_token``
"""
try:
return self.request.session['oauth_%s_request_token' % get_token_prefix(self.request_token_url)]
except KeyError:
raise OAuthError(_('No request token saved for "%s".') % get_token_prefix(self.request_token_url))
def _get_authorization_url(self):
request_token = self._get_request_token()
return '%s?oauth_token=%s&oauth_callback=%s' % (self.authorization_url,
request_token['oauth_token'], '%s%s' % (Site.objects.get_current().domain,
reverse(self.callback_url)))
def is_valid(self):
try:
self._get_rt_from_session()
self._get_access_token()
except OAuthError, e:
self.errors.append(e.args[0])
return False
return True
def get_redirect(self):
"""
        Returns a ``HttpResponseRedirect`` object that redirects the user to
        the URL where the OAuth provider handles authorization.
"""
return HttpResponseRedirect(self._get_authorization_url())
class OAuth(object):
"""
Base class to perform oauth signed requests from access keys saved in a user's
session.
See the ``OAuthTwitter`` class below for an example.
"""
def __init__(self, request, consumer_key, secret_key, request_token_url):
self.request = request
self.consumer_key = consumer_key
self.secret_key = secret_key
self.consumer = oauth.Consumer(consumer_key, secret_key)
self.request_token_url = request_token_url
def _get_at_from_session(self):
"""
Get the saved access token for private resources from the session.
"""
try:
return self.request.session['oauth_%s_access_token' % get_token_prefix(self.request_token_url)]
except KeyError:
raise OAuthError(
_('No access token saved for "%s".') % get_token_prefix(self.request_token_url))
def query(self, url, method="GET", params=dict(), headers=dict()):
"""
        Request an API endpoint at ``url`` with ``params`` being either the
POST or GET data.
"""
access_token = self._get_at_from_session()
token = oauth.Token(access_token['oauth_token'], access_token['oauth_token_secret'])
client = oauth.Client(self.consumer, token)
body = urllib.urlencode(params)
response, content = client.request(url, method=method, headers=headers,
body=body)
if response['status'] != '200':
raise OAuthError(
_('No access to private resources at "%s".') % get_token_prefix(self.request_token_url))
return content
class OAuthTwitter(OAuth):
"""
Verifying twitter credentials
"""
url = 'https://twitter.com/account/verify_credentials.json'
def get_user_info(self):
user = simplejson.loads(self.query(self.url))
return user
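# Illustrative sketch (not used by the classes above): get_token_prefix()
# keys the session entries for request and access tokens by the provider's
# network location, so different OAuth providers never clobber each other's
# tokens. The URLs below are examples only.
if __name__ == '__main__':
    assert get_token_prefix('http://twitter.com/oauth/request_token') == 'twitter.com'
    assert get_token_prefix('https://api.linkedin.com/uas/oauth/requestToken') == 'api.linkedin.com'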
|
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import base64
import datetime
from hashlib import sha256
from hashlib import sha1
import hmac
import logging
from email.utils import formatdate
from operator import itemgetter
import functools
import time
import calendar
from botocore.exceptions import NoCredentialsError
from botocore.utils import normalize_url_path, percent_encode_sequence
from botocore.compat import HTTPHeaders
from botocore.compat import quote, unquote, urlsplit, parse_qs
from botocore.compat import urlunsplit
from botocore.compat import encodebytes
from botocore.compat import six
from botocore.compat import json
from botocore.compat import MD5_AVAILABLE
logger = logging.getLogger(__name__)
EMPTY_SHA256_HASH = (
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
# This is the buffer size used when calculating sha256 checksums.
# Experimenting with various buffer sizes showed that this value generally
# gave the best result (in terms of performance).
PAYLOAD_BUFFER = 1024 * 1024
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
SIGV4_TIMESTAMP = '%Y%m%dT%H%M%SZ'
SIGNED_HEADERS_BLACKLIST = [
'expect',
'user-agent'
]
class BaseSigner(object):
REQUIRES_REGION = False
def add_auth(self, request):
raise NotImplementedError("add_auth")
class SigV2Auth(BaseSigner):
"""
Sign a request with Signature V2.
"""
def __init__(self, credentials):
self.credentials = credentials
def calc_signature(self, request, params):
logger.debug("Calculating signature using v2 auth.")
split = urlsplit(request.url)
path = split.path
if len(path) == 0:
path = '/'
string_to_sign = '%s\n%s\n%s\n' % (request.method,
split.netloc,
path)
lhmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha256)
pairs = []
for key in sorted(params):
# Any previous signature should not be a part of this
# one, so we skip that particular key. This prevents
# issues during retries.
if key == 'Signature':
continue
value = six.text_type(params[key])
pairs.append(quote(key.encode('utf-8'), safe='') + '=' +
quote(value.encode('utf-8'), safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
logger.debug('String to sign: %s', string_to_sign)
lhmac.update(string_to_sign.encode('utf-8'))
b64 = base64.b64encode(lhmac.digest()).strip().decode('utf-8')
return (qs, b64)
def add_auth(self, request):
# The auth handler is the last thing called in the
# preparation phase of a prepared request.
# Because of this we have to parse the query params
# from the request body so we can update them with
# the sigv2 auth params.
if self.credentials is None:
raise NoCredentialsError
if request.data:
# POST
params = request.data
else:
# GET
            params = request.params
params['AWSAccessKeyId'] = self.credentials.access_key
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'
params['Timestamp'] = time.strftime(ISO8601, time.gmtime())
if self.credentials.token:
params['SecurityToken'] = self.credentials.token
qs, signature = self.calc_signature(request, params)
params['Signature'] = signature
return request
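# Standalone sketch (dummy values, not used by the signers in this module) of
# the V2 string-to-sign layout that SigV2Auth.calc_signature() builds: the
# HTTP method, host and path on their own lines, followed by the sorted,
# percent-encoded query string that then gets HMAC-SHA256 signed.
if __name__ == '__main__':
    _demo_params = {'Action': 'DescribeRegions', 'Version': '2014-06-15'}
    _demo_qs = '&'.join(
        quote(key.encode('utf-8'), safe='') + '=' +
        quote(six.text_type(value).encode('utf-8'), safe='-_~')
        for key, value in sorted(_demo_params.items()))
    _demo_string_to_sign = 'GET\nec2.amazonaws.com\n/\n' + _demo_qs
    assert _demo_string_to_sign.splitlines()[3] == (
        'Action=DescribeRegions&Version=2014-06-15')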
class SigV3Auth(BaseSigner):
def __init__(self, credentials):
self.credentials = credentials
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
if 'Date' in request.headers:
del request.headers['Date']
request.headers['Date'] = formatdate(usegmt=True)
if self.credentials.token:
if 'X-Amz-Security-Token' in request.headers:
del request.headers['X-Amz-Security-Token']
request.headers['X-Amz-Security-Token'] = self.credentials.token
new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha256)
new_hmac.update(request.headers['Date'].encode('utf-8'))
encoded_signature = encodebytes(new_hmac.digest()).strip()
signature = ('AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s' %
(self.credentials.access_key, 'HmacSHA256',
encoded_signature.decode('utf-8')))
if 'X-Amzn-Authorization' in request.headers:
del request.headers['X-Amzn-Authorization']
request.headers['X-Amzn-Authorization'] = signature
class SigV4Auth(BaseSigner):
"""
Sign a request with Signature V4.
"""
REQUIRES_REGION = True
def __init__(self, credentials, service_name, region_name):
self.credentials = credentials
        # We initialize these values here so the unit tests can have
        # valid values, but they will get overridden in ``add_auth``
        # later for real requests.
self._region_name = region_name
self._service_name = service_name
def _sign(self, key, msg, hex=False):
if hex:
sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
else:
sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()
return sig
def headers_to_sign(self, request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
header_map = HTTPHeaders()
split = urlsplit(request.url)
for name, value in request.headers.items():
lname = name.lower()
if lname not in SIGNED_HEADERS_BLACKLIST:
header_map[lname] = value
if 'host' not in header_map:
header_map['host'] = split.netloc
return header_map
def canonical_query_string(self, request):
# The query string can come from two parts. One is the
# params attribute of the request. The other is from the request
# url (in which case we have to re-split the url into its components
# and parse out the query string component).
if request.params:
return self._canonical_query_string_params(request.params)
else:
return self._canonical_query_string_url(urlsplit(request.url))
def _canonical_query_string_params(self, params):
l = []
for param in sorted(params):
value = str(params[param])
l.append('%s=%s' % (quote(param, safe='-_.~'),
quote(value, safe='-_.~')))
cqs = '&'.join(l)
return cqs
def _canonical_query_string_url(self, parts):
canonical_query_string = ''
if parts.query:
# [(key, value), (key2, value2)]
key_val_pairs = []
for pair in parts.query.split('&'):
key, _, value = pair.partition('=')
key_val_pairs.append((key, value))
sorted_key_vals = []
# Sort by the key names, and in the case of
# repeated keys, then sort by the value.
for key, value in sorted(key_val_pairs):
sorted_key_vals.append('%s=%s' % (key, value))
canonical_query_string = '&'.join(sorted_key_vals)
return canonical_query_string
def canonical_headers(self, headers_to_sign):
"""
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
headers = []
sorted_header_names = sorted(set(headers_to_sign))
for key in sorted_header_names:
value = ','.join(v.strip() for v in
sorted(headers_to_sign.get_all(key)))
headers.append('%s:%s' % (key, value))
return '\n'.join(headers)
def signed_headers(self, headers_to_sign):
l = ['%s' % n.lower().strip() for n in set(headers_to_sign)]
l = sorted(l)
return ';'.join(l)
def payload(self, request):
if request.body and hasattr(request.body, 'seek'):
position = request.body.tell()
read_chunksize = functools.partial(request.body.read,
PAYLOAD_BUFFER)
checksum = sha256()
for chunk in iter(read_chunksize, b''):
checksum.update(chunk)
hex_checksum = checksum.hexdigest()
request.body.seek(position)
return hex_checksum
elif request.body:
# The request serialization has ensured that
# request.body is a bytes() type.
return sha256(request.body).hexdigest()
else:
return EMPTY_SHA256_HASH
def canonical_request(self, request):
cr = [request.method.upper()]
path = self._normalize_url_path(urlsplit(request.url).path)
cr.append(path)
cr.append(self.canonical_query_string(request))
headers_to_sign = self.headers_to_sign(request)
cr.append(self.canonical_headers(headers_to_sign) + '\n')
cr.append(self.signed_headers(headers_to_sign))
if 'X-Amz-Content-SHA256' in request.headers:
body_checksum = request.headers['X-Amz-Content-SHA256']
else:
body_checksum = self.payload(request)
cr.append(body_checksum)
return '\n'.join(cr)
def _normalize_url_path(self, path):
normalized_path = quote(normalize_url_path(path), safe='/~')
return normalized_path
def scope(self, request):
scope = [self.credentials.access_key]
scope.append(request.context['timestamp'][0:8])
scope.append(self._region_name)
scope.append(self._service_name)
scope.append('aws4_request')
return '/'.join(scope)
def credential_scope(self, request):
scope = []
scope.append(request.context['timestamp'][0:8])
scope.append(self._region_name)
scope.append(self._service_name)
scope.append('aws4_request')
return '/'.join(scope)
def string_to_sign(self, request, canonical_request):
"""
        Return the canonical StringToSign for the request: the algorithm
        name, the request timestamp, the credential scope and the SHA256
        hash of the canonical request, joined by newlines.
"""
sts = ['AWS4-HMAC-SHA256']
sts.append(request.context['timestamp'])
sts.append(self.credential_scope(request))
sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())
return '\n'.join(sts)
def signature(self, string_to_sign, request):
key = self.credentials.secret_key
k_date = self._sign(('AWS4' + key).encode('utf-8'),
request.context['timestamp'][0:8])
k_region = self._sign(k_date, self._region_name)
k_service = self._sign(k_region, self._service_name)
k_signing = self._sign(k_service, 'aws4_request')
return self._sign(k_signing, string_to_sign, hex=True)
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
datetime_now = datetime.datetime.utcnow()
request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)
# This could be a retry. Make sure the previous
# authorization header is removed first.
self._modify_request_before_signing(request)
canonical_request = self.canonical_request(request)
logger.debug("Calculating signature using v4 auth.")
logger.debug('CanonicalRequest:\n%s', canonical_request)
string_to_sign = self.string_to_sign(request, canonical_request)
logger.debug('StringToSign:\n%s', string_to_sign)
signature = self.signature(string_to_sign, request)
logger.debug('Signature:\n%s', signature)
self._inject_signature_to_request(request, signature)
def _inject_signature_to_request(self, request, signature):
l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)]
headers_to_sign = self.headers_to_sign(request)
l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))
l.append('Signature=%s' % signature)
request.headers['Authorization'] = ', '.join(l)
return request
def _modify_request_before_signing(self, request):
if 'Authorization' in request.headers:
del request.headers['Authorization']
self._set_necessary_date_headers(request)
if self.credentials.token:
if 'X-Amz-Security-Token' in request.headers:
del request.headers['X-Amz-Security-Token']
request.headers['X-Amz-Security-Token'] = self.credentials.token
def _set_necessary_date_headers(self, request):
# The spec allows for either the Date _or_ the X-Amz-Date value to be
# used so we check both. If there's a Date header, we use the date
# header. Otherwise we use the X-Amz-Date header.
if 'Date' in request.headers:
del request.headers['Date']
datetime_timestamp = datetime.datetime.strptime(
request.context['timestamp'], SIGV4_TIMESTAMP)
request.headers['Date'] = formatdate(
int(calendar.timegm(datetime_timestamp.timetuple())))
if 'X-Amz-Date' in request.headers:
del request.headers['X-Amz-Date']
else:
if 'X-Amz-Date' in request.headers:
del request.headers['X-Amz-Date']
request.headers['X-Amz-Date'] = request.context['timestamp']
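# A minimal, self-contained sketch (not part of the original class) of the
# signing-key derivation that SigV4Auth.signature() performs through _sign().
# The function and argument names here are hypothetical; they correspond to
# credentials.secret_key, timestamp[0:8], _region_name, _service_name and the
# StringToSign built above.
def _sigv4_signature_sketch(secret_key, date_stamp, region, service,
                            string_to_sign):
    import hashlib
    import hmac as hmac_mod

    def hmac_sha256(key, msg):
        # Each step keys the next HMAC with the previous digest.
        return hmac_mod.new(key, msg.encode('utf-8'), hashlib.sha256).digest()

    k_date = hmac_sha256(('AWS4' + secret_key).encode('utf-8'), date_stamp)
    k_region = hmac_sha256(k_date, region)
    k_service = hmac_sha256(k_region, service)
    k_signing = hmac_sha256(k_service, 'aws4_request')
    return hmac_mod.new(k_signing, string_to_sign.encode('utf-8'),
                        hashlib.sha256).hexdigest()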
class S3SigV4Auth(SigV4Auth):
def __init__(self, credentials, service_name, region_name):
super(S3SigV4Auth, self).__init__(
credentials, service_name, region_name)
self._default_region_name = region_name
def add_auth(self, request):
# If we ever decide to share auth sessions, this could potentially be
# a source of concurrency bugs.
signing_context = request.context.get('signing', {})
self._region_name = signing_context.get(
'region', self._default_region_name)
super(S3SigV4Auth, self).add_auth(request)
def _modify_request_before_signing(self, request):
super(S3SigV4Auth, self)._modify_request_before_signing(request)
if 'X-Amz-Content-SHA256' in request.headers:
del request.headers['X-Amz-Content-SHA256']
if self._should_sha256_sign_payload(request):
request.headers['X-Amz-Content-SHA256'] = self.payload(request)
else:
request.headers['X-Amz-Content-SHA256'] = 'UNSIGNED-PAYLOAD'
def _should_sha256_sign_payload(self, request):
# S3 allows optional body signing, so to minimize the performance
# impact, we opt to not SHA256 sign the body on streaming uploads,
# provided that we're on https.
client_config = request.context.get('client_config')
s3_config = getattr(client_config, 's3', None)
# The config could be None if it isn't set, or if the customer sets it
# to None.
if s3_config is None:
s3_config = {}
sign_payload = s3_config.get('payload_signing_enabled', None)
if sign_payload is not None:
return sign_payload
if 'Content-MD5' in request.headers and 'https' in request.url and \
request.context.get('has_streaming_input', False):
return False
return True
def _normalize_url_path(self, path):
# For S3, we do not normalize the path.
return path
class SigV4QueryAuth(SigV4Auth):
DEFAULT_EXPIRES = 3600
def __init__(self, credentials, service_name, region_name,
expires=DEFAULT_EXPIRES):
super(SigV4QueryAuth, self).__init__(credentials, service_name,
region_name)
self._expires = expires
def _modify_request_before_signing(self, request):
# Note that we're not including X-Amz-Signature.
# From the docs: "The Canonical Query String must include all the query
# parameters from the preceding table except for X-Amz-Signature."
signed_headers = self.signed_headers(self.headers_to_sign(request))
auth_params = {
'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
'X-Amz-Credential': self.scope(request),
'X-Amz-Date': request.context['timestamp'],
'X-Amz-Expires': self._expires,
'X-Amz-SignedHeaders': signed_headers,
}
if self.credentials.token is not None:
auth_params['X-Amz-Security-Token'] = self.credentials.token
# Now parse the original query string to a dict, inject our new query
# params, and serialize back to a query string.
url_parts = urlsplit(request.url)
# parse_qs makes each value a list, but in our case we know we won't
# have repeated keys so we know we have single element lists which we
# can convert back to scalar values.
query_dict = dict(
[(k, v[0]) for k, v in parse_qs(url_parts.query).items()])
# The spec is particular about this. It *has* to be:
# https://<endpoint>?<operation params>&<auth params>
# You can't mix the two types of params together, i.e. just keep doing
# new_query_params.update(op_params)
# new_query_params.update(auth_params)
# percent_encode_sequence(new_query_params)
operation_params = ''
if request.data:
# We also need to move the body params into the query string.
# request.data will be populated, for example, with query services
# which normally form encode the params into the body.
# This means that request.data is a dict() of the operation params.
query_dict.update(request.data)
request.data = ''
if query_dict:
operation_params = percent_encode_sequence(query_dict) + '&'
new_query_string = (operation_params +
percent_encode_sequence(auth_params))
# url_parts is a tuple (and therefore immutable) so we need to create
# a new url_parts with the new query string.
# <part> - <index>
# scheme - 0
# netloc - 1
# path - 2
# query - 3 <-- we're replacing this.
# fragment - 4
p = url_parts
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
def _inject_signature_to_request(self, request, signature):
# Rather than calculating an "Authorization" header, for the query
# param auth, we just append an 'X-Amz-Signature' param to the end
# of the query string.
request.url += '&X-Amz-Signature=%s' % signature
class S3SigV4QueryAuth(SigV4QueryAuth):
"""S3 SigV4 auth using query parameters.
This signer will sign a request using query parameters and signature
version 4, i.e. a "presigned url" signer.
Based off of:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
"""
def _normalize_url_path(self, path):
# For S3, we do not normalize the path.
return path
def payload(self, request):
# From the doc link above:
# "You don't include a payload hash in the Canonical Request, because
# when you create a presigned URL, you don't know anything about the
# payload. Instead, you use a constant string "UNSIGNED-PAYLOAD".
return "UNSIGNED-PAYLOAD"
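# Illustrative sketch (not part of the original module): generating a
# presigned S3 URL with this signer.  The Credentials and AWSRequest classes
# named below are assumptions about the surrounding library (botocore-style
# objects with access_key/secret_key/token and method/url/headers/context
# attributes), so the example is kept as a comment.
#
#   creds = Credentials('AKID...', 'secret...')
#   request = AWSRequest(method='GET',
#                        url='https://mybucket.s3.amazonaws.com/mykey')
#   S3SigV4QueryAuth(creds, 's3', 'us-east-1', expires=3600).add_auth(request)
#   presigned_url = request.url   # now carries the X-Amz-* auth query params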
class S3SigV4PostAuth(SigV4Auth):
"""
Presigns an S3 POST request.
Implementation doc here:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html
"""
def add_auth(self, request):
datetime_now = datetime.datetime.utcnow()
request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)
fields = {}
if request.context.get('s3-presign-post-fields', None) is not None:
fields = request.context['s3-presign-post-fields']
policy = {}
conditions = []
if request.context.get('s3-presign-post-policy', None) is not None:
policy = request.context['s3-presign-post-policy']
if policy.get('conditions', None) is not None:
conditions = policy['conditions']
policy['conditions'] = conditions
fields['x-amz-algorithm'] = 'AWS4-HMAC-SHA256'
fields['x-amz-credential'] = self.scope(request)
fields['x-amz-date'] = request.context['timestamp']
conditions.append({'x-amz-algorithm': 'AWS4-HMAC-SHA256'})
conditions.append({'x-amz-credential': self.scope(request)})
conditions.append({'x-amz-date': request.context['timestamp']})
if self.credentials.token is not None:
fields['x-amz-security-token'] = self.credentials.token
conditions.append({'x-amz-security-token': self.credentials.token})
# Dump the base64 encoded policy into the fields dictionary.
fields['policy'] = base64.b64encode(
json.dumps(policy).encode('utf-8')).decode('utf-8')
fields['x-amz-signature'] = self.signature(fields['policy'], request)
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
class HmacV1Auth(BaseSigner):
# List of Query String Arguments of Interest
QSAOfInterest = ['accelerate', 'acl', 'cors', 'defaultObjectAcl',
'location', 'logging', 'partNumber', 'policy',
'requestPayment', 'torrent',
'versioning', 'versionId', 'versions', 'website',
'uploads', 'uploadId', 'response-content-type',
'response-content-language', 'response-expires',
'response-cache-control', 'response-content-disposition',
'response-content-encoding', 'delete', 'lifecycle',
'tagging', 'restore', 'storageClass', 'notification',
'replication', 'requestPayment']
def __init__(self, credentials, service_name=None, region_name=None):
self.credentials = credentials
def sign_string(self, string_to_sign):
new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha1)
new_hmac.update(string_to_sign.encode('utf-8'))
return encodebytes(new_hmac.digest()).strip().decode('utf-8')
def canonical_standard_headers(self, headers):
interesting_headers = ['content-md5', 'content-type', 'date']
hoi = []
if 'Date' in headers:
del headers['Date']
headers['Date'] = self._get_date()
for ih in interesting_headers:
found = False
for key in headers:
lk = key.lower()
if headers[key] is not None and lk == ih:
hoi.append(headers[key].strip())
found = True
if not found:
hoi.append('')
return '\n'.join(hoi)
def canonical_custom_headers(self, headers):
hoi = []
custom_headers = {}
for key in headers:
lk = key.lower()
if headers[key] is not None:
if lk.startswith('x-amz-'):
custom_headers[lk] = ','.join(v.strip() for v in
headers.get_all(key))
sorted_header_keys = sorted(custom_headers.keys())
for key in sorted_header_keys:
hoi.append("%s:%s" % (key, custom_headers[key]))
return '\n'.join(hoi)
def unquote_v(self, nv):
"""
TODO: Do we need this?
"""
if len(nv) == 1:
return nv
else:
return (nv[0], unquote(nv[1]))
def canonical_resource(self, split, auth_path=None):
# don't include anything after the first ? in the resource...
# unless it is one of the QSA of interest, defined above
# NOTE:
# The path in the canonical resource should always be the
# full path including the bucket name, even for virtual-hosting
# style addressing. The ``auth_path`` keeps track of the full
# path for the canonical resource and would be passed in if
# the client was using virtual-hosting style.
if auth_path is not None:
buf = auth_path
else:
buf = split.path
if split.query:
qsa = split.query.split('&')
qsa = [a.split('=', 1) for a in qsa]
qsa = [self.unquote_v(a) for a in qsa
if a[0] in self.QSAOfInterest]
if len(qsa) > 0:
qsa.sort(key=itemgetter(0))
qsa = ['='.join(a) for a in qsa]
buf += '?'
buf += '&'.join(qsa)
return buf
def canonical_string(self, method, split, headers, expires=None,
auth_path=None):
cs = method.upper() + '\n'
cs += self.canonical_standard_headers(headers) + '\n'
custom_headers = self.canonical_custom_headers(headers)
if custom_headers:
cs += custom_headers + '\n'
cs += self.canonical_resource(split, auth_path=auth_path)
return cs
def get_signature(self, method, split, headers, expires=None,
auth_path=None):
if self.credentials.token:
del headers['x-amz-security-token']
headers['x-amz-security-token'] = self.credentials.token
string_to_sign = self.canonical_string(method,
split,
headers,
auth_path=auth_path)
logger.debug('StringToSign:\n%s', string_to_sign)
return self.sign_string(string_to_sign)
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
logger.debug("Calculating signature using hmacv1 auth.")
split = urlsplit(request.url)
logger.debug('HTTP request method: %s', request.method)
signature = self.get_signature(request.method, split,
request.headers,
auth_path=request.auth_path)
self._inject_signature(request, signature)
def _get_date(self):
return formatdate(usegmt=True)
def _inject_signature(self, request, signature):
if 'Authorization' in request.headers:
# We have to do this because request.headers is not a
# normal dictionary. It has the (unintuitive) behavior
# of aggregating repeated assignments to the same
# key. For example:
# headers['foo'] = 'a'; headers['foo'] = 'b'
# list(headers) will print ['foo', 'foo'].
del request.headers['Authorization']
request.headers['Authorization'] = (
"AWS %s:%s" % (self.credentials.access_key, signature))
class HmacV1QueryAuth(HmacV1Auth):
"""
Generates a presigned request for s3.
Spec from this document:
http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
#RESTAuthenticationQueryStringAuth
"""
DEFAULT_EXPIRES = 3600
def __init__(self, credentials, expires=DEFAULT_EXPIRES):
self.credentials = credentials
self._expires = expires
def _get_date(self):
return str(int(time.time() + int(self._expires)))
def _inject_signature(self, request, signature):
query_dict = {}
query_dict['AWSAccessKeyId'] = self.credentials.access_key
query_dict['Signature'] = signature
for header_key in request.headers:
lk = header_key.lower()
# For query string requests, Expires is used instead of the
# Date header.
if header_key == 'Date':
query_dict['Expires'] = request.headers['Date']
# We only want to include relevant headers in the query string.
# These can be anything that starts with x-amz, is Content-MD5,
# or is Content-Type.
elif lk.startswith('x-amz-') or lk in ['content-md5',
'content-type']:
query_dict[lk] = request.headers[lk]
# Combine all of the identified headers into an encoded
# query string
new_query_string = percent_encode_sequence(query_dict)
# Create a new url with the presigned url.
p = urlsplit(request.url)
if p[3]:
# If there was a pre-existing query string, we should
# add that back before injecting the new query string.
new_query_string = '%s&%s' % (p[3], new_query_string)
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
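# Illustrative sketch (not part of the original module): the legacy
# query-string presigner above produces URLs whose auth data lives entirely
# in the query string, e.g. (values are hypothetical):
#
#   https://mybucket.s3.amazonaws.com/mykey
#       ?AWSAccessKeyId=AKID...&Expires=1500000000&Signature=...
#
# where Expires is the absolute epoch time computed by _get_date() from the
# `expires` offset passed to the constructor.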
class HmacV1PostAuth(HmacV1Auth):
"""
Generates a presigned post for s3.
Spec from this document:
http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingHTTPPOST.html
"""
def add_auth(self, request):
fields = {}
if request.context.get('s3-presign-post-fields', None) is not None:
fields = request.context['s3-presign-post-fields']
policy = {}
conditions = []
if request.context.get('s3-presign-post-policy', None) is not None:
policy = request.context['s3-presign-post-policy']
if policy.get('conditions', None) is not None:
conditions = policy['conditions']
policy['conditions'] = conditions
fields['AWSAccessKeyId'] = self.credentials.access_key
if self.credentials.token is not None:
fields['x-amz-security-token'] = self.credentials.token
conditions.append({'x-amz-security-token': self.credentials.token})
# Dump the base64 encoded policy into the fields dictionary.
fields['policy'] = base64.b64encode(
json.dumps(policy).encode('utf-8')).decode('utf-8')
fields['signature'] = self.sign_string(fields['policy'])
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
# Defined at the bottom instead of the top of the module because the Auth
# classes weren't defined yet.
AUTH_TYPE_MAPS = {
'v2': SigV2Auth,
'v4': SigV4Auth,
'v4-query': SigV4QueryAuth,
'v3': SigV3Auth,
'v3https': SigV3Auth,
's3': HmacV1Auth,
's3-query': HmacV1QueryAuth,
's3-presign-post': HmacV1PostAuth,
's3v4': S3SigV4Auth,
's3v4-query': S3SigV4QueryAuth,
's3v4-presign-post': S3SigV4PostAuth,
}
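# Illustrative sketch (not part of the original module): resolving a signer
# from the map above.  The credentials and request objects mentioned here are
# assumptions about the caller's environment, so the example stays a comment.
#
#   signer_cls = AUTH_TYPE_MAPS['s3v4']            # -> S3SigV4Auth
#   signer = signer_cls(credentials, 's3', 'us-east-1')
#   signer.add_auth(request)                       # mutates request in place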
|
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network."""
import numpy as np
import os
import tensorflow as tf
from tensorflow.python.client import timeline
import cv2
from .nms_wrapper import nms_wrapper
from ..roi_data_layer.layer import RoIDataLayer
from ..utils.timer import Timer
from ..gt_data_layer import roidb as gdl_roidb
from ..roi_data_layer import roidb as rdl_roidb
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
from ..fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
# <<<< obsolete
_DEBUG = False
class SolverWrapper(object):
"""A simple wrapper around Caffe's solver.
This wrapper gives us control over the snapshotting process, which we
use to unnormalize the learned bounding-box regression weights.
"""
def __init__(self, sess, network, imdb, roidb, output_dir, logdir, pretrained_model=None):
"""Initialize the SolverWrapper."""
self.net = network
self.imdb = imdb
self.roidb = roidb
self.output_dir = output_dir
self.pretrained_model = pretrained_model
print 'Computing bounding-box regression targets...'
if cfg.TRAIN.BBOX_REG:
self.bbox_means, self.bbox_stds = rdl_roidb.add_bbox_regression_targets(roidb)
print 'done'
# For checkpoint
self.saver = tf.train.Saver(max_to_keep=100)
self.writer = tf.summary.FileWriter(logdir=logdir,
graph=tf.get_default_graph(),
flush_secs=5)
def snapshot(self, sess, iter):
"""Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
net = self.net
if cfg.TRAIN.BBOX_REG and net.layers.has_key('bbox_pred') and cfg.TRAIN.BBOX_NORMALIZE_TARGETS:
# save original values
with tf.variable_scope('bbox_pred', reuse=True):
weights = tf.get_variable("weights")
biases = tf.get_variable("biases")
orig_0 = weights.eval()
orig_1 = biases.eval()
# scale and shift with bbox reg unnormalization; then save snapshot
weights_shape = weights.get_shape().as_list()
sess.run(weights.assign(orig_0 * np.tile(self.bbox_stds, (weights_shape[0],1))))
sess.run(biases.assign(orig_1 * self.bbox_stds + self.bbox_means))
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (cfg.TRAIN.SNAPSHOT_PREFIX + infix +
'_iter_{:d}'.format(iter+1) + '.ckpt')
filename = os.path.join(self.output_dir, filename)
self.saver.save(sess, filename)
print 'Wrote snapshot to: {:s}'.format(filename)
if cfg.TRAIN.BBOX_REG and net.layers.has_key('bbox_pred'):
# restore net to original state
sess.run(weights.assign(orig_0))
sess.run(biases.assign(orig_1))
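# Illustrative note (not from the original source): training regresses
# normalized targets t' = (t - mean) / std, so the snapshot above folds the
# normalization back into the layer,
#
#   W_unnorm = W * tile(std, (in_dim, 1))
#   b_unnorm = b * std + mean
#
# which lets test-time code apply bbox_pred outputs as raw deltas without
# knowing the training-set statistics.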
def build_image_summary(self):
"""
A simple graph for writing image summaries
:return:
"""
log_image_data = tf.placeholder(tf.uint8, [None, None, 3])
log_image_name = tf.placeholder(tf.string)
# import tensorflow.python.ops.gen_logging_ops as logging_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.framework import ops as _ops
log_image = gen_logging_ops._image_summary(log_image_name, tf.expand_dims(log_image_data, 0), max_images=1)
_ops.add_to_collection(_ops.GraphKeys.SUMMARIES, log_image)
# log_image = tf.summary.image(log_image_name, tf.expand_dims(log_image_data, 0), max_outputs=1)
return log_image, log_image_data, log_image_name
def train_model(self, sess, max_iters, restore=False):
"""Network training loop."""
data_layer = get_data_layer(self.roidb, self.imdb.num_classes)
loss, cross_entropy, loss_box, rpn_cross_entropy, rpn_loss_box = \
self.net.build_loss(ohem=cfg.TRAIN.OHEM)
# scalar summary
tf.summary.scalar('rpn_rgs_loss', rpn_loss_box)
tf.summary.scalar('rpn_cls_loss', rpn_cross_entropy)
tf.summary.scalar('cls_loss', cross_entropy)
tf.summary.scalar('rgs_loss', loss_box)
tf.summary.scalar('loss', loss)
summary_op = tf.summary.merge_all()
# image writer
# NOTE: this image summary is independent of summary_op
log_image, log_image_data, log_image_name =\
self.build_image_summary()
# optimizer
if cfg.TRAIN.SOLVER == 'Adam':
opt = tf.train.AdamOptimizer(cfg.TRAIN.LEARNING_RATE)
elif cfg.TRAIN.SOLVER == 'RMS':
opt = tf.train.RMSPropOptimizer(cfg.TRAIN.LEARNING_RATE)
else:
lr = tf.Variable(cfg.TRAIN.LEARNING_RATE, trainable=False)
# lr = tf.Variable(0.0, trainable=False)
momentum = cfg.TRAIN.MOMENTUM
opt = tf.train.MomentumOptimizer(lr, momentum)
global_step = tf.Variable(0, trainable=False)
with_clip = True
if with_clip:
tvars = tf.trainable_variables()
grads, norm = tf.clip_by_global_norm(tf.gradients(loss, tvars), 10.0)
train_op = opt.apply_gradients(zip(grads, tvars), global_step=global_step)
else:
train_op = opt.minimize(loss, global_step=global_step)
# initialize variables
sess.run(tf.global_variables_initializer())
restore_iter = 0
# load vgg16
if self.pretrained_model is not None and not restore:
try:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
except:
raise Exception('Check your pretrained model {:s}'.format(self.pretrained_model))
# resuming a trainer
if restore:
try:
ckpt = tf.train.get_checkpoint_state(self.output_dir)
print 'Restoring from {}...'.format(ckpt.model_checkpoint_path),
self.saver.restore(sess, ckpt.model_checkpoint_path)
stem = os.path.splitext(os.path.basename(ckpt.model_checkpoint_path))[0]
restore_iter = int(stem.split('_')[-1])
sess.run(global_step.assign(restore_iter))
print 'done'
except:
raise Exception('Check your pretrained {:s}'.format(ckpt.model_checkpoint_path))
last_snapshot_iter = -1
timer = Timer()
# for iter in range(max_iters):
for iter in range(restore_iter, max_iters):
timer.tic()
# learning rate
if iter != 0 and iter % cfg.TRAIN.STEPSIZE == 0:
sess.run(tf.assign(lr, lr.eval() * cfg.TRAIN.GAMMA))
# sess.run(tf.assign(lr, 0.0))
# get one batch
blobs = data_layer.forward()
if (iter + 1) % (cfg.TRAIN.DISPLAY) == 0:
print 'image: %s' %(blobs['im_name']),
feed_dict={
self.net.data: blobs['data'],
self.net.im_info: blobs['im_info'],
self.net.keep_prob: 0.5,
self.net.gt_boxes: blobs['gt_boxes'],
self.net.gt_ishard: blobs['gt_ishard'],
self.net.dontcare_areas: blobs['dontcare_areas']
}
res_fetches = [self.net.get_output('cls_prob'), # FRCNN class prob
self.net.get_output('bbox_pred'), # FRCNN rgs output
self.net.get_output('rois')] # RPN rgs output
fetch_list = [rpn_cross_entropy,
rpn_loss_box,
cross_entropy,
loss_box,
summary_op,
train_op] + res_fetches
if _DEBUG:
# add profiling
# link libcupti.so in LD_LIBRARY_PATH
#
# run_metadata = tf.RunMetadata()
# rpn_loss_cls_value, rpn_loss_box_value,loss_cls_value, loss_box_value,\
# summary_str, _, \
# cls_prob, bbox_pred, rois, \
# = sess.run(fetches=fetch_list,
# feed_dict=feed_dict,
# options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
# run_metadata=run_metadata
# )
#
# # write profiling
# trace = timeline.Timeline(step_stats=run_metadata.step_stats)
# with open('timeline.ctf.json', 'w') as trace_file:
# trace_file.write(trace.generate_chrome_trace_format())
fetch_list = [rpn_cross_entropy,
rpn_loss_box,
cross_entropy,
loss_box,
summary_op] + res_fetches
fetch_list += [self.net.get_output('rpn_cls_score_reshape'), self.net.get_output('rpn_cls_prob_reshape')]
fetch_list += []
rpn_loss_cls_value, rpn_loss_box_value, loss_cls_value, loss_box_value, \
summary_str, \
cls_prob, bbox_pred, rois, \
rpn_cls_score_reshape_np, rpn_cls_prob_reshape_np\
= sess.run(fetches=fetch_list, feed_dict=feed_dict)
else:
fetch_list = [rpn_cross_entropy,
rpn_loss_box,
cross_entropy,
loss_box,
summary_op,
train_op] + res_fetches
fetch_list += []
rpn_loss_cls_value, rpn_loss_box_value, loss_cls_value, loss_box_value, \
summary_str, _, \
cls_prob, bbox_pred, rois = sess.run(fetches=fetch_list, feed_dict=feed_dict)
self.writer.add_summary(summary=summary_str, global_step=global_step.eval())
_diff_time = timer.toc(average=False)
# image summary
if (iter) % cfg.TRAIN.LOG_IMAGE_ITERS == 0:
# plus mean
ori_im = np.squeeze(blobs['data']) + cfg.PIXEL_MEANS
ori_im = ori_im.astype(dtype=np.uint8, copy=False)
ori_im = _draw_gt_to_image(ori_im, blobs['gt_boxes'], blobs['gt_ishard'])
ori_im = _draw_dontcare_to_image(ori_im, blobs['dontcare_areas'])
# draw rects
# print 'rois:', rois.shape[0]
if cfg.TRAIN.BBOX_REG and cfg.TRAIN.BBOX_NORMALIZE_TARGETS:
bbox_pred = bbox_pred * np.tile(self.bbox_stds, (bbox_pred.shape[0], 1)) + \
np.tile(self.bbox_means, (bbox_pred.shape[0], 1))
boxes, scores = _process_boxes_scores(cls_prob, bbox_pred, rois, blobs['im_info'][0][2], ori_im.shape)
res = nms_wrapper(scores, boxes, threshold=0.7)
image = cv2.cvtColor(_draw_boxes_to_image(ori_im, res), cv2.COLOR_BGR2RGB)
log_image_name_str = ('%06d_' % iter ) + blobs['im_name']
log_image_summary_op = \
sess.run(log_image, \
feed_dict={log_image_name: log_image_name_str,\
log_image_data: image})
self.writer.add_summary(log_image_summary_op, global_step=global_step.eval())
if (iter) % (cfg.TRAIN.DISPLAY) == 0:
print 'iter: %d / %d, total loss: %.4f, rpn_loss_cls: %.4f, rpn_loss_box: %.4f, loss_cls: %.4f, loss_box: %.4f, lr: %f'%\
(iter, max_iters, rpn_loss_cls_value + rpn_loss_box_value + loss_cls_value + loss_box_value ,\
rpn_loss_cls_value, rpn_loss_box_value,loss_cls_value, loss_box_value, lr.eval())
print 'speed: {:.3f}s / iter'.format(_diff_time)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
print 'Preparing training data...'
if cfg.TRAIN.HAS_RPN:
if cfg.IS_MULTISCALE:
# TODO: fix multiscale training (single scale is already a good trade-off)
print ('#### warning: multi-scale has not been tested.')
print ('#### warning: using single scale by setting IS_MULTISCALE: False.')
gdl_roidb.prepare_roidb(imdb)
else:
rdl_roidb.prepare_roidb(imdb)
else:
rdl_roidb.prepare_roidb(imdb)
print 'done'
return imdb.roidb
def get_data_layer(roidb, num_classes):
"""return a data layer."""
if cfg.TRAIN.HAS_RPN:
if cfg.IS_MULTISCALE:
# obsolete
# layer = GtDataLayer(roidb)
raise Exception("Calling caffe modules...")
else:
layer = RoIDataLayer(roidb, num_classes)
else:
layer = RoIDataLayer(roidb, num_classes)
return layer
def _process_boxes_scores(cls_prob, bbox_pred, rois, im_scale, im_shape):
"""
process the output tensors, to get the boxes and scores
"""
assert rois.shape[0] == bbox_pred.shape[0],\
'rois and bbox_pred must have the same shape'
boxes = rois[:, 1:5]
scores = cls_prob
if cfg.TEST.BBOX_REG:
pred_boxes = bbox_transform_inv(boxes, deltas=bbox_pred)
pred_boxes = clip_boxes(pred_boxes, im_shape)
else:
# Simply repeat the boxes, once for each class
# boxes = np.tile(boxes, (1, scores.shape[1]))
pred_boxes = clip_boxes(boxes, im_shape)
return pred_boxes, scores
def _draw_boxes_to_image(im, res):
colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
(151, 0, 255), (243, 223, 48), (0, 117, 255),\
(58, 184, 14), (86, 67, 140), (121, 82, 6),\
(174, 29, 128), (115, 154, 81), (86, 255, 234)]
font = cv2.FONT_HERSHEY_SIMPLEX
image = np.copy(im)
cnt = 0
for ind, r in enumerate(res):
if r['dets'] is None: continue
dets = r['dets']
for i in range(0, dets.shape[0]):
(x1, y1, x2, y2, score) = dets[i, :]
cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), colors[ind % len(colors)], 2)
text = '{:s} {:.2f}'.format(r['class'], score)
cv2.putText(image, text, (int(x1), int(y1)), font, 0.6, colors[ind % len(colors)], 1)
cnt = (cnt + 1)
return image
def _draw_gt_to_image(im, gt_boxes, gt_ishard):
image = np.copy(im)
for i in range(0, gt_boxes.shape[0]):
(x1, y1, x2, y2, score) = gt_boxes[i, :]
if gt_ishard[i] == 0:
cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (255, 255, 255), 2)
else:
cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)
return image
def _draw_dontcare_to_image(im, dontcare):
image = np.copy(im)
for i in range(0, dontcare.shape[0]):
(x1, y1, x2, y2) = dontcare[i, :]
cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)
return image
def train_net(network, imdb, roidb, output_dir, log_dir, pretrained_model=None, max_iters=40000, restore=False):
"""Train a Fast R-CNN network."""
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allocator_type = 'BFC'
config.gpu_options.per_process_gpu_memory_fraction = 0.40
with tf.Session(config=config) as sess:
sw = SolverWrapper(sess, network, imdb, roidb, output_dir, logdir= log_dir, pretrained_model=pretrained_model)
print 'Solving...'
sw.train_model(sess, max_iters, restore=restore)
print 'done solving'
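# Illustrative sketch (not part of the original script): a typical driver
# builds the network, imdb and roidb elsewhere and then calls the helpers
# above.  The get_imdb/get_network factory names are assumptions borrowed
# from common Faster R-CNN tooling, not definitions from this module.
#
#   imdb = get_imdb('voc_2007_trainval')
#   roidb = get_training_roidb(imdb)
#   net = get_network('VGGnet_train')
#   train_net(net, imdb, roidb, output_dir='output', log_dir='logs',
#             pretrained_model='data/pretrain_model/VGG_imagenet.npy',
#             max_iters=70000, restore=False)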
|
|
"""
This module allows one to use SWIG2 (SWIG version >= 1.3) wrapped
objects from Weave. SWIG-1.3 wraps objects differently from SWIG-1.1.
This module is a template for a SWIG2 wrapped converter. To wrap any
special code that uses SWIG the user simply needs to override the
defaults in the swig2_converter class. These special circumstances
arise when one has wrapped code that uses C++ namespaces. However,
for most straightforward SWIG wrappers this converter should work fine
out of the box.
Newer versions of SWIG (>=1.3.22) represent the wrapped object using a
PyCObject and also a PySwigObject (>=1.3.24). This code supports all
of these options transparently.
Since SWIG-1.3.x is under intense development there are several issues
to consider when using the swig2_converter.
1. For SWIG versions <= 1.3.19, the runtime code was built either
into the module or into a separate library called libswigpy (or
something like that). In the latter case, the users Python
modules were linked to this library and shared type information
(this was common for large projects with several modules that
needed to share type information). If you are using multiple
inheritance and want to be certain that type coercions from a
derived class to a base class are done correctly, you will need to
link to the libswigpy library. You will then need to add these to
the keyword arguments passed along to `weave.inline`:
a. Add a define_macros=[('SWIG_NOINCLUDE', None)]
b. Add the swigpy library to the libraries like so:
libraries=['swigpy']
c. If the libswigpy is in a non-standard location add the path
to the library_dirs argument as
`library_dirs=['/usr/local/lib']` or whatever.
OTOH if you do not need to link to libswigpy (this is likely if
you are not using multiple inheritance), then you do not need the
above. However you are likely to get an annoying message of the
form::
WARNING: swig_type_info is NULL.
for each SWIG object you are inlining (during each call). To
avoid this add a define_macros=[('NO_SWIG_WARN', None)].
2. Since keeping track of a separate runtime is a pain, for SWIG
versions >= 1.3.23 the type information was stored inside a
special module. Thus in these versions there is no need to link
to this special SWIG runtime library. This module handles these
cases automatically and nothing special need be done.
Using modules wrapped with different SWIG versions simultaneously.
Let's say you have library 'A' that is wrapped using SWIG version
1.3.20. Then let's say you have a library 'B' wrapped using
version 1.3.24. Now if you want to use both in weave.inline, we
have a serious problem. The trouble is that both 'A' and 'B' may
use different and incompatible runtime layouts. It is impossible
to get the type conversions right in these cases. Thus it is
strongly advised that you use one version of SWIG to wrap all of
the code that you intend to inline using weave. Note that you can
certainly use SWIG-1.3.23 for everything and do not have to use
the latest and greatest SWIG to use weave.inline. Just make sure
that when inlining SWIG wrapped objects that all such objects use
the same runtime layout. By default, if you are using different
versions and do need to inline these objects, the latest layout
will be assumed. This might leave you with holes in your feet,
but you have been warned. You can force the converter to use a
specific runtime version if you want (see the
`swig2_converter.__init__` method and its documentation).
Prabhu Ramachandran <[email protected]>
"""
from __future__ import absolute_import, print_function
import sys
from .c_spec import common_base_converter
from . import swigptr2
#----------------------------------------------------------------------
# Commonly used functions for the type query. This is done mainly to
# avoid code duplication.
#----------------------------------------------------------------------
swig2_common_code = \
'''
swig_type_info *
Weave_SWIG_TypeQuery(const char *name) {
swig_type_info *ty = SWIG_TypeQuery(name);
#ifndef NO_SWIG_WARN
if (ty == NULL) {
printf("WARNING: swig_type_info is NULL.\\n");
}
#endif
return ty;
}
'''
#----------------------------------------------------------------------
# This code obtains the C++ pointer given a SWIG2 wrapped C++ object
# in Python.
#----------------------------------------------------------------------
swig2_py_to_c_template = \
"""
class %(type_name)s_handler
{
public:
%(c_type)s convert_to_%(type_name)s(PyObject* py_obj, const char* name)
{
%(c_type)s c_ptr;
swig_type_info *ty = Weave_SWIG_TypeQuery("%(c_type)s");
// work on this error reporting...
if (SWIG_ConvertPtr(py_obj, (void **) &c_ptr, ty,
SWIG_POINTER_EXCEPTION | 0) == -1) {
handle_conversion_error(py_obj,"%(type_name)s", name);
}
%(inc_ref_count)s
return c_ptr;
}
%(c_type)s py_to_%(type_name)s(PyObject* py_obj,const char* name)
{
%(c_type)s c_ptr;
swig_type_info *ty = Weave_SWIG_TypeQuery("%(c_type)s");
// work on this error reporting...
if (SWIG_ConvertPtr(py_obj, (void **) &c_ptr, ty,
SWIG_POINTER_EXCEPTION | 0) == -1) {
handle_bad_type(py_obj,"%(type_name)s", name);
}
%(inc_ref_count)s
return c_ptr;
}
};
%(type_name)s_handler x__%(type_name)s_handler = %(type_name)s_handler();
#define convert_to_%(type_name)s(py_obj,name) \\
x__%(type_name)s_handler.convert_to_%(type_name)s(py_obj,name)
#define py_to_%(type_name)s(py_obj,name) \\
x__%(type_name)s_handler.py_to_%(type_name)s(py_obj,name)
"""
#----------------------------------------------------------------------
# This code generates a new SWIG pointer object given a C++ pointer.
#
# Important note: The thisown flag of the returned object is set to 0
# by default.
#----------------------------------------------------------------------
swig2_c_to_py_template = """
PyObject* %(type_name)s_to_py(void *obj)
{
swig_type_info *ty = Weave_SWIG_TypeQuery("%(c_type)s");
return SWIG_NewPointerObj(obj, ty, 0);
}
"""
class swig2_converter(common_base_converter):
""" A converter for SWIG >= 1.3 wrapped objects."""
def __init__(self, class_name="undefined", pycobj=0, runtime_version=None):
"""Initializes the instance.
Parameters
----------
- class_name : `string`
Name of class, this is set dynamically at build time by the
`type_spec` method.
- pycobj : `int`
If `pycobj` is 0 then code is generated to deal with string
representations of the SWIG wrapped pointer. If it is 1,
then code is generated to deal with a PyCObject. If it is 2
then code is generated to deal with a PySwigObject.
- runtime_version : `int`
Specifies the SWIG_RUNTIME_VERSION to use. Defaults to
`None`. In this case the runtime is automatically
determined. This option is useful if you want to force the
runtime_version to be a specific one and override the
auto-detected one.
"""
self.class_name = class_name
self.pycobj = pycobj # This is on if a PyCObject has been used.
self.runtime_version = runtime_version
common_base_converter.__init__(self)
def _get_swig_runtime_version(self):
"""This method tries to deduce the SWIG runtime version. If
the SWIG runtime layout changes, the `SWIG_TypeQuery` function
will not work properly.
"""
versions = []
for key in sys.modules.keys():
idx = key.find('swig_runtime_data')
if idx > -1:
ver = int(key[idx+17:])
if ver not in versions:
versions.append(ver)
nver = len(versions)
if nver == 0:
return 0
elif nver == 1:
return versions[0]
else:
print("WARNING: Multiple SWIG versions detected. No version was")
print("explicitly specified. Using the highest possible version.")
return max(versions)
def init_info(self, runtime=0):
"""Keyword arguments:
runtime -- If false (default), the user does not need to
link to the swig runtime (libswigpy). Newer versions of SWIG
(>=1.3.23) do not need to build a SWIG runtime library at
all. In these versions of SWIG the swig_type_info is stored
in a common module. swig_type_info stores the type
information and the type converters to cast pointers
correctly.
With earlier versions of SWIG (<1.3.22) one has to
link the weave module with a SWIG runtime library
(libswigpy) in order to get the swig_type_info. Thus, if
`runtime` is True, the user must link to the swigpy runtime
library and in this case type checking will be performed.
With these versions of SWIG, if runtime is `False`, no type
checking is done.
"""
common_base_converter.init_info(self)
# These are generated on the fly instead of defined at
# the class level.
self.type_name = self.class_name
self.c_type = self.class_name + "*"
self.return_type = self.class_name + "*"
self.to_c_return = None # not used
self.check_func = None # not used
if self.pycobj == 1:
self.define_macros.append(("SWIG_COBJECT_TYPES", None))
self.define_macros.append(("SWIG_COBJECT_PYTHON", None))
elif self.pycobj == 2:
self.define_macros.append(("SWIG_COBJECT_TYPES", None))
if self.runtime_version is None:
self.runtime_version = self._get_swig_runtime_version()
rv = self.runtime_version
if rv == 0:
# The runtime option is only useful for older versions of
# SWIG.
if runtime:
self.define_macros.append(("SWIG_NOINCLUDE", None))
self.support_code.append(swigptr2.swigptr2_code_v0)
elif rv == 1:
self.support_code.append(swigptr2.swigptr2_code_v1)
elif rv == 2:
self.support_code.append(swigptr2.swigptr2_code_v2)
elif rv == 3:
self.support_code.append(swigptr2.swigptr2_code_v3)
else:
raise AssertionError("Unsupported version of the SWIG runtime: %s" % rv)
self.support_code.append(swig2_common_code)
def _get_swig_type(self, value):
"""Given the object in the form of `value`, this method
returns information on the SWIG internal object representation
type. Different versions of SWIG use different object
representations. This method provides information on the type
of internal representation.
Currently returns one of ['', 'str', 'pycobj', 'pyswig'].
"""
swig_typ = ''
if hasattr(value, 'this'):
type_this = type(value.this)
type_str = str(type_this)
if isinstance(value.this, str):
try:
data = value.this.split('_')
if data[2] == 'p':
swig_typ = 'str'
except AttributeError:
pass
elif type_str == "<type 'PyCObject'>":
swig_typ = 'pycobj'
elif type_str.find('PySwig') > -1:
swig_typ = 'pyswig'
return swig_typ
def type_match(self,value):
""" This is a generic type matcher for SWIG-1.3 objects. For
specific instances, override this method. The method also
handles cases where SWIG uses a PyCObject for the `this`
attribute and not a string.
"""
if self._get_swig_type(value):
return 1
else:
return 0
def generate_build_info(self):
if self.class_name != "undefined":
res = common_base_converter.generate_build_info(self)
else:
# if there isn't a class_name, we don't want the
# support_code to be included
from . import base_info
res = base_info.base_info()
return res
def py_to_c_code(self):
return swig2_py_to_c_template % self.template_vars()
def c_to_py_code(self):
return swig2_c_to_py_template % self.template_vars()
def type_spec(self,name,value):
""" This returns a generic type converter for SWIG-1.3
objects. For specific instances, override this function if
necessary."""
# factory
swig_ob_type = self._get_swig_type(value)
pycobj = 0
if swig_ob_type == 'str':
class_name = value.this.split('_')[-1]
elif swig_ob_type == 'pycobj':
pycobj = 1
elif swig_ob_type == 'pyswig':
pycobj = 2
else:
raise AssertionError("Does not look like a SWIG object: %s" % value)
if pycobj:
class_name = value.__class__.__name__
if class_name[-3:] == 'Ptr':
class_name = class_name[:-3]
new_spec = self.__class__(class_name, pycobj, self.runtime_version)
new_spec.name = name
return new_spec
def __cmp__(self,other):
#only works for equal
res = -1
try:
res = cmp(self.name,other.name) or \
cmp(self.__class__, other.__class__) or \
cmp(self.class_name, other.class_name) or \
cmp(self.type_name,other.type_name)
except:
pass
return res
#----------------------------------------------------------------------
# Uncomment the next line if you want this to be a default converter
# that is magically invoked by inline.
#----------------------------------------------------------------------
#converters.default.insert(0, swig2_converter())
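#----------------------------------------------------------------------
# Illustrative sketch (not part of the original converter): inlining C++
# code that receives a SWIG-wrapped object, with the libswigpy-related
# keyword arguments described in the module docstring.  The import path,
# the wrapped object `obj` and the method called on it are hypothetical.
#----------------------------------------------------------------------
#   from scipy import weave
#   code = """
#          obj->some_method();   // obj arrives as the unwrapped C++ pointer
#          """
#   weave.inline(code, ['obj'],
#                define_macros=[('SWIG_NOINCLUDE', None)],  # only when linking libswigpy
#                libraries=['swigpy'],
#                library_dirs=['/usr/local/lib'])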
|
|
import os
import time
from lib.aws import AWS_USER
from .common import (
AmazonWebServices, run_command
)
from .test_airgap import get_bastion_node
from .test_custom_host_reg import (
random_test_name, RANCHER_SERVER_VERSION, HOST_NAME, AGENT_REG_CMD
)
BASTION_ID = os.environ.get("RANCHER_BASTION_ID", "")
NUMBER_OF_INSTANCES = int(os.environ.get("RANCHER_AIRGAP_INSTANCE_COUNT", "1"))
PROXY_HOST_NAME = random_test_name(HOST_NAME)
RANCHER_PROXY_INTERNAL_HOSTNAME = \
PROXY_HOST_NAME + "-internal.qa.rancher.space"
RANCHER_PROXY_HOSTNAME = PROXY_HOST_NAME + ".qa.rancher.space"
RESOURCE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
SSH_KEY_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'.ssh')
RANCHER_PROXY_PORT = os.environ.get("RANCHER_PROXY_PORT", "3131")
def deploy_proxy_server():
node_name = PROXY_HOST_NAME + "-proxy"
proxy_node = AmazonWebServices().create_node(node_name)
# Copy SSH Key to proxy and local dir and give it proper permissions
write_key_command = "cat <<EOT >> {}.pem\n{}\nEOT".format(
proxy_node.ssh_key_name, proxy_node.ssh_key)
proxy_node.execute_command(write_key_command)
local_write_key_command = \
"mkdir -p {} && cat <<EOT >> {}/{}.pem\n{}\nEOT".format(
SSH_KEY_DIR, SSH_KEY_DIR,
proxy_node.ssh_key_name, proxy_node.ssh_key)
run_command(local_write_key_command, log_out=False)
set_key_permissions_command = "chmod 400 {}.pem".format(
proxy_node.ssh_key_name)
proxy_node.execute_command(set_key_permissions_command)
local_set_key_permissions_command = "chmod 400 {}/{}.pem".format(
SSH_KEY_DIR, proxy_node.ssh_key_name)
run_command(local_set_key_permissions_command, log_out=False)
# Write the proxy config to the node and run the proxy
proxy_node.execute_command("mkdir -p /home/ubuntu/squid/")
copy_cfg_command = \
'scp -q -i {}/{}.pem -o StrictHostKeyChecking=no ' \
'-o UserKnownHostsFile=/dev/null {}/squid/squid.conf ' \
'{}@{}:~/squid/squid.conf'.format(
SSH_KEY_DIR, proxy_node.ssh_key_name, RESOURCE_DIR,
AWS_USER, proxy_node.host_name)
run_command(copy_cfg_command, log_out=True)
squid_cmd = "sudo docker run -d " \
"-v /home/ubuntu/squid/squid.conf:/etc/squid/squid.conf " \
"-p {}:3128 wernight/squid".format(RANCHER_PROXY_PORT)
proxy_node.execute_command(squid_cmd)
print("Proxy Server Details:\nNAME: {}\nHOST NAME: {}\n"
"INSTANCE ID: {}\n".format(node_name, proxy_node.host_name,
proxy_node.provider_node_id))
return proxy_node
def run_command_on_proxy_node(bastion_node, ag_node, cmd, log_out=False):
ag_command = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no {}@{} ' \
'"{}"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address, cmd)
result = bastion_node.execute_command(ag_command)
if log_out:
print("Running command: {}".format(ag_command))
print("Result: {}".format(result))
return result
def prepare_airgap_proxy_node(bastion_node, number_of_nodes):
node_name = PROXY_HOST_NAME + "-agproxy"
ag_nodes = AmazonWebServices().create_multiple_nodes(
number_of_nodes, node_name, public_ip=False)
for num, ag_node in enumerate(ag_nodes):
ag_node_update_docker = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no {}@{} ' \
'"sudo usermod -aG docker {}"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address, AWS_USER)
bastion_node.execute_command(ag_node_update_docker)
proxy_url = bastion_node.host_name + ":" + RANCHER_PROXY_PORT
proxy_info = '[Service]\nEnvironment=\"HTTP_PROXY={}\" ' \
'\"HTTPS_PROXY={}\" ' \
'\"NO_PROXY=localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,' \
'cattle-system.svc\"' \
.format(proxy_url, proxy_url)
bastion_node.execute_command('echo "{}" > http-proxy.conf'
.format(proxy_info))
ag_node_create_dir = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no {}@{} ' \
'"sudo mkdir -p /etc/systemd/system/docker.service.d"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address)
bastion_node.execute_command(ag_node_create_dir)
copy_conf_cmd = \
'scp -q -i "{}".pem -o StrictHostKeyChecking=no -o ' \
'UserKnownHostsFile=/dev/null ~/http-proxy.conf ' \
'{}@{}:~/'.format(bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address)
bastion_node.execute_command(copy_conf_cmd)
ag_node_mv_conf = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no ' \
'-o UserKnownHostsFile=/dev/null {}@{} ' \
'"sudo mv http-proxy.conf /etc/systemd/system/docker.service.d/ ' \
'&& sudo systemctl daemon-reload && ' \
'sudo systemctl restart docker"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address)
bastion_node.execute_command(ag_node_mv_conf)
print("Airgapped Proxy Instance Details:\n"
"NAME: {}-{}\nPRIVATE IP: {}\n"
"".format(node_name, num, ag_node.private_ip_address))
return ag_nodes
def deploy_proxy_rancher(bastion_node):
ag_node = prepare_airgap_proxy_node(bastion_node, 1)[0]
proxy_url = bastion_node.host_name + ":" + RANCHER_PROXY_PORT
deploy_rancher_command = \
'sudo docker run -d --privileged --restart=unless-stopped ' \
'-p 80:80 -p 443:443 ' \
'-e HTTP_PROXY={} ' \
'-e HTTPS_PROXY={} ' \
'-e NO_PROXY="localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,' \
'cattle-system.svc" ' \
'rancher/rancher:{} --trace'.format(
proxy_url, proxy_url,
RANCHER_SERVER_VERSION)
deploy_result = run_command_on_proxy_node(bastion_node, ag_node,
deploy_rancher_command,
log_out=True)
assert "Downloaded newer image for rancher/rancher:{}".format(
RANCHER_SERVER_VERSION) in deploy_result[1]
return ag_node
def register_cluster_nodes(bastion_node, ag_nodes):
results = []
for ag_node in ag_nodes:
deploy_result = run_command_on_proxy_node(bastion_node, ag_node,
AGENT_REG_CMD)
results.append(deploy_result)
return results
def create_nlb_and_add_targets(aws_nodes):
# Create internet-facing nlb and grab ARN & dns name
lb = AmazonWebServices().create_network_lb(name=PROXY_HOST_NAME + "-nlb")
lb_arn = lb["LoadBalancers"][0]["LoadBalancerArn"]
public_dns = lb["LoadBalancers"][0]["DNSName"]
# Create internal nlb and grab ARN & dns name
internal_lb = AmazonWebServices().create_network_lb(
name=PROXY_HOST_NAME + "-internal-nlb", scheme='internal')
internal_lb_arn = internal_lb["LoadBalancers"][0]["LoadBalancerArn"]
internal_lb_dns = internal_lb["LoadBalancers"][0]["DNSName"]
# Upsert the route53 record -- if it exists, update, if not, insert
AmazonWebServices().upsert_route_53_record_cname(
RANCHER_PROXY_INTERNAL_HOSTNAME, internal_lb_dns)
AmazonWebServices().upsert_route_53_record_cname(
RANCHER_PROXY_HOSTNAME, public_dns)
public_dns = RANCHER_PROXY_HOSTNAME
# Create the target groups
tg80 = AmazonWebServices(). \
create_ha_target_group(80, PROXY_HOST_NAME + "-tg-80")
tg443 = AmazonWebServices(). \
create_ha_target_group(443, PROXY_HOST_NAME + "-tg-443")
tg80_arn = tg80["TargetGroups"][0]["TargetGroupArn"]
tg443_arn = tg443["TargetGroups"][0]["TargetGroupArn"]
# Create the internal target groups
internal_tg80 = AmazonWebServices(). \
create_ha_target_group(80, PROXY_HOST_NAME + "-internal-tg-80")
internal_tg443 = AmazonWebServices(). \
create_ha_target_group(443, PROXY_HOST_NAME + "-internal-tg-443")
internal_tg80_arn = internal_tg80["TargetGroups"][0]["TargetGroupArn"]
internal_tg443_arn = internal_tg443["TargetGroups"][0]["TargetGroupArn"]
# Create listeners for the load balancers, to forward to the target groups
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=lb_arn, port=80, targetGroupARN=tg80_arn)
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=lb_arn, port=443, targetGroupARN=tg443_arn)
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=internal_lb_arn, port=80,
targetGroupARN=internal_tg80_arn)
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=internal_lb_arn, port=443,
targetGroupARN=internal_tg443_arn)
targets = []
for aws_node in aws_nodes:
targets.append(aws_node.provider_node_id)
# Register the nodes to the internet-facing targets
targets_list = [dict(Id=target_id, Port=80) for target_id in targets]
AmazonWebServices().register_targets(targets_list, tg80_arn)
targets_list = [dict(Id=target_id, Port=443) for target_id in targets]
AmazonWebServices().register_targets(targets_list, tg443_arn)
# Wait up to approx. 5 minutes for targets to begin health checks
for i in range(300):
health80 = AmazonWebServices().describe_target_health(
tg80_arn)['TargetHealthDescriptions'][0]['TargetHealth']['State']
health443 = AmazonWebServices().describe_target_health(
tg443_arn)['TargetHealthDescriptions'][0]['TargetHealth']['State']
if health80 in ['initial', 'healthy'] \
and health443 in ['initial', 'healthy']:
break
time.sleep(1)
# Register the nodes to the internal targets
targets_list = [dict(Id=target_id, Port=80) for target_id in targets]
AmazonWebServices().register_targets(targets_list, internal_tg80_arn)
targets_list = [dict(Id=target_id, Port=443) for target_id in targets]
AmazonWebServices().register_targets(targets_list, internal_tg443_arn)
# Wait up to approx. 5 minutes for targets to begin health checks
for i in range(300):
try:
health80 = AmazonWebServices().describe_target_health(
internal_tg80_arn)[
'TargetHealthDescriptions'][0]['TargetHealth']['State']
health443 = AmazonWebServices().describe_target_health(
internal_tg443_arn)[
'TargetHealthDescriptions'][0]['TargetHealth']['State']
if health80 in ['initial', 'healthy'] \
and health443 in ['initial', 'healthy']:
break
except Exception:
print("Target group healthchecks unavailable...")
time.sleep(1)
return public_dns
def test_deploy_proxied_rancher():
proxy_node = deploy_proxy_server()
proxy_rancher_node = deploy_proxy_rancher(proxy_node)
public_dns = create_nlb_and_add_targets([proxy_rancher_node])
print(
"\nConnect to bastion node with:\nssh -i {}.pem {}@{}\n"
"Connect to rancher node by connecting to bastion, then run:\n"
"ssh -i {}.pem {}@{}\n\nOpen the Rancher UI with: https://{}\n"
"".format(
proxy_node.ssh_key_name, AWS_USER,
proxy_node.host_name,
proxy_node.ssh_key_name, AWS_USER,
proxy_rancher_node.private_ip_address,
public_dns))
def test_deploy_proxy_nodes():
bastion_node = get_bastion_node(BASTION_ID)
ag_nodes = prepare_airgap_proxy_node(bastion_node, NUMBER_OF_INSTANCES)
assert len(ag_nodes) == NUMBER_OF_INSTANCES
print(
'{} airgapped instance(s) created.\n'
'Connect to these and run commands by connecting to bastion node, '
'then running the following command (with the quotes):\n'
'ssh -i {}.pem {}@NODE_PRIVATE_IP '.format(
NUMBER_OF_INSTANCES, bastion_node.ssh_key_name,
AWS_USER))
for ag_node in ag_nodes:
assert ag_node.private_ip_address is not None
assert ag_node.public_ip_address is None
results = register_cluster_nodes(bastion_node, ag_nodes)
for result in results:
assert "Downloaded newer image for rancher/rancher-agent" in result[1]
|
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Parted(object):
def __init__(self, name, label):
self.name = name
self.label = label
self.partitions = []
self.install_bootloader = False
def add_partition(self, **kwargs):
# TODO(kozhukalov): validate before appending
# calculating partition name based on device name and partition count
kwargs['name'] = self.next_name()
kwargs['count'] = self.next_count()
kwargs['device'] = self.name
# if begin is given use its value else use end of last partition
kwargs['begin'] = kwargs.get('begin', self.next_begin())
# if end is given use its value else
# try to calculate it based on size kwarg or
# raise KeyError
# (kwargs.pop('size') will raise a KeyError if size is not set)
kwargs['end'] = kwargs.get('end') or \
kwargs['begin'] + kwargs.pop('size')
# if partition_type is given use its value else
# try to calculate it automatically
kwargs['partition_type'] = \
kwargs.get('partition_type', self.next_type())
partition = Partition(**kwargs)
self.partitions.append(partition)
return partition
@property
def logical(self):
return filter(lambda x: x.type == 'logical', self.partitions)
@property
def primary(self):
return filter(lambda x: x.type == 'primary', self.partitions)
@property
def extended(self):
found = filter(lambda x: x.type == 'extended', self.partitions)
if found:
return found[0]
def next_type(self):
if self.label == 'gpt':
return 'primary'
elif self.label == 'msdos':
if self.extended:
return 'logical'
elif len(self.partitions) < 3 and not self.extended:
return 'primary'
elif len(self.partitions) == 3 and not self.extended:
return 'extended'
# NOTE(agordeev): how to reach that condition?
else:
return 'logical'
def next_count(self, next_type=None):
next_type = next_type or self.next_type()
if next_type == 'logical':
return len(self.logical) + 5
return len(self.partitions) + 1
def next_begin(self):
if not self.partitions:
return 1
if self.partitions[-1] == self.extended:
return self.partitions[-1].begin
return self.partitions[-1].end
def next_name(self):
if self.next_type() == 'extended':
return None
separator = ''
special_devices = ('cciss', 'nvme', 'loop')
if any(n in self.name for n in special_devices):
separator = 'p'
return '%s%s%s' % (self.name, separator, self.next_count())
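# A minimal usage sketch (not part of the original module): building a GPT
# layout with a single partition.  The device name, size and flag below are
# hypothetical; sizes are in whatever unit the caller of add_partition()
# uses elsewhere.
def _example_gpt_layout():
    parted = Parted(name='/dev/sda', label='gpt')
    # begin defaults to the start of free space (1 here), end = begin + size
    boot = parted.add_partition(size=200)
    boot.set_flag('bios_grub')
    # boot.name == '/dev/sda1', boot.begin == 1, boot.end == 201
    return parted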
class Partition(object):
def __init__(self, name, count, device, begin, end, partition_type,
flags=None, guid=None, configdrive=False):
self.name = name
self.count = count
self.device = device
self.begin = begin
self.end = end
self.type = partition_type
self.flags = flags or []
self.guid = guid
self.configdrive = configdrive
def set_flag(self, flag):
if flag not in self.flags:
self.flags.append(flag)
def set_guid(self, guid):
self.guid = guid
class Pv(object):
def __init__(self, name, metadatasize=16, metadatacopies=2):
self.name = name
self.metadatasize = metadatasize
self.metadatacopies = metadatacopies
class Vg(object):
def __init__(self, name, pvnames=None):
self.name = name
self.pvnames = pvnames or []
def add_pv(self, pvname):
if pvname not in self.pvnames:
self.pvnames.append(pvname)
class Lv(object):
def __init__(self, name, vgname, size):
self.name = name
self.vgname = vgname
self.size = size
@property
def device_name(self):
return '/dev/mapper/%s-%s' % (self.vgname.replace('-', '--'),
self.name.replace('-', '--'))
class Md(object):
def __init__(self, name, level,
devices=None, spares=None):
self.name = name
self.level = level
self.devices = devices or []
self.spares = spares or []
def add_device(self, device):
if device in self.devices or device in self.spares:
raise errors.MDDeviceDuplicationError(
'Error while attaching device to md: '
'device %s is already attached' % device)
self.devices.append(device)
def add_spare(self, device):
if device in self.devices or device in self.spares:
raise errors.MDDeviceDuplicationError(
'Error while attaching device to md: '
'device %s is already attached' % device)
self.spares.append(device)
class Fs(object):
def __init__(self, device, mount=None,
fs_type=None, fs_options=None, fs_label=None):
self.device = device
self.mount = mount
self.type = fs_type or 'xfs'
self.options = fs_options or ''
self.label = fs_label or ''
class PartitionScheme(object):
def __init__(self):
self.parteds = []
self.mds = []
self.pvs = []
self.vgs = []
self.lvs = []
self.fss = []
def add_parted(self, **kwargs):
parted = Parted(**kwargs)
self.parteds.append(parted)
return parted
def add_pv(self, **kwargs):
pv = Pv(**kwargs)
self.pvs.append(pv)
return pv
def add_vg(self, **kwargs):
vg = Vg(**kwargs)
self.vgs.append(vg)
return vg
def add_lv(self, **kwargs):
lv = Lv(**kwargs)
self.lvs.append(lv)
return lv
def add_fs(self, **kwargs):
fs = Fs(**kwargs)
self.fss.append(fs)
return fs
def add_md(self, **kwargs):
mdkwargs = {}
mdkwargs['name'] = kwargs.get('name') or self.md_next_name()
mdkwargs['level'] = kwargs.get('level') or 'mirror'
md = Md(**mdkwargs)
self.mds.append(md)
return md
def md_by_name(self, name):
        found = [x for x in self.mds if x.name == name]
if found:
return found[0]
def md_by_mount(self, mount):
fs = self.fs_by_mount(mount)
if fs:
return self.md_by_name(fs.device)
def md_attach_by_mount(self, device, mount, spare=False, **kwargs):
md = self.md_by_mount(mount)
if not md:
md = self.add_md(**kwargs)
fskwargs = {}
fskwargs['device'] = md.name
fskwargs['mount'] = mount
fskwargs['fs_type'] = kwargs.pop('fs_type', None)
fskwargs['fs_options'] = kwargs.pop('fs_options', None)
fskwargs['fs_label'] = kwargs.pop('fs_label', None)
self.add_fs(**fskwargs)
        if spare:
            md.add_spare(device)
        else:
            md.add_device(device)
return md
def md_next_name(self):
count = 0
while True:
name = '/dev/md%s' % count
if name not in [md.name for md in self.mds]:
return name
if count >= 127:
raise errors.MDAlreadyExistsError(
'Error while generating md name: '
'names from /dev/md0 to /dev/md127 seem to be busy, '
'try to generate md name manually')
count += 1
def vg_by_name(self, vgname):
        found = [x for x in self.vgs if x.name == vgname]
if found:
return found[0]
def pv_by_name(self, pvname):
        found = [x for x in self.pvs if x.name == pvname]
if found:
return found[0]
def vg_attach_by_name(self, pvname, vgname,
metadatasize=16, metadatacopies=2):
vg = self.vg_by_name(vgname) or self.add_vg(name=vgname)
pv = self.pv_by_name(pvname) or self.add_pv(
name=pvname, metadatasize=metadatasize,
metadatacopies=metadatacopies)
vg.add_pv(pv.name)
def fs_by_mount(self, mount):
        found = [x for x in self.fss if x.mount and x.mount == mount]
if found:
return found[0]
def fs_by_device(self, device):
        found = [x for x in self.fss if x.device == device]
if found:
return found[0]
def fs_sorted_by_depth(self, reverse=False):
"""Getting file systems sorted by path length.
Shorter paths earlier.
['/', '/boot', '/var', '/var/lib/mysql']
:param reverse: Sort backward (Default: False)
"""
def key(x):
return x.mount.rstrip(os.path.sep).count(os.path.sep)
return sorted(self.fss, key=key, reverse=reverse)
def lv_by_device_name(self, device_name):
        found = [x for x in self.lvs if x.device_name == device_name]
if found:
return found[0]
def root_device(self):
fs = self.fs_by_mount('/')
if not fs:
raise errors.WrongPartitionSchemeError(
'Error while trying to find root device: '
'root file system not found')
return fs.device
def boot_device(self, grub_version=2):
# We assume /boot is a separate partition. If it is not
# then we try to use root file system
boot_fs = self.fs_by_mount('/boot') or self.fs_by_mount('/')
if not boot_fs:
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
                'boot file system not found, '
'it must be a separate mount point')
if grub_version == 1:
# Legacy GRUB has a limitation. It is not able to mount MD devices.
# If it is MD compatible it is only able to ignore MD metadata
# and to mount one of those devices which are parts of MD device,
# but it is possible only if MD device is a MIRROR.
md = self.md_by_name(boot_fs.device)
if md:
try:
return md.devices[0]
except IndexError:
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
'md device %s does not have devices attached' %
md.name)
# Legacy GRUB is not able to mount LVM devices.
if self.lv_by_device_name(boot_fs.device):
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
'found device is %s but legacy grub is not able to '
'mount logical volumes' %
boot_fs.device)
return boot_fs.device
def configdrive_device(self):
# Configdrive device must be a small (about 10M) partition
# on one of node hard drives. This partition is necessary
# only if one uses cloud-init with configdrive.
for parted in self.parteds:
for prt in parted.partitions:
if prt.configdrive:
return prt.name
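# Illustrative usage sketch (not part of the original module). It exercises only
# the methods defined above; the device names and the LV size are hypothetical.
def _example_partition_scheme():
    scheme = PartitionScheme()
    # Put a PV/VG/LV behind '/', and a plain partition behind '/boot'.
    scheme.vg_attach_by_name(pvname='/dev/sda5', vgname='os')
    scheme.add_lv(name='root', vgname='os', size=10000)
    scheme.add_fs(device='/dev/mapper/os-root', mount='/', fs_type='ext4')
    scheme.add_fs(device='/dev/sda3', mount='/boot', fs_type='ext2')
    # root_device() resolves through fs_by_mount('/');
    # boot_device() prefers the separate /boot file system.
    return scheme.root_device(), scheme.boot_device(grub_version=2)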
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ArticleTranslation.meta_title'
db.add_column(u'aldryn_newsblog_article_translation', 'meta_title',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
# Adding field 'ArticleTranslation.meta_description'
db.add_column(u'aldryn_newsblog_article_translation', 'meta_description',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
# Adding field 'ArticleTranslation.meta_keywords'
db.add_column(u'aldryn_newsblog_article_translation', 'meta_keywords',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ArticleTranslation.meta_title'
db.delete_column(u'aldryn_newsblog_article_translation', 'meta_title')
# Deleting field 'ArticleTranslation.meta_description'
db.delete_column(u'aldryn_newsblog_article_translation', 'meta_description')
# Deleting field 'ArticleTranslation.meta_keywords'
db.delete_column(u'aldryn_newsblog_article_translation', 'meta_keywords')
models = {
u'aldryn_categories.category': {
'Meta': {'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'rgt': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'aldryn_newsblog.article': {
'Meta': {'ordering': "[u'-publishing_date']", 'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Person']"}),
'categories': ('aldryn_categories.fields.CategoryManyToManyField', [], {'to': u"orm['aldryn_categories.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'aldryn_newsblog_articles'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'publishing_date': ('django.db.models.fields.DateTimeField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'})
},
u'aldryn_newsblog.articletranslation': {
'Meta': {'unique_together': "[(u'language_code', u'master')]", 'object_name': 'ArticleTranslation', 'db_table': "u'aldryn_newsblog_article_translation'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_newsblog.Article']"}),
'meta_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'meta_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '234'})
},
u'aldryn_newsblog.latestentriesplugin': {
'Meta': {'object_name': 'LatestEntriesPlugin', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'latest_entries': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'aldryn_newsblog.newsblogconfig': {
'Meta': {'unique_together': "(('type', 'namespace'),)", 'object_name': 'NewsBlogConfig'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'namespace': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'aldryn_people.group': {
'Meta': {'object_name': 'Group'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'aldryn_people.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'vcard_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visual': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image'},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['aldryn_newsblog']
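# Usage note (hand-added, not produced by South's schemamigration command):
# South applies this migration with its `migrate` management command, e.g.
#     python manage.py migrate aldryn_newsblog
# and reverses it (running backwards()) by migrating back to the previous
# migration number for this app; that number is project-specific.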
|
|
from __future__ import annotations
import collections
from datetime import datetime
from decimal import Decimal
from functools import wraps
import operator
import os
import re
import string
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.compat import get_dtype # noqa:F401
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import (
DatetimeArray,
PandasArray,
PeriodArray,
TimedeltaArray,
period_array,
)
if TYPE_CHECKING:
from pandas import (
PeriodIndex,
TimedeltaIndex,
)
_N = 30
_K = 4
UNSIGNED_INT_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: list[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("always", category)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("ignore", category)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
if isinstance(expected, RangeIndex):
# pd.array would return an IntegerArray
expected = PandasArray(np.asarray(expected._values))
else:
expected = pd.array(expected)
elif box_cls is Index:
expected = Index(expected)
elif box_cls is Series:
expected = Series(expected)
elif box_cls is DataFrame:
expected = Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length. But convert to two rows to avoid
# single-row special cases in datetime arithmetic
expected = expected.T
expected = pd.concat([expected] * 2, ignore_index=True)
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
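# Illustrative sketch (not part of pandas): box_expected is normally used in
# parametrized tests to wrap expected values in the same container as the
# object under test; the values below are arbitrary.
def _example_box_expected():
    raw = np.array([1, 2, 3])
    as_series = box_expected(raw, Series)      # Series([1, 2, 3])
    # With transpose=True a DataFrame expectation becomes two identical rows,
    # matching how vector ops against a single-row frame are tested.
    as_frame = box_expected(raw, DataFrame)
    return as_series, as_frame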
# -----------------------------------------------------------------------------
# Others
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
    seed : int, optional
        The random state seed.
    Returns
    -------
    DataFrame
        Indexed by "timestamp" and containing the columns:
        * name : object dtype with string names
        * id : int dtype with Poisson-distributed ids centered around 1000
        * x, y : float dtype values drawn uniformly from [-1, 1)
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs: list[Callable[..., Index]] = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(np.random.randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(
np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame() -> DataFrame:
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multi-index with given dimensions, levels, names, etc.
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces a multi-index)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func_dict: dict[str, Callable[..., Index]] = {
"i": makeIntIndex,
"f": makeFloatIndex,
"s": makeStringIndex,
"u": makeUnicodeIndex,
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
}
idx_func = idx_func_dict.get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
list_of_lists = []
for i in range(nlevels):
        def keyfunc(x):
            # `re` is imported at module level
            numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
            return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
        # The Counter annotation comes from typing; since Python 3.9,
        # collections.Counter itself supports [] (PEP 585, generic alias types).
cnt: Counter[str] = collections.Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
list_of_lists.append(result)
tuples = list(zip(*list_of_lists))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
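# Illustrative sketch (not part of pandas): a 2-level MultiIndex with 6 entries
# where each first-level label repeats three times; names=True uses the default
# "#0", "#1" level names. The argument values are arbitrary.
def _example_custom_index():
    return makeCustomIndex(nentries=6, nlevels=2, names=True, ndupe_l=[3])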
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index,
        "dt" creates a datetime index,
        "p" creates a period index,
        "td" creates a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
    >> makeCustomDataframe(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multindex on rows with names provided, 2-level multindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FIH","FOH","FUM"],
c_idx_nlevels=2)
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
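# Illustrative sketch (not part of pandas): `density` is the fraction of values
# kept, so density=0.8 leaves roughly 20% of the cells as NaN. The random_state
# value is an arbitrary seed.
def _example_missing_frame():
    df = makeMissingDataframe(density=0.8, random_state=42)
    return df.isna().mean().mean()   # ~0.2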
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
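# Illustrative sketch (not part of pandas): test_parallel wraps a test body so a
# single call runs it from several threads; the function and list below are
# hypothetical.
_parallel_calls = []
@test_parallel(num_threads=4)
def _example_parallel_append():
    _parallel_calls.append(1)
# One call to _example_parallel_append() appends four items to _parallel_calls.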
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: list[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None) # noqa: PDF010
cython_table = pd.core.common._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from com._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : str
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
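# Illustrative sketch (not part of pandas): plain names resolve directly to the
# operator module, while reversed dunder names ("__rsub__") swap the operands.
def _example_get_op_from_name():
    add = get_op_from_name("add")          # operator.add
    rsub = get_op_from_name("__rsub__")    # lambda x, y: operator.sub(y, x)
    return add(2, 3), rsub(2, 3)           # (5, 1)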
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
return x
def setitem(x):
return x
def loc(x):
return x.loc
def iloc(x):
return x.iloc
def at(x):
return x.at
def iat(x):
return x.iat
|
|
from django.template import loader, RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.core.xheaders import populate_xheaders
from django.db.models.fields import DateTimeField
from django.http import Http404, HttpResponse
import datetime, time
def archive_index(request, queryset, date_field, num_latest=15,
template_name=None, template_loader=loader,
extra_context=None, allow_empty=False, context_processors=None,
mimetype=None, allow_future=False):
"""
Generic top-level archive of date-based objects.
Templates: ``<app_label>/<model_name>_archive.html``
Context:
date_list
List of years
latest
Latest N (defaults to 15) objects by date
"""
if extra_context is None: extra_context = {}
model = queryset.model
if not allow_future:
queryset = queryset.filter(**{'%s__lte' % date_field: datetime.datetime.now()})
date_list = queryset.dates(date_field, 'year')[::-1]
if not date_list and not allow_empty:
raise Http404, "No %s available" % model._meta.verbose_name
if date_list and num_latest:
latest = queryset.order_by('-'+date_field)[:num_latest]
else:
latest = None
if not template_name:
template_name = "%s/%s_archive.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'date_list' : date_list,
'latest' : latest,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_year(request, year, queryset, date_field, template_name=None,
template_loader=loader, extra_context=None, allow_empty=False,
context_processors=None, template_object_name='object', mimetype=None,
make_object_list=False, allow_future=False):
"""
Generic yearly archive view.
Templates: ``<app_label>/<model_name>_archive_year.html``
Context:
date_list
List of months in this year with objects
year
This year
object_list
List of objects published in the given month
(Only available if make_object_list argument is True)
"""
if extra_context is None: extra_context = {}
model = queryset.model
now = datetime.datetime.now()
lookup_kwargs = {'%s__year' % date_field: year}
# Only bother to check current date if the year isn't in the past and future objects aren't requested.
if int(year) >= now.year and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
date_list = queryset.filter(**lookup_kwargs).dates(date_field, 'month')
if not date_list and not allow_empty:
raise Http404
if make_object_list:
object_list = queryset.filter(**lookup_kwargs).order_by(date_field)
else:
object_list = []
if not template_name:
template_name = "%s/%s_archive_year.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'date_list': date_list,
'year': year,
'%s_list' % template_object_name: object_list,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_month(request, year, month, queryset, date_field,
month_format='%b', template_name=None, template_loader=loader,
extra_context=None, allow_empty=False, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic monthly archive view.
Templates: ``<app_label>/<model_name>_archive_month.html``
Context:
month:
(date) this month
next_month:
(date) the first day of the next month, or None if the next month is in the future
previous_month:
(date) the first day of the previous month
object_list:
list of objects published in the given month
"""
if extra_context is None: extra_context = {}
try:
date = datetime.date(*time.strptime(year+month, '%Y'+month_format)[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
# Calculate first and last day of month, for use in a date-range lookup.
first_day = date.replace(day=1)
if first_day.month == 12:
last_day = first_day.replace(year=first_day.year + 1, month=1)
else:
last_day = first_day.replace(month=first_day.month + 1)
lookup_kwargs = {'%s__range' % date_field: (first_day, last_day)}
    # Only bother to check current date if the month isn't in the past and future objects aren't requested.
if last_day >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
if not object_list and not allow_empty:
raise Http404
# Calculate the next month, if applicable.
if allow_future:
next_month = last_day + datetime.timedelta(days=1)
elif last_day < datetime.date.today():
next_month = last_day + datetime.timedelta(days=1)
else:
next_month = None
if not template_name:
template_name = "%s/%s_archive_month.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'%s_list' % template_object_name: object_list,
'month': date,
'next_month': next_month,
'previous_month': first_day - datetime.timedelta(days=1),
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_week(request, year, week, queryset, date_field,
template_name=None, template_loader=loader,
extra_context=None, allow_empty=True, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic weekly archive view.
Templates: ``<app_label>/<model_name>_archive_week.html``
Context:
week:
(date) this week
object_list:
list of objects published in the given week
"""
if extra_context is None: extra_context = {}
try:
date = datetime.date(*time.strptime(year+'-0-'+week, '%Y-%w-%U')[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
# Calculate first and last day of week, for use in a date-range lookup.
first_day = date
last_day = date + datetime.timedelta(days=7)
lookup_kwargs = {'%s__range' % date_field: (first_day, last_day)}
# Only bother to check current date if the week isn't in the past and future objects aren't requested.
if last_day >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
if not object_list and not allow_empty:
raise Http404
if not template_name:
template_name = "%s/%s_archive_week.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'%s_list' % template_object_name: object_list,
'week': date,
})
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_day(request, year, month, day, queryset, date_field,
month_format='%b', day_format='%d', template_name=None,
template_loader=loader, extra_context=None, allow_empty=False,
context_processors=None, template_object_name='object',
mimetype=None, allow_future=False):
"""
Generic daily archive view.
Templates: ``<app_label>/<model_name>_archive_day.html``
Context:
object_list:
list of objects published that day
day:
(datetime) the day
previous_day
(datetime) the previous day
next_day
(datetime) the next day, or None if the current day is today
"""
if extra_context is None: extra_context = {}
try:
date = datetime.date(*time.strptime(year+month+day, '%Y'+month_format+day_format)[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
if isinstance(model._meta.get_field(date_field), DateTimeField):
lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
else:
lookup_kwargs = {date_field: date}
# Only bother to check current date if the date isn't in the past and future objects aren't requested.
if date >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
if not allow_empty and not object_list:
raise Http404
# Calculate the next day, if applicable.
if allow_future:
next_day = date + datetime.timedelta(days=1)
elif date < datetime.date.today():
next_day = date + datetime.timedelta(days=1)
else:
next_day = None
if not template_name:
template_name = "%s/%s_archive_day.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'%s_list' % template_object_name: object_list,
'day': date,
'previous_day': date - datetime.timedelta(days=1),
'next_day': next_day,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_today(request, **kwargs):
"""
Generic daily archive view for today. Same as archive_day view.
"""
today = datetime.date.today()
kwargs.update({
'year': str(today.year),
'month': today.strftime('%b').lower(),
'day': str(today.day),
})
return archive_day(request, **kwargs)
def object_detail(request, year, month, day, queryset, date_field,
month_format='%b', day_format='%d', object_id=None, slug=None,
slug_field=None, template_name=None, template_name_field=None,
template_loader=loader, extra_context=None, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic detail view from year/month/day/slug or year/month/day/id structure.
Templates: ``<app_label>/<model_name>_detail.html``
Context:
object:
the object to be detailed
"""
if extra_context is None: extra_context = {}
try:
date = datetime.date(*time.strptime(year+month+day, '%Y'+month_format+day_format)[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
if isinstance(model._meta.get_field(date_field), DateTimeField):
lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
else:
lookup_kwargs = {date_field: date}
# Only bother to check current date if the date isn't in the past and future objects aren't requested.
if date >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
if object_id:
lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
elif slug and slug_field:
lookup_kwargs['%s__exact' % slug_field] = slug
else:
        raise AttributeError("Generic detail view must be called with either an object_id or a slug and slug_field")
try:
obj = queryset.get(**lookup_kwargs)
except ObjectDoesNotExist:
raise Http404, "No %s found for" % model._meta.verbose_name
if not template_name:
template_name = "%s/%s_detail.html" % (model._meta.app_label, model._meta.object_name.lower())
if template_name_field:
template_name_list = [getattr(obj, template_name_field), template_name]
t = template_loader.select_template(template_name_list)
else:
t = template_loader.get_template(template_name)
c = RequestContext(request, {
template_object_name: obj,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
response = HttpResponse(t.render(c), mimetype=mimetype)
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.name))
return response
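# Illustrative usage sketch (not part of the original module): these date-based
# generic views were wired up through an old-style Django URLconf with an "info
# dict" of keyword arguments. The queryset argument stands in for something like
# Entry.objects.all() in a real project; the URL patterns below are hypothetical.
def _example_urlpatterns(queryset, date_field='pub_date'):
    from django.conf.urls.defaults import patterns
    info_dict = {'queryset': queryset, 'date_field': date_field}
    return patterns('',
        (r'^archive/$', archive_index, info_dict),
        (r'^(?P<year>\d{4})/$', archive_year, dict(info_dict, make_object_list=True)),
        (r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/$',
         object_detail, dict(info_dict, slug_field='slug')),
    )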
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import json
from io import open
import warnings
from pymatgen.electronic_structure.bandstructure import Kpoint
from pymatgen import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.io.vasp import BSVasprun
from pymatgen.electronic_structure.bandstructure import (BandStructureSymmLine,
get_reconstructed_band_structure)
from pymatgen.util.testing import PymatgenTest
from monty.serialization import loadfn
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class KpointTest(unittest.TestCase):
def setUp(self):
self.lattice = Lattice.cubic(10.0)
self.kpoint = Kpoint([0.1, 0.4, -0.5], self.lattice, label="X")
def test_properties(self):
self.assertEqual(self.kpoint.frac_coords[0], 0.1)
self.assertEqual(self.kpoint.frac_coords[1], 0.4)
self.assertEqual(self.kpoint.frac_coords[2], -0.5)
self.assertEqual(self.kpoint.a, 0.1)
self.assertEqual(self.kpoint.b, 0.4)
self.assertEqual(self.kpoint.c, -0.5)
self.assertEqual(self.lattice, Lattice.cubic(10.0))
self.assertEqual(self.kpoint.cart_coords[0], 1.0)
self.assertEqual(self.kpoint.cart_coords[1], 4.0)
self.assertEqual(self.kpoint.cart_coords[2], -5.0)
self.assertEqual(self.kpoint.label, "X")
class BandStructureSymmLine_test(PymatgenTest):
def setUp(self):
self.bs = loadfn(os.path.join(test_dir, "Cu2O_361_bandstructure.json"))
self.bs2 = loadfn(os.path.join(test_dir, "CaO_2605_bandstructure.json"))
self.bs_spin = loadfn(os.path.join(test_dir, "NiO_19009_bandstructure.json"))
self.bs_cbm0 = loadfn(os.path.join(test_dir, "InN_22205_bandstructure.json"))
self.bs_cu = loadfn(os.path.join(test_dir, "Cu_30_bandstructure.json"))
self.bs_diff_spins = loadfn(os.path.join(test_dir, "VBr2_971787_bandstructure.json"))
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_basic(self):
self.assertArrayAlmostEqual(self.bs.projections[Spin.up][10][12][0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.assertArrayAlmostEqual(self.bs.projections[Spin.up][25][0][
Orbital.dyz.value],
[0.0, 0.0, 0.0011, 0.0219, 0.0219, 0.069])
self.assertAlmostEqual(
self.bs.get_projection_on_elements()[Spin.up][25][10]['O'], 0.0328)
self.assertAlmostEqual(
self.bs.get_projection_on_elements()[Spin.up][22][25]['Cu'], 0.8327)
proj = self.bs.get_projections_on_elements_and_orbitals({'Cu': ['s',
'd']})
self.assertAlmostEqual(
proj[Spin.up][25][0]['Cu']['s'], 0.0027)
self.assertAlmostEqual(
proj[Spin.up][25][0]['Cu']['d'], 0.8495999999999999)
self.assertEqual(self.bs2.nb_bands, 16)
self.assertAlmostEqual(self.bs2.bands[Spin.up][5][10], 0.5608)
self.assertAlmostEqual(self.bs2.bands[Spin.up][5][10], 0.5608)
self.assertEqual(self.bs2.branches[5]['name'], "L-U")
self.assertEqual(self.bs2.branches[5]['start_index'], 80)
self.assertEqual(self.bs2.branches[5]['end_index'], 95)
self.assertAlmostEqual(self.bs2.distance[70], 4.2335127528765737)
self.assertEqual(self.bs_spin.nb_bands, 27)
self.assertAlmostEqual(self.bs_spin.bands[Spin.up][5][10], 0.262)
self.assertAlmostEqual(self.bs_spin.bands[Spin.down][5][10],
1.6156)
def test_properties(self):
self.one_kpoint = self.bs2.kpoints[31]
self.assertEqual(self.one_kpoint.frac_coords[0], 0.5)
self.assertEqual(self.one_kpoint.frac_coords[1], 0.25)
self.assertEqual(self.one_kpoint.frac_coords[2], 0.75)
self.assertAlmostEqual(self.one_kpoint.cart_coords[0], 0.64918757)
self.assertAlmostEqual(self.one_kpoint.cart_coords[1], 1.29837513)
self.assertAlmostEqual(self.one_kpoint.cart_coords[2], 0.0)
self.assertEqual(self.one_kpoint.label, "W")
        self.assertAlmostEqual(self.bs2.efermi, 2.6211967, msg="wrong fermi energy")
def test_get_branch(self):
self.assertAlmostEqual(self.bs2.get_branch(110)[0]['name'], "U-W")
def test_get_direct_band_gap_dict(self):
direct_dict = self.bs_diff_spins.get_direct_band_gap_dict()
self.assertEqual(direct_dict[Spin.down]['value'], 4.5365)
for bs in [self.bs2, self.bs_spin]:
dg_dict = bs.get_direct_band_gap_dict()
for spin, v in bs.bands.items():
kpt = dg_dict[spin]['kpoint_index']
vb, cb = dg_dict[spin]['band_indices']
gap = v[cb][kpt] - v[vb][kpt]
self.assertEqual(gap, dg_dict[spin]['value'])
self.assertRaises(ValueError, self.bs_cu.get_direct_band_gap_dict)
def test_get_direct_band_gap(self):
self.assertAlmostEqual(self.bs2.get_direct_band_gap(),
4.0125999999999999)
self.assertTrue(self.bs_diff_spins.get_direct_band_gap() > 0)
self.assertEqual(self.bs_cu.get_direct_band_gap(), 0)
def test_is_metal(self):
self.assertFalse(self.bs2.is_metal(), "wrong metal assignment")
self.assertFalse(self.bs_spin.is_metal(), "wrong metal assignment")
self.assertTrue(self.bs_cu.is_metal(), "wrong metal assignment")
def test_get_cbm(self):
cbm = self.bs2.get_cbm()
        self.assertAlmostEqual(cbm['energy'], 5.8709, msg="wrong CBM energy")
self.assertEqual(cbm['band_index'][Spin.up][0], 8, "wrong CBM band index")
self.assertEqual(cbm['kpoint_index'][0], 15, "wrong CBM kpoint index")
self.assertEqual(cbm['kpoint'].frac_coords[0], 0.5, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[2], 0.5, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].label, "X", "wrong CBM kpoint label")
cbm_spin = self.bs_spin.get_cbm()
        self.assertAlmostEqual(cbm_spin['energy'], 8.0458, msg="wrong CBM energy")
self.assertEqual(cbm_spin['band_index'][Spin.up][0], 12, "wrong CBM band index")
self.assertEqual(len(cbm_spin['band_index'][Spin.down]), 0, "wrong CBM band index")
self.assertEqual(cbm_spin['kpoint_index'][0], 0, "wrong CBM kpoint index")
self.assertEqual(cbm_spin['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
def test_get_vbm(self):
vbm = self.bs2.get_vbm()
        self.assertAlmostEqual(vbm['energy'], 2.2361, msg="wrong VBM energy")
self.assertEqual(len(vbm['band_index'][Spin.up]), 3, "wrong VBM number of bands")
self.assertEqual(vbm['band_index'][Spin.up][0], 5, "wrong VBM band index")
self.assertEqual(vbm['kpoint_index'][0], 0, "wrong VBM kpoint index")
self.assertEqual(vbm['kpoint'].frac_coords[0], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].frac_coords[1], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].frac_coords[2], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong VBM kpoint label")
vbm_spin = self.bs_spin.get_vbm()
        self.assertAlmostEqual(vbm_spin['energy'], 5.731, msg="wrong VBM energy")
self.assertEqual(len(vbm_spin['band_index'][Spin.up]), 2, "wrong VBM number of bands")
self.assertEqual(len(vbm_spin['band_index'][Spin.down]), 0, "wrong VBM number of bands")
self.assertEqual(vbm_spin['band_index'][Spin.up][0], 10, "wrong VBM band index")
self.assertEqual(vbm_spin['kpoint_index'][0], 79, "wrong VBM kpoint index")
self.assertEqual(vbm_spin['kpoint'].frac_coords[0], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].frac_coords[1], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].frac_coords[2], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].label, "L", "wrong VBM kpoint label")
def test_get_band_gap(self):
bg = self.bs2.get_band_gap()
        self.assertAlmostEqual(bg['energy'], 3.6348, msg="wrong gap energy")
self.assertEqual(bg['transition'], "\\Gamma-X", "wrong kpoint transition")
self.assertFalse(bg['direct'], "wrong nature of the gap")
bg_spin = self.bs_spin.get_band_gap()
        self.assertAlmostEqual(bg_spin['energy'], 2.3148, msg="wrong gap energy")
self.assertEqual(bg_spin['transition'], "L-\\Gamma", "wrong kpoint transition")
self.assertFalse(bg_spin['direct'], "wrong nature of the gap")
bg_cbm0 = self.bs_cbm0.get_band_gap()
self.assertAlmostEqual(bg_cbm0['energy'], 0, places=3, msg="wrong gap energy")
def test_get_sym_eq_kpoints_and_degeneracy(self):
bs = self.bs2
cbm_k = bs.get_cbm()['kpoint'].frac_coords
vbm_k = bs.get_vbm()['kpoint'].frac_coords
self.assertEqual(bs.get_kpoint_degeneracy(cbm_k), None)
bs.structure = loadfn(os.path.join(test_dir, "CaO_2605_structure.json"))
self.assertEqual(bs.get_kpoint_degeneracy(cbm_k), 3)
self.assertEqual(bs.get_kpoint_degeneracy(vbm_k), 1)
cbm_eqs = bs.get_sym_eq_kpoints(cbm_k)
self.assertTrue([0.5, 0., 0.5] in cbm_eqs)
self.assertTrue([0., 0.5, 0.5] in cbm_eqs)
self.assertTrue([0.5, 0.5, 0.] in cbm_eqs)
vbm_eqs = bs.get_sym_eq_kpoints(vbm_k)
self.assertTrue([0., 0., 0.] in vbm_eqs)
def test_as_dict(self):
s = json.dumps(self.bs.as_dict())
self.assertIsNotNone(s)
s = json.dumps(self.bs2.as_dict())
self.assertIsNotNone(s)
s = json.dumps(self.bs_spin.as_dict())
self.assertIsNotNone(s)
def test_old_format_load(self):
with open(os.path.join(test_dir, "bs_ZnS_old.json"),
"r", encoding='utf-8') as f:
d = json.load(f)
bs_old = BandStructureSymmLine.from_dict(d)
self.assertEqual(bs_old.get_projection_on_elements()[
Spin.up][0][0]['Zn'], 0.0971)
class ReconstructBandStructureTest(PymatgenTest):
def setUp(self):
self.bs_cu = loadfn(os.path.join(test_dir, "Cu_30_bandstructure.json"))
self.bs_cu2 = loadfn(os.path.join(test_dir, "Cu_30_bandstructure.json"))
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_reconstruct_band_structure(self):
bs = get_reconstructed_band_structure([self.bs_cu, self.bs_cu2])
self.assertEqual(bs.bands[Spin.up].shape, (20, 700), "wrong number of bands or kpoints")
def test_vasprun_bs(self):
bsv = BSVasprun(os.path.join(test_dir, "vasprun.xml"),
parse_projected_eigen=True,
parse_potcar_file=True)
bs = bsv.get_band_structure(kpoints_filename=os.path.join(test_dir, "KPOINTS.band"),
line_mode=True)
bs.get_projection_on_elements()
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for EMC ScaleIO based on ScaleIO remote CLI.
"""
import base64
import json
import os
import eventlet
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import requests
import six
import urllib
from cinder import context
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
scaleio_opts = [
cfg.StrOpt('sio_rest_server_port',
default='443',
help='REST server port.'),
cfg.BoolOpt('sio_verify_server_certificate',
default=False,
help='Whether to verify server certificate.'),
cfg.StrOpt('sio_server_certificate_path',
default=None,
help='Server certificate path.'),
cfg.BoolOpt('sio_round_volume_capacity',
default=True,
help='Whether to round volume capacity.'),
cfg.BoolOpt('sio_force_delete',
default=False,
help='Whether to allow force delete.'),
cfg.BoolOpt('sio_unmap_volume_before_deletion',
default=False,
help='Whether to unmap volume before deletion.'),
cfg.StrOpt('sio_protection_domain_id',
default=None,
help='Protection domain id.'),
cfg.StrOpt('sio_protection_domain_name',
default=None,
help='Protection domain name.'),
cfg.StrOpt('sio_storage_pools',
default=None,
help='Storage pools.'),
cfg.StrOpt('sio_storage_pool_name',
default=None,
help='Storage pool name.'),
cfg.StrOpt('sio_storage_pool_id',
default=None,
help='Storage pool id.')
]
CONF.register_opts(scaleio_opts)
STORAGE_POOL_NAME = 'sio:sp_name'
STORAGE_POOL_ID = 'sio:sp_id'
PROTECTION_DOMAIN_NAME = 'sio:pd_name'
PROTECTION_DOMAIN_ID = 'sio:pd_id'
PROVISIONING_KEY = 'sio:provisioning'
IOPS_LIMIT_KEY = 'sio:iops_limit'
BANDWIDTH_LIMIT = 'sio:bandwidth_limit'
BLOCK_SIZE = 8
OK_STATUS_CODE = 200
VOLUME_NOT_FOUND_ERROR = 3
VOLUME_NOT_MAPPED_ERROR = 84
VOLUME_ALREADY_MAPPED_ERROR = 81
class ScaleIODriver(driver.VolumeDriver):
"""EMC ScaleIO Driver."""
VERSION = "2.0"
def __init__(self, *args, **kwargs):
super(ScaleIODriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(san.san_opts)
self.configuration.append_config_values(scaleio_opts)
self.server_ip = self.configuration.san_ip
self.server_port = self.configuration.sio_rest_server_port
self.server_username = self.configuration.san_login
self.server_password = self.configuration.san_password
self.server_token = None
self.verify_server_certificate = (
self.configuration.sio_verify_server_certificate)
self.server_certificate_path = None
if self.verify_server_certificate:
self.server_certificate_path = (
self.configuration.sio_server_certificate_path)
LOG.info(_LI(
"REST server IP: %(ip)s, port: %(port)s, username: %(user)s. "
"Verify server's certificate: %(verify_cert)s."),
{'ip': self.server_ip,
'port': self.server_port,
'user': self.server_username,
'verify_cert': self.verify_server_certificate})
self.storage_pools = [e.strip() for e in
self.configuration.sio_storage_pools.split(',')]
self.storage_pool_name = self.configuration.sio_storage_pool_name
self.storage_pool_id = self.configuration.sio_storage_pool_id
if (self.storage_pool_name is None and self.storage_pool_id is None):
LOG.warning(_LW("No storage pool name or id was found."))
else:
LOG.info(_LI(
"Storage pools names: %(pools)s, "
"storage pool name: %(pool)s, pool id: %(pool_id)s."),
{'pools': self.storage_pools,
'pool': self.storage_pool_name,
'pool_id': self.storage_pool_id})
self.protection_domain_name = (
self.configuration.sio_protection_domain_name)
LOG.info(_LI(
"Protection domain name: %(domain_name)s."),
{'domain_name': self.protection_domain_name})
self.protection_domain_id = self.configuration.sio_protection_domain_id
LOG.info(_LI(
"Protection domain name: %(domain_id)s."),
{'domain_id': self.protection_domain_id})
def check_for_setup_error(self):
if (not self.protection_domain_name and
not self.protection_domain_id):
LOG.warning(_LW("No protection domain name or id "
"was specified in configuration."))
if self.protection_domain_name and self.protection_domain_id:
msg = _("Cannot specify both protection domain name "
"and protection domain id.")
raise exception.InvalidInput(reason=msg)
if not self.server_ip:
msg = _("REST server IP must by specified.")
raise exception.InvalidInput(reason=msg)
if not self.server_username:
msg = _("REST server username must by specified.")
raise exception.InvalidInput(reason=msg)
if not self.server_password:
msg = _("REST server password must by specified.")
raise exception.InvalidInput(reason=msg)
if not self.verify_server_certificate:
LOG.warning(_LW("Verify certificate is not set, using default of "
"False."))
if self.verify_server_certificate and not self.server_certificate_path:
msg = _("Path to REST server's certificate must be specified.")
raise exception.InvalidInput(reason=msg)
if self.storage_pool_name and self.storage_pool_id:
msg = _("Cannot specify both storage pool name and storage "
"pool id.")
raise exception.InvalidInput(reason=msg)
if not self.storage_pool_name and not self.storage_pool_id:
msg = _("Must specify storage pool name or id.")
raise exception.InvalidInput(reason=msg)
def _find_storage_pool_id_from_storage_type(self, storage_type):
# Default to what was configured in configuration file if not defined.
return storage_type.get(STORAGE_POOL_ID,
self.storage_pool_id)
def _find_storage_pool_name_from_storage_type(self, storage_type):
return storage_type.get(STORAGE_POOL_NAME,
self.storage_pool_name)
def _find_protection_domain_id_from_storage_type(self, storage_type):
# Default to what was configured in configuration file if not defined.
return storage_type.get(PROTECTION_DOMAIN_ID,
self.protection_domain_id)
def _find_protection_domain_name_from_storage_type(self, storage_type):
# Default to what was configured in configuration file if not defined.
return storage_type.get(PROTECTION_DOMAIN_NAME,
self.protection_domain_name)
def _find_provisioning_type(self, storage_type):
return storage_type.get(PROVISIONING_KEY)
def _find_iops_limit(self, storage_type):
return storage_type.get(IOPS_LIMIT_KEY)
def _find_bandwidth_limit(self, storage_type):
return storage_type.get(BANDWIDTH_LIMIT)
def id_to_base64(self, id):
# Base64 encode the id to get a volume name less than 32 characters due
# to ScaleIO limitation.
name = six.text_type(id).replace("-", "")
try:
name = base64.b16decode(name.upper())
except TypeError:
pass
encoded_name = base64.b64encode(name)
LOG.debug(
"Converted id %(id)s to scaleio name %(name)s.",
{'id': id, 'name': encoded_name})
return encoded_name
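    # Worked example (illustrative, not part of the driver): a UUID such as
    # '00112233-4455-6677-8899-aabbccddeeff' (hypothetical) becomes 32 hex
    # characters once the dashes are stripped; b16decode turns that into 16 raw
    # bytes, and b64encode of 16 bytes yields a 24-character string, safely
    # under ScaleIO's 32-character volume-name limit. Non-hex ids fall through
    # the TypeError branch and are base64-encoded as-is.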
def create_volume(self, volume):
"""Creates a scaleIO volume."""
self._check_volume_size(volume.size)
volname = self.id_to_base64(volume.id)
storage_type = self._get_volumetype_extraspecs(volume)
storage_pool_name = self._find_storage_pool_name_from_storage_type(
storage_type)
storage_pool_id = self._find_storage_pool_id_from_storage_type(
storage_type)
protection_domain_id = (
self._find_protection_domain_id_from_storage_type(storage_type))
protection_domain_name = (
self._find_protection_domain_name_from_storage_type(storage_type))
provisioning_type = self._find_provisioning_type(storage_type)
LOG.info(_LI(
"Volume type: %(volume_type)s, storage pool name: %(pool_name)s, "
"storage pool id: %(pool_id)s, protection domain id: "
"%(domain_id)s, protection domain name: %(domain_name)s."),
{'volume_type': storage_type,
'pool_name': storage_pool_name,
'pool_id': storage_pool_id,
'domain_id': protection_domain_id,
'domain_name': protection_domain_name})
verify_cert = self._get_verify_cert()
if storage_pool_name:
self.storage_pool_name = storage_pool_name
self.storage_pool_id = None
if storage_pool_id:
self.storage_pool_id = storage_pool_id
self.storage_pool_name = None
if protection_domain_name:
self.protection_domain_name = protection_domain_name
self.protection_domain_id = None
if protection_domain_id:
self.protection_domain_id = protection_domain_id
self.protection_domain_name = None
domain_id = self.protection_domain_id
if not domain_id:
if not self.protection_domain_name:
msg = _("Must specify protection domain name or"
" protection domain id.")
raise exception.VolumeBackendAPIException(data=msg)
encoded_domain_name = urllib.quote(self.protection_domain_name, '')
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'encoded_domain_name': encoded_domain_name}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Domain/instances/getByName::"
"%(encoded_domain_name)s") % req_vars
LOG.info(_LI("ScaleIO get domain id by name request: %s."),
request)
r = requests.get(
request,
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
r = self._check_response(r, request)
domain_id = r.json()
if not domain_id:
msg = (_("Domain with name %s wasn't found.")
% self.protection_domain_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != OK_STATUS_CODE and "errorCode" in domain_id:
msg = (_("Error getting domain id from name %(name)s: %(id)s.")
% {'name': self.protection_domain_name,
'id': domain_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Domain id is %s."), domain_id)
pool_name = self.storage_pool_name
pool_id = self.storage_pool_id
if pool_name:
            encoded_pool_name = urllib.quote(pool_name, '')
            req_vars = {'server_ip': self.server_ip,
                        'server_port': self.server_port,
                        'domain_id': domain_id,
                        'encoded_pool_name': encoded_pool_name}
            request = ("https://%(server_ip)s:%(server_port)s"
                       "/api/types/Pool/instances/getByName::"
                       "%(domain_id)s,%(encoded_pool_name)s") % req_vars
LOG.info(_LI("ScaleIO get pool id by name request: %s."), request)
r = requests.get(
request,
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
pool_id = r.json()
if not pool_id:
msg = (_("Pool with name %(pool_name)s wasn't found in "
"domain %(domain_id)s.")
% {'pool_name': pool_name,
'domain_id': domain_id})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != OK_STATUS_CODE and "errorCode" in pool_id:
msg = (_("Error getting pool id from name %(pool_name)s: "
"%(err_msg)s.")
% {'pool_name': pool_name,
'err_msg': pool_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Pool id is %s."), pool_id)
if provisioning_type == 'thin':
provisioning = "ThinProvisioned"
# Default volume type is thick.
else:
provisioning = "ThickProvisioned"
        # Convert the requested size from GB to KB: 1 GB = units.Mi (1024 ** 2) KB.
volume_size_kb = volume.size * units.Mi
params = {'protectionDomainId': domain_id,
'volumeSizeInKb': six.text_type(volume_size_kb),
'name': volname,
'volumeType': provisioning,
'storagePoolId': pool_id}
LOG.info(_LI("Params for add volume request: %s."), params)
r = requests.post(
"https://" +
self.server_ip +
":" +
self.server_port +
"/api/types/Volume/instances",
data=json.dumps(params),
headers=self._get_headers(),
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
response = r.json()
LOG.info(_LI("Add volume response: %s"), response)
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
msg = (_("Error creating volume: %s.") % response['message'])
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Created volume %(volname)s, volume id %(volid)s."),
{'volname': volname, 'volid': volume.id})
def _check_volume_size(self, size):
if size % 8 != 0:
round_volume_capacity = (
self.configuration.sio_round_volume_capacity)
if not round_volume_capacity:
exception_msg = (_(
"Cannot create volume of size %s: not multiple of 8GB.") %
size)
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
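    # Example (sketch): a request for a 10 GB volume is rejected here unless
    # sio_round_volume_capacity is True, in which case the size is passed
    # through unchanged and is expected to be rounded up to the 8 GB
    # granularity on the ScaleIO side.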
def create_snapshot(self, snapshot):
"""Creates a scaleio snapshot."""
volname = self.id_to_base64(snapshot.volume_id)
snapname = self.id_to_base64(snapshot.id)
self._snapshot_volume(volname, snapname)
def _snapshot_volume(self, volname, snapname):
vol_id = self._get_volume_id(volname)
params = {
'snapshotDefs': [{"volumeId": vol_id, "snapshotName": snapname}]}
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/System/action/snapshotVolumes") % req_vars
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(
self.server_username,
self.server_token),
verify=self._get_verify_cert())
r = self._check_response(r, request, False, params)
response = r.json()
LOG.info(_LI("snapshot volume response: %s."), response)
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
msg = (_("Failed creating snapshot for volume %(volname)s: "
"%(response)s.") %
{'volname': volname,
'response': response['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _check_response(self, response, request, is_get_request=True,
params=None):
if response.status_code == 401 or response.status_code == 403:
LOG.info(_LI("Token is invalid, going to re-login and get "
"a new one."))
login_request = (
"https://" + self.server_ip +
":" + self.server_port + "/api/login")
verify_cert = self._get_verify_cert()
r = requests.get(
login_request,
auth=(
self.server_username,
self.server_password),
verify=verify_cert)
token = r.json()
self.server_token = token
# Repeat request with valid token.
LOG.info(_LI(
"Going to perform request again %s with valid token."),
request)
if is_get_request:
res = requests.get(request,
auth=(self.server_username,
self.server_token),
verify=verify_cert)
else:
res = requests.post(request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(self.server_username,
self.server_token),
verify=verify_cert)
return res
return response
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
# We interchange 'volume' and 'snapshot' because in ScaleIO
# snapshot is a volume: once a snapshot is generated it
# becomes a new unmapped volume in the system and the user
# may manipulate it in the same manner as any other volume
# exposed by the system
volname = self.id_to_base64(snapshot.id)
snapname = self.id_to_base64(volume.id)
LOG.info(_LI(
"ScaleIO create volume from snapshot: snapshot %(snapname)s "
"to volume %(volname)s."),
{'volname': volname,
'snapname': snapname})
self._snapshot_volume(volname, snapname)
def _get_volume_id(self, volname):
volname_encoded = urllib.quote(volname, '')
volname_double_encoded = urllib.quote(volname_encoded, '')
LOG.info(_LI("Volume name after double encoding is %s."),
volname_double_encoded)
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'encoded': volname_double_encoded}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Volume/instances/getByName::"
"%(encoded)s") % req_vars
LOG.info(_LI("ScaleIO get volume id by name request: %s"), request)
r = requests.get(
request,
auth=(self.server_username,
self.server_token),
verify=self._get_verify_cert())
r = self._check_response(r, request)
vol_id = r.json()
if not vol_id:
msg = _("Volume with name %s wasn't found.") % volname
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != OK_STATUS_CODE and "errorCode" in vol_id:
msg = (_("Error getting volume id from name %(volname)s: %(err)s.")
% {'volname': volname,
'err': vol_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("volume id is %s."), vol_id)
return vol_id
def _get_headers(self):
return {'content-type': 'application/json'}
def _get_verify_cert(self):
verify_cert = False
if self.verify_server_certificate:
verify_cert = self.server_certificate_path
return verify_cert
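    # Note (added comment): the value returned here feeds the 'verify' argument
    # of the requests library, which accepts either a boolean or a path to a
    # CA bundle/certificate file, so False disables verification and a
    # configured certificate path enables it against that file.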
def extend_volume(self, volume, new_size):
"""Extends the size of an existing available ScaleIO volume."""
self._check_volume_size(new_size)
volname = self.id_to_base64(volume.id)
LOG.info(_LI(
"ScaleIO extend volume: volume %(volname)s to size %(new_size)s."),
{'volname': volname,
'new_size': new_size})
vol_id = self._get_volume_id(volname)
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'vol_id': vol_id}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(vol_id)s"
"/action/setVolumeSize") % req_vars
LOG.info(_LI("Change volume capacity request: %s."), request)
volume_new_size = new_size
params = {'sizeInGB': six.text_type(volume_new_size)}
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(self.server_username,
self.server_token),
verify=self._get_verify_cert())
r = self._check_response(r, request, False, params)
if r.status_code != OK_STATUS_CODE:
response = r.json()
msg = (_("Error extending volume %(vol)s: %(err)s.")
% {'vol': volname,
'err': response['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
volname = self.id_to_base64(src_vref.id)
snapname = self.id_to_base64(volume.id)
LOG.info(_LI(
"ScaleIO create cloned volume: source volume %(src)s to target "
"volume %(tgt)s."),
{'src': volname,
'tgt': snapname})
self._snapshot_volume(volname, snapname)
def delete_volume(self, volume):
"""Deletes a self.logical volume"""
volname = self.id_to_base64(volume.id)
self._delete_volume(volname)
def _delete_volume(self, volname):
volname_encoded = urllib.quote(volname, '')
volname_double_encoded = urllib.quote(volname_encoded, '')
LOG.info(_LI("Volume name after double encoding is %s."),
volname_double_encoded)
verify_cert = self._get_verify_cert()
# convert volume name to id
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'encoded': volname_double_encoded}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Volume/instances/getByName::"
"%(encoded)s") % req_vars
LOG.info(_LI("ScaleIO get volume id by name request: %s."), request)
r = requests.get(
request,
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
r = self._check_response(r, request)
LOG.info(_LI("Get by name response: %s."), r.text)
vol_id = r.json()
LOG.info(_LI("ScaleIO volume id to delete is %s."), vol_id)
if r.status_code != OK_STATUS_CODE and "errorCode" in vol_id:
msg = (_("Error getting volume id from name %(vol)s: %(err)s.")
% {'vol': volname, 'err': vol_id['message']})
LOG.error(msg)
error_code = vol_id['errorCode']
if (error_code == VOLUME_NOT_FOUND_ERROR):
force_delete = self.configuration.sio_force_delete
if force_delete:
LOG.warning(_LW(
"Ignoring error in delete volume %s: volume not found "
"due to force delete settings."), volname)
return
raise exception.VolumeBackendAPIException(data=msg)
unmap_before_delete = (
self.configuration.sio_unmap_volume_before_deletion)
# Ensure that the volume is not mapped to any SDC before deletion in
# case unmap_before_deletion is enabled.
if unmap_before_delete:
params = {'allSdcs': ''}
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'vol_id': vol_id}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(vol_id)s"
"/action/removeMappedSdc") % req_vars
LOG.info(_LI(
"Trying to unmap volume from all sdcs before deletion: %s."),
request)
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
r = self._check_response(r, request, False, params)
LOG.debug("Unmap volume response: %s.", r.text)
params = {'removeMode': 'ONLY_ME'}
r = requests.post(
"https://" +
self.server_ip +
":" +
self.server_port +
"/api/instances/Volume::" +
six.text_type(vol_id) +
"/action/removeVolume",
data=json.dumps(params),
headers=self._get_headers(),
auth=(self.server_username,
self.server_token),
verify=verify_cert)
r = self._check_response(r, request, False, params)
if r.status_code != OK_STATUS_CODE:
response = r.json()
error_code = response['errorCode']
if error_code == 78:
                force_delete = self.configuration.sio_force_delete
if force_delete:
LOG.warning(_LW(
"Ignoring error in delete volume %s: volume not found "
"due to force delete settings."), vol_id)
else:
msg = (_("Error deleting volume %s: volume not found.") %
vol_id)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = (_("Error deleting volume %(vol)s: %(err)s.") %
{'vol': vol_id,
'err': response['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def delete_snapshot(self, snapshot):
"""Deletes a ScaleIO snapshot."""
snapname = self.id_to_base64(snapshot.id)
LOG.info(_LI("ScaleIO delete snapshot."))
self._delete_volume(snapname)
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
The scaleio driver returns a driver_volume_type of 'scaleio'.
"""
LOG.debug("Connector is %s.", connector)
volname = self.id_to_base64(volume.id)
properties = {}
properties['scaleIO_volname'] = volname
properties['hostIP'] = connector['ip']
properties['serverIP'] = self.server_ip
properties['serverPort'] = self.server_port
properties['serverUsername'] = self.server_username
properties['serverPassword'] = self.server_password
properties['serverToken'] = self.server_token
storage_type = self._get_volumetype_extraspecs(volume)
LOG.info(_LI("Volume type is %s."), storage_type)
iops_limit = self._find_iops_limit(storage_type)
LOG.info(_LI("iops limit is: %s."), iops_limit)
bandwidth_limit = self._find_bandwidth_limit(storage_type)
LOG.info(_LI("Bandwidth limit is: %s."), bandwidth_limit)
properties['iopsLimit'] = iops_limit
properties['bandwidthLimit'] = bandwidth_limit
return {'driver_volume_type': 'scaleio',
'data': properties}
def terminate_connection(self, volume, connector, **kwargs):
LOG.debug("scaleio driver terminate connection.")
def _update_volume_stats(self):
stats = {}
backend_name = self.configuration.safe_get('volume_backend_name')
stats['volume_backend_name'] = backend_name or 'scaleio'
stats['vendor_name'] = 'EMC'
stats['driver_version'] = self.VERSION
stats['storage_protocol'] = 'scaleio'
stats['total_capacity_gb'] = 'unknown'
stats['free_capacity_gb'] = 'unknown'
stats['reserved_percentage'] = 0
stats['QoS_support'] = False
pools = []
verify_cert = self._get_verify_cert()
max_free_capacity = 0
total_capacity = 0
for sp_name in self.storage_pools:
splitted_name = sp_name.split(':')
domain_name = splitted_name[0]
pool_name = splitted_name[1]
LOG.debug("domain name is %(domain)s, pool name is %(pool)s.",
{'domain': domain_name,
'pool': pool_name})
# Get domain id from name.
encoded_domain_name = urllib.quote(domain_name, '')
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'encoded_domain_name': encoded_domain_name}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Domain/instances/getByName::"
"%(encoded_domain_name)s") % req_vars
LOG.info(_LI("ScaleIO get domain id by name request: %s."),
request)
LOG.info(_LI("username: %(username)s, verify_cert: %(verify)s."),
{'username': self.server_username,
'verify': verify_cert})
r = requests.get(
request,
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
r = self._check_response(r, request)
LOG.info(_LI("Get domain by name response: %s"), r.text)
domain_id = r.json()
if not domain_id:
msg = (_("Domain with name %s wasn't found.")
% self.protection_domain_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != OK_STATUS_CODE and "errorCode" in domain_id:
msg = (_("Error getting domain id from name %(name)s: "
"%(err)s.")
% {'name': self.protection_domain_name,
'err': domain_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Domain id is %s."), domain_id)
# Get pool id from name.
encoded_pool_name = urllib.quote(pool_name, '')
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'domain_id': domain_id,
'encoded_pool_name': encoded_pool_name}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Pool/instances/getByName::"
"%(domain_id)s,%(encoded_pool_name)s") % req_vars
LOG.info(_LI("ScaleIO get pool id by name request: %s."), request)
r = requests.get(
request,
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
pool_id = r.json()
if not pool_id:
msg = (_("Pool with name %(pool)s wasn't found in domain "
"%(domain)s.")
% {'pool': pool_name,
'domain': domain_id})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != OK_STATUS_CODE and "errorCode" in pool_id:
msg = (_("Error getting pool id from name %(pool)s: "
"%(err)s.")
% {'pool': pool_name,
'err': pool_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Pool id is %s."), pool_id)
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/StoragePool/instances/action/"
"querySelectedStatistics") % req_vars
params = {'ids': [pool_id], 'properties': [
"capacityInUseInKb", "capacityLimitInKb"]}
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
response = r.json()
LOG.info(_LI("Query capacity stats response: %s."), response)
for res in response.values():
capacityInUse = res['capacityInUseInKb']
capacityLimit = res['capacityLimitInKb']
total_capacity_gb = capacityLimit / units.Mi
used_capacity_gb = capacityInUse / units.Mi
free_capacity_gb = total_capacity_gb - used_capacity_gb
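                # Worked example (added comment): the statistics are reported
                # in KB, so dividing by units.Mi (1024 ** 2) converts to GB,
                # e.g. a capacityLimitInKb of 10485760 is 10 GB. On Python 2
                # this is integer division, so fractional GB are truncated.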
LOG.info(_LI(
"free capacity of pool %(pool)s is: %(free)s, "
"total capacity: %(total)s."),
{'pool': pool_name,
'free': free_capacity_gb,
'total': total_capacity_gb})
pool = {'pool_name': sp_name,
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'QoS_support': False,
'reserved_percentage': 0
}
pools.append(pool)
if free_capacity_gb > max_free_capacity:
max_free_capacity = free_capacity_gb
total_capacity = total_capacity + total_capacity_gb
stats['volume_backend_name'] = backend_name or 'scaleio'
stats['vendor_name'] = 'EMC'
stats['driver_version'] = self.VERSION
stats['storage_protocol'] = 'scaleio'
# Use zero capacities here so we always use a pool.
stats['total_capacity_gb'] = total_capacity
stats['free_capacity_gb'] = max_free_capacity
LOG.info(_LI(
"Free capacity for backend is: %(free)s, total capacity: "
"%(total)s."),
{'free': max_free_capacity,
'total': total_capacity})
stats['reserved_percentage'] = 0
stats['QoS_support'] = False
stats['pools'] = pools
LOG.info(_LI("Backend name is %s."), stats["volume_backend_name"])
self._stats = stats
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _get_volumetype_extraspecs(self, volume):
specs = {}
ctxt = context.get_admin_context()
type_id = volume['volume_type_id']
if type_id:
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
for key, value in specs.items():
specs[key] = value
return specs
def find_volume_path(self, volume_id):
LOG.info(_LI("looking for volume %s."), volume_id)
# Look for the volume in /dev/disk/by-id directory.
disk_filename = ""
tries = 0
while not disk_filename:
if tries > self.configuration.num_volume_device_scan_tries:
msg = (_(
"scaleIO volume %s not found at expected path.")
% volume_id)
                raise exception.VolumeBackendAPIException(data=msg)
by_id_path = "/dev/disk/by-id"
if not os.path.isdir(by_id_path):
LOG.warning(_LW(
"scaleIO volume %(vol)s not yet found (no directory "
"/dev/disk/by-id yet). Try number: %(tries)d."),
{'vol': volume_id,
'tries': tries})
tries = tries + 1
eventlet.sleep(1)
continue
filenames = os.listdir(by_id_path)
LOG.info(_LI(
"Files found in path %(path)s: %(file)s."),
{'path': by_id_path,
'file': filenames})
for filename in filenames:
if (filename.startswith("emc-vol") and
filename.endswith(volume_id)):
disk_filename = filename
if not disk_filename:
LOG.warning(_LW(
"scaleIO volume %(vol)s not yet found. "
"Try number: %(tries)d."),
{'vol': volume_id,
'tries': tries})
tries = tries + 1
eventlet.sleep(1)
if (tries != 0):
LOG.info(_LI(
"Found scaleIO device %(file)s after %(tries)d retries "),
{'file': disk_filename,
'tries': tries})
full_disk_name = by_id_path + "/" + disk_filename
LOG.info(_LI("Full disk name is %s."), full_disk_name)
return full_disk_name
def _get_client_id(
self, server_ip, server_username, server_password, sdc_ip):
req_vars = {'server_ip': server_ip,
'server_port': self.server_port,
'sdc_ip': sdc_ip}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Client/instances/getByIp::"
"%(sdc_ip)s/") % req_vars
LOG.info(_LI("ScaleIO get client id by ip request: %s."), request)
r = requests.get(
request,
auth=(
server_username,
self.server_token),
verify=self._get_verify_cert())
r = self._check_response(r, request)
sdc_id = r.json()
if not sdc_id:
msg = _("Client with ip %s wasn't found.") % sdc_ip
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != 200 and "errorCode" in sdc_id:
msg = (_("Error getting sdc id from ip %(ip)s: %(id)s.")
% {'ip': sdc_ip, 'id': sdc_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("ScaleIO sdc id is %s."), sdc_id)
return sdc_id
def _sio_attach_volume(self, volume, sdc_ip):
# We need to make sure we even *have* a local path
LOG.info(_LI("ScaleIO attach volume in scaleio cinder driver."))
volname = self.id_to_base64(volume.id)
cmd = ['drv_cfg']
cmd += ["--query_guid"]
LOG.info(_LI("ScaleIO sdc query guid command: %s"), six.text_type(cmd))
try:
(out, err) = utils.execute(*cmd, run_as_root=True)
LOG.debug("Map volume %(cmd)s: stdout=%(out)s stderr=%(err)s",
{'cmd': cmd, 'out': out, 'err': err})
except processutils.ProcessExecutionError as e:
msg = _("Error querying sdc guid: %s.") % six.text_type(e.stderr)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
guid = out
LOG.info(_LI("Current sdc guid: %s."), guid)
params = {'guid': guid}
volume_id = self._get_volume_id(volname)
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'volume_id': volume_id}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(volume_id)s"
"/action/addMappedSdc") % req_vars
LOG.info(_LI("Map volume request: %s."), request)
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(
self.server_username,
self.server_token),
verify=self._get_verify_cert())
r = self._check_response(r, request, False)
if r.status_code != OK_STATUS_CODE:
response = r.json()
error_code = response['errorCode']
if (error_code == VOLUME_ALREADY_MAPPED_ERROR):
LOG.warning(_LW("Ignoring error mapping volume %s: "
"volume already mapped."), volname)
else:
msg = (_("Error mapping volume %(vol)s: %(err)s.")
% {'vol': volname,
'err': response['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
formated_id = volume_id
return self.find_volume_path(formated_id)
def _sio_detach_volume(self, volume, sdc_ip):
LOG.info(_LI("ScaleIO detach volume in scaleio cinder driver."))
volname = self.id_to_base64(volume.id)
cmd = ['drv_cfg']
cmd += ["--query_guid"]
LOG.info(_LI("ScaleIO sdc query guid command: %s."), cmd)
try:
(out, err) = utils.execute(*cmd, run_as_root=True)
LOG.debug("Unmap volume %(cmd)s: stdout=%(out)s stderr=%(err)s",
{'cmd': cmd, 'out': out, 'err': err})
except processutils.ProcessExecutionError as e:
msg = _("Error querying sdc guid: %s.") % six.text_type(e.stderr)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
guid = out
LOG.info(_LI("Current sdc guid: %s."), guid)
params = {'guid': guid}
volume_id = self._get_volume_id(volname)
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'vol_id': volume_id}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(vol_id)s"
"/action/removeMappedSdc") % req_vars
LOG.info(_LI("Unmap volume request: %s."), request)
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(
self.server_username,
self.server_token),
verify=self._get_verify_cert())
r = self._check_response(r, request, False, params)
if r.status_code != OK_STATUS_CODE:
response = r.json()
error_code = response['errorCode']
if error_code == VOLUME_NOT_MAPPED_ERROR:
LOG.warning(_LW("Ignoring error unmapping volume %s: "
"volume not mapped."), volname)
else:
msg = (_("Error unmapping volume %(vol)s: %(err)s.")
% {'vol': volname,
'err': response['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
LOG.info(_LI(
"ScaleIO copy_image_to_volume volume: %(vol)s image service: "
"%(service)s image id: %(id)s."),
{'vol': volume,
'service': six.text_type(image_service),
'id': six.text_type(image_id)})
properties = utils.brick_get_connector_properties()
sdc_ip = properties['ip']
LOG.debug("SDC ip is: %s", sdc_ip)
try:
image_utils.fetch_to_raw(context,
image_service,
image_id,
self._sio_attach_volume(volume, sdc_ip),
BLOCK_SIZE,
size=volume['size'])
finally:
self._sio_detach_volume(volume, sdc_ip)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
LOG.info(_LI(
"ScaleIO copy_volume_to_image volume: %(vol)s image service: "
"%(service)s image meta: %(meta)s."),
{'vol': volume,
'service': six.text_type(image_service),
'meta': six.text_type(image_meta)})
properties = utils.brick_get_connector_properties()
sdc_ip = properties['ip']
LOG.debug("SDC ip is: {0}".format(sdc_ip))
try:
image_utils.upload_volume(context,
image_service,
image_meta,
self._sio_attach_volume(volume, sdc_ip))
finally:
self._sio_detach_volume(volume, sdc_ip)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
pass
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
pass
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
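# Minimal configuration sketch (assumptions, not taken from this module): a
# backend section in cinder.conf would combine the san_* options pulled in via
# san_opts with the sio_* options registered above, along the lines of:
#
#     [scaleio-backend]                                 # hypothetical section name
#     volume_driver = <python path to ScaleIODriver>    # depends on packaging
#     san_ip = <gateway ip>
#     san_login = <admin user>
#     san_password = <admin password>
#     sio_storage_pools = domain1:pool1
#     sio_protection_domain_name = domain1
#     sio_storage_pool_name = pool1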
|
|
import util
from PicartoClientAPI import PublicApi, rest, logger, OnlineDetails, ApiClient, Category, Configuration, ChannelDetails
from pagination import PaginationStore
configuration = Configuration()
public_api = PublicApi()
api_client = ApiClient()
pages = PaginationStore()
rest.HTTP = HTTP
logger.Log = Log
THUMB_BASE = "https://thumb-us1.picarto.tv/thumbnail/%s.jpg"
TITLE = 'Picarto'
PREFIX = '/video/picarto'
ART = 'art-default.jpg'
ICON = 'icon-default.jpg'
WebsiteURL = 'https://picarto.tv/'
http = 'http:'
WebsitePopOutURL = 'https://picarto.tv/streampopout/{0}/public'
RE_LIST_ID = Regex('listId: "(.+?)", pagesConfig: ')
RE_CONTENT_ID = Regex('CONTENT_ID = "(.+?)";')
P_API = "https://api.picarto.tv/v1"
CATEGORY_THUMB = "https://picarto.tv/images/explore/communitys/%s.jpg"
STREAM_BASE = "https://1-edge5-us-east.picarto.tv/mp4/%s.mp4"
def Start():
configuration.username = Prefs['username']
configuration.password = Prefs['password']
util.Lang = L
@handler(
PREFIX,
L(TITLE),
ICON,
ART
)
def MainMenu():
Log.Debug("MainMenu")
oc = ObjectContainer(
title2=L(TITLE),
no_cache=True,
content=ContainerContent.Genres
)
oc.add(DirectoryObject(
key=Callback(OnlineSubMenu, title=L('Online')),
title=u'%s' % L('Online'),
))
Log.Debug("SubMenu: online")
oc.add(DirectoryObject(
key=Callback(CategoriesSubMenu, title=L('Categories')),
title=u'%s' % L('Categories'),
))
Log.Debug("SubMenu: categories")
oc.add(DirectoryObject(
key=Callback(EventsSubMenu, title=L('Events')),
title=u'%s' % L('Events'),
))
Log.Debug("SubMenu: events")
# oc.add(InputDirectoryObject(
# key=Callback(
# Search
# ),
# title=u'%s' % L('Search'), prompt=u'%s' % L('Search user'),
# ))
return oc
def buildSummary(details):
""" :type details OnlineDetails"""
Log.Debug("buildSummary " + "")
return "\r\n".join('%s' % x for x in details.languages)
@route(PREFIX + '/online')
def OnlineSubMenu(title, page=1, categories=None, **kwargs):
try:
Log.Debug("OnlineSubMenu " + str(page) + ", " + str(categories))
page = int(page)
if not pages.online_pages:
buffer_list = list()
online_channels_list = public_api.online_get(adult=Prefs['filter_adult'],
gaming=Prefs['filter_gaming'],
categories=categories) # type: list
for channel_dict in online_channels_list:
# Log.Debug(channel_dict)
details = api_client.deserialize_model(channel_dict, OnlineDetails) # type: OnlineDetails
# Log.Debug(details)
buffer_list.append(details)
pages.online_pages = list(PaginationStore.grouper(int(Prefs['elements_per_side']), buffer_list))
online_page_length = len(pages.online_pages)
page = abs(page) % online_page_length
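        # Added note: the modulo wraps any requested page back into range, e.g.
        # page 5 with 3 cached pages resolves to index 2, so paging past the
        # end simply cycles through the list again.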
oc = ObjectContainer(
title2=u'%s' % title + " (" + str(page) + "/" + str(online_page_length) + ")",
art=None,
content=ContainerContent.Albums
)
# Log.Debug(str(page) + ", " + str(pages.online_pages[page]))
for details in pages.online_pages[page]:
if details:
oc.add(VideoClipObject(url=StreamURLForName(details.name),
title=details.name + " - " + details.title,
thumb=details.thumbnails.mobile,
summary=buildSummary(details),
# rating=, <-stars rating
content_rating=L("adult") if details.adult else L("everyone"),
# duration=float('inf'),# needs int
# year=current,
# genres=details.category,
# writers=writers_a,
# directors=directors_a,
# roles=roles_a,
studio=details.name
))
if page < online_page_length:
page += 1
oc.add(NextPageObject(key=Callback(OnlineSubMenu, title=L("back"), page=page, categories=categories)))
return oc
except Exception as e:
Log.Exception("OnlineSubMenu had an exception")
return ContentNotFound()
@route(PREFIX + '/categories')
def Categories(name):
return OnlineSubMenu(name, 1, name)
def buildCategorySummary(details):
return "online: " + str(details.online_channels) + \
" total: " + str(details.total_channels) + \
" viewers: " + str(details.viewers)
def buildCategoryThumb(name):
return CATEGORY_THUMB % name.lower().replace(" ", "").replace("&", "")
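# Worked example (hypothetical category name): "Art & Music" is lowercased and
# stripped of spaces and ampersands to "artmusic", giving a thumbnail URL of
# https://picarto.tv/images/explore/communitys/artmusic.jpg.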
@route(PREFIX + '/categories_list')
def CategoriesSubMenu(title, **kwargs):
Log.Debug("CategoriesSubMenu")
oc = ObjectContainer(
title2=u'%s' % title,
art=None,
content=ContainerContent.Albums
)
try:
categories_list = public_api.categories_get() # type: list
for categories_dict in categories_list:
# Log.Debug(channel_dict)
details = api_client.deserialize_model(categories_dict, Category) # type: Category
# Log.Debug(details)
oc.add(DirectoryObject(key=Callback(Categories, name=details.name),
title=details.name,
summary=buildCategorySummary(details),
tagline=L("adult") if details.adult else L("everyone"),
thumb=buildCategoryThumb(details.name)
))
return oc
except Exception as e:
Log.Exception("OnlineSubMenu had an exception")
return ContentNotFound()
@route(PREFIX + '/events')
def EventsSubMenu(title, **kwargs):
Log.Debug("EventsSubMenu")
return ContentNotFound()
@route(PREFIX + '/artist')
def showArtist(name, **kwargs):
Log.Debug("showArtist " + name + ", ".join('%s=%r' % x for x in kwargs.iteritems()))
oc = ObjectContainer(title2=u'%s' % name)
try:
oc.add(URLService.MediaObjectsForURL(
"https://1-edge1-eu-west.picarto.tv/mp4/LiLaiRa.mp4?token=public&ticket=0&con=1516735777815&type=0&scope=0"))
# oc.add(URLService.MediaObjectsForURL(StreamURLForName(name)))
except Exception as e:
Log.Exception("showArtist had an exception")
return ContentNotFound()
return oc
def StreamURLForName(name):
# Log.Debug("StreamURLForName " + STREAM_BASE % name)
return STREAM_BASE % name
# the following is not working without javascript
# Log.Debug("StreamURLForName "+name)
# search_page = HTML.ElementFromURL("https://picarto.tv/" + name)
# Log.Debug(search_page)
# items = search_page.xpath("//*[@id='picarto-player-1_html5_api']")
# Log.Debug(items)
# for item in items:
# Log.Debug(item)
# return item.get("src")
# return ""
def ContentNotFound():
return MessageContainer(
L('Error'),
L('No entries found')
)
|
|
"""
Test Webdriver steps.
"""
from aloe.testing import FeatureTest
from aloe_webdriver.tests.base import feature, PAGES
class TestUtil(FeatureTest):
"""Test steps."""
@feature()
def test_I_should_see(self):
"""
Feature: I should see, I should not see
Scenario: Everything fires up
When I visit "{page}"
Then I should see "Hello there!"
And I should see a link to "Google" with the url "http://google.com/"
And I should see a link with the url "http://google.com/"
And I should not see "Bogeyman"
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_I_see_a_link(self):
"""
Feature: I should see a link
Scenario: Everything fires up
When I go to "{page}"
Then I should see a link to "Google" with the url "http://google.com/"
And I see "Hello there!"
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_see_a_link_containing(self):
"""
Feature: I should see a link containing
Scenario: Everything fires up
When I go to "{page}"
Then The browser's URL should contain "file://"
And I should see a link that contains the text "Goo" and the url "http://google.com/"
""" # noqa
return dict(page=PAGES['basic_page'])
@feature()
def test_basic_page_linking(self):
"""
Feature: Basic page linking
Scenario: Follow links
Given I go to "{link_page}"
And I see "Page o link"
When I click "Next Page"
Then I should be at "{link_dest_page}"
And The browser's URL should be "{link_dest_page}"
And The browser's URL should not contain "http://"
"""
return {
'link_page': PAGES['link_page'],
'link_dest_page': PAGES['link_dest']
}
@feature()
def test_I_see_a_form(self):
"""
Feature: I should see a form
Scenario: Everything fires up
When I go to "{page}"
Then I should see a form that goes to "basic_page.html"
And the element with id of "somediv" contains "Hello"
And the element with id of "somediv" does not contain "bye"
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_I_fill_in_a_form(self):
"""
Feature: I fill in a form
Scenario: Everything fires up
Given I go to "{page}"
And I fill in "bio" with "everything awesome"
And I fill in "Password: " with "neat"
When I press "Submit!"
Then The browser's URL should contain "bio=everything"
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_checkboxes_checked(self):
"""
Feature: Checkboxes checked
Scenario: Everything fires up
Given I go to "{page}"
When I check "I have a bike"
Then The "I have a bike" checkbox should be checked
And The "I have a car" checkbox should not be checked
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_checkboxes_unchecked(self):
"""
Feature: Checkboxes unchecked
Scenario: Everything fires up
Given I go to "{page}"
And I check "I have a bike"
And The "I have a bike" checkbox should be checked
When I uncheck "I have a bike"
Then The "I have a bike" checkbox should not be checked
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_combo_boxes(self):
"""
Feature: Combo boxes
Scenario: Everything fires up
Given I go to "{page}"
Then I should see option "Mercedes" in selector "car_choice"
And I should see option "Volvo" in selector "car_choice"
And I should not see option "Skoda" in selector "car_choice"
When I select "Mercedes" from "car_choice"
Then The "Mercedes" option from "car_choice" should be selected
"""
return dict(page=PAGES['basic_page'])
@feature(fails=True)
def test_combo_boxes_fail(self):
"""
Feature: Combo boxes fail
Scenario: Everything fires up
Given I go to "{page}"
Then I should not see option "Mercedes" in selector "car_choice"
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_multi_combo_boxes(self):
'''
Feature: Multi-combo-boxes
Scenario: Everything fires up
Given I go to "{page}"
When I select the following from "Favorite Colors:":
"""
Blue
Green
"""
Then The following options from "Favorite Colors:" should be selected:
"""
Blue
Green
"""
'''
return dict(page=PAGES['basic_page'])
@feature()
def test_radio_buttons(self):
"""
Feature: Radio buttons
Scenario: Everything fires up
When I go to "{page}"
And I choose "Male"
Then The "Male" option should be chosen
And The "Female" option should not be chosen
"""
return dict(page=PAGES['basic_page'])
@feature(fails=True)
def test_hidden_text(self):
"""
Feature: Hidden text
Scenario: Everything fires up
When I go to "{page}"
Then I should see an element with id of "bio_field"
And I should see an element with id of "somediv" within 2 seconds
And I should not see an element with id of "hidden_text"
And I should see "Weeeee" within 1 second
"""
return dict(page=PAGES['basic_page'])
@feature(fails=True)
def test_hidden_text_2(self):
"""
Feature: Hidden text 2
Scenario: Everything fires up
When I go to "{page}"
Then I should see "Hello there" within 1 second
And I should see an element with id of "oops_field" within 1 second
And I should not see an element with id of "hidden_text"
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_alert_accept(self):
"""
Feature: test alert accept
Scenario: alerts
When I go to "{page}"
Then I should see an alert with text "This is an alerting alert"
When I accept the alert
Then I should not see an alert
And I should see "true"
"""
return dict(page=PAGES['alert_page'])
@feature()
def test_alert_dismiss(self):
"""
        Feature: test alert dismiss
Scenario: alerts
When I go to "{page}"
Then I should see an alert with text "This is an alerting alert"
When I dismiss the alert
Then I should not see an alert
And I should see "false"
"""
return dict(page=PAGES['alert_page'])
@feature()
def test_tooltips(self):
"""
Feature: test tooltips
Scenario: tooltips
When I go to "{page}"
Then I should see an element with tooltip "A tooltip"
And I should not see an element with tooltip "Does not exist"
And I should not see an element with tooltip "Hidden"
When I click the element with tooltip "A tooltip"
Then the browser's URL should contain "#anchor"
"""
return dict(page=PAGES['tooltips'])
@feature()
def test_labels(self):
"""
Feature: test labels
Scenario: basic page
When I go to "{page}"
And I click on label "Favorite Colors:"
Then element with id "fav_colors" should be focused
And element with id "bio_field" should not be focused
"""
return dict(page=PAGES['basic_page'])
@feature(fails=True)
def test_labels_fail(self):
"""
Feature: test labels fail
Scenario: basic page
When I go to "{page}"
And I click on label "Favorite Colors:"
Then element with id "fav_colors" should not be focused
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_input_values(self):
"""
Feature: assert value
Scenario: basic page
When I go to "{page}"
And I fill in "username" with "Danni"
Then input "username" has value "Danni"
"""
return dict(page=PAGES['basic_page'])
@feature(fails=True)
def test_input_values_fail(self):
"""
Feature: assert value
Scenario: basic page
When I go to "{page}"
And I fill in "username" with "Danni"
Then input "username" has value "Ricky"
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_date_input(self):
"""
Feature: assert value
Scenario: basic page
When I go to "{page}"
And I fill in "dob" with "1900/01/01"
Then input "dob" has value "1900/01/01"
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_page_title(self):
"""
        Feature: page title
Scenario: basic page
When I go to "{page}"
Then the page title should be "A Basic Page"
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_submit_only(self):
"""
Feature: submit only form
Scenario: basic page
When I go to "{page}"
And I submit the only form
Then the browser's URL should contain "bio="
And the browser's URL should contain "user="
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_submit_action(self):
"""
        Feature: submit form with action
Scenario: basic page
When I go to "{page}"
And I submit the form with action "basic_page.html"
Then the browser's URL should contain "bio="
And the browser's URL should contain "user="
"""
return dict(page=PAGES['basic_page'])
@feature()
def test_submit_id(self):
"""
Feature: submit only form
Scenario: basic page
When I go to "{page}"
And I submit the form with id "the-form"
Then the browser's URL should contain "bio="
And the browser's URL should contain "user="
"""
return dict(page=PAGES['basic_page'])
|
|
# Test export module
import sys
sys.path.insert(0, '..')
import copy
import os
import shutil
import numpy as np
import flopy
pth = os.path.join('..', 'examples', 'data', 'mf2005_test')
namfiles = [namfile for namfile in os.listdir(pth) if namfile.endswith('.nam')]
# skip = ["MNW2-Fig28.nam", "testsfr2.nam", "testsfr2_tab.nam"]
skip = []
tpth = os.path.join('temp', 't007')
# make the directory if it does not exist
if not os.path.isdir(tpth):
os.makedirs(tpth)
npth = os.path.join('temp', 't007', 'netcdf')
# delete the directory if it exists
if os.path.isdir(npth):
shutil.rmtree(npth)
# make the directory
os.makedirs(npth)
spth = os.path.join('temp', 't007', 'shapefile')
# make the directory if it does not exist
if not os.path.isdir(spth):
os.makedirs(spth)
def export_netcdf(namfile):
if namfile in skip:
return
print(namfile)
m = flopy.modflow.Modflow.load(namfile, model_ws=pth, verbose=False)
if m.sr.lenuni == 0:
m.sr.lenuni = 1
# print('skipping...lenuni==0 (undefined)')
# return
# if sum(m.dis.laycbd) != 0:
if m.dis.botm.shape[0] != m.nlay:
print('skipping...botm.shape[0] != nlay')
return
assert m, 'Could not load namefile {}'.format(namfile)
assert isinstance(m, flopy.modflow.Modflow)
# Do not fail if netCDF4 not installed
try:
import netCDF4
import pyproj
    except ImportError:
return
fnc_name = os.path.join(npth, m.name + '.nc')
try:
fnc = m.export(fnc_name)
fnc.write()
except Exception as e:
raise Exception(
'ncdf export fail for namfile {0}:\n{1} '.format(namfile, str(e)))
try:
nc = netCDF4.Dataset(fnc_name, 'r')
except Exception as e:
        raise Exception('ncdf import fail for nc file {0}:\n{1}'.format(fnc_name, str(e)))
return
def export_shapefile(namfile):
try:
import shapefile as shp
    except ImportError:
return
print(namfile)
m = flopy.modflow.Modflow.load(namfile, model_ws=pth, verbose=False)
assert m, 'Could not load namefile {}'.format(namfile)
assert isinstance(m, flopy.modflow.Modflow)
fnc_name = os.path.join(spth, m.name + '.shp')
try:
fnc = m.export(fnc_name)
#fnc2 = m.export(fnc_name, package_names=None)
#fnc3 = m.export(fnc_name, package_names=['DIS'])
except Exception as e:
raise Exception(
'shapefile export fail for namfile {0}:\n{1} '.format(namfile,
str(e)))
try:
s = shp.Reader(fnc_name)
except Exception as e:
raise Exception(
' shapefile import fail for {0}:{1}'.format(fnc_name, str(e)))
assert s.numRecords == m.nrow * m.ncol, "wrong number of records in " + \
"shapefile {0}:{1:d}".format(
fnc_name, s.numRecords)
return
def test_freyberg_export():
namfile = 'freyberg.nam'
model_ws = '../examples/data/freyberg_multilayer_transient/'
m = flopy.modflow.Modflow.load(namfile, model_ws=model_ws, verbose=False,
load_only=['DIS', 'BAS6', 'NWT', 'OC',
'RCH',
'WEL',
'DRN',
'UPW'])
m.drn.stress_period_data.export(os.path.join(spth, namfile[:-4]+'.shp'), sparse=True)
def test_export_output():
import os
import numpy as np
import flopy
# Do not fail if netCDF4 not installed
try:
import netCDF4
import pyproj
    except ImportError:
return
model_ws = os.path.join("..", "examples", "data", "freyberg")
ml = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws)
hds_pth = os.path.join(model_ws, "freyberg.githds")
hds = flopy.utils.HeadFile(hds_pth)
out_pth = os.path.join(npth, "freyberg.out.nc")
nc = flopy.export.utils.output_helper(out_pth, ml,
{"freyberg.githds": hds})
var = nc.nc.variables.get("head")
arr = var[:]
ibound_mask = ml.bas6.ibound.array == 0
arr_mask = arr.mask[0]
assert np.array_equal(ibound_mask, arr_mask)
def test_mbase_sr():
import numpy as np
import flopy
ml = flopy.modflow.Modflow(modelname="test", xul=1000.0,
rotation=12.5, start_datetime="1/1/2016")
try:
print(ml.sr.xcentergrid)
except:
pass
else:
raise Exception("should have failed")
dis = flopy.modflow.ModflowDis(ml, nrow=10, ncol=5, delr=np.arange(5),
xul=500)
print(ml.sr)
assert ml.sr.xul == 500
assert ml.sr.yll == -10
ml.model_ws = tpth
ml.write_input()
ml1 = flopy.modflow.Modflow.load("test.nam", model_ws=ml.model_ws)
assert ml1.sr == ml.sr
assert ml1.start_datetime == ml.start_datetime
def test_free_format_flag():
import flopy
Lx = 100.
Ly = 100.
nlay = 1
nrow = 51
ncol = 51
delr = Lx / ncol
delc = Ly / nrow
top = 0
botm = [-1]
ms = flopy.modflow.Modflow(rotation=20.)
dis = flopy.modflow.ModflowDis(ms, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr,
delc=delc, top=top, botm=botm)
bas = flopy.modflow.ModflowBas(ms, ifrefm=True)
assert ms.free_format_input == bas.ifrefm
ms.free_format_input = False
assert ms.free_format_input == bas.ifrefm
ms.free_format_input = True
bas.ifrefm = False
assert ms.free_format_input == bas.ifrefm
bas.ifrefm = True
assert ms.free_format_input == bas.ifrefm
ms.model_ws = tpth
ms.write_input()
ms1 = flopy.modflow.Modflow.load(ms.namefile, model_ws=ms.model_ws)
assert ms1.free_format_input == ms.free_format_input
assert ms1.free_format_input == ms1.bas6.ifrefm
ms1.free_format_input = False
assert ms1.free_format_input == ms1.bas6.ifrefm
bas.ifrefm = False
assert ms1.free_format_input == ms1.bas6.ifrefm
bas.ifrefm = True
assert ms1.free_format_input == ms1.bas6.ifrefm
def test_sr():
import flopy
Lx = 100.
Ly = 100.
nlay = 1
nrow = 51
ncol = 51
delr = Lx / ncol
delc = Ly / nrow
top = 0
botm = [-1]
ms = flopy.modflow.Modflow(rotation=20.)
dis = flopy.modflow.ModflowDis(ms, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr, delc=delc, top=top, botm=botm)
bas = flopy.modflow.ModflowBas(ms, ifrefm=True)
# test instantiation of an empty sr object
sr = flopy.utils.reference.SpatialReference()
# test instantiation of SR with xul, yul and no grid
sr = flopy.utils.reference.SpatialReference(xul=1, yul=1)
xul, yul = 321., 123.
sr = flopy.utils.SpatialReference(delr=ms.dis.delr.array,
delc=ms.dis.delc.array, lenuni=3,
xul=xul, yul=yul, rotation=20)
#txt = 'yul does not approximately equal 100 - ' + \
# '(xul, yul) = ({}, {})'.format( ms.sr.yul, ms.sr.yul)
assert abs(ms.sr.yul - Ly) < 1e-3#, txt
ms.sr.xul = 111
assert ms.sr.xul == 111
    # test that the transform for arbitrary coordinates
    # works the same as the transform for the model grid
x, y = ms.sr.xcenter, ms.sr.ycenter[0]
xt, yt = sr.transform(x, y)
assert np.sum(xt - sr.xcentergrid[0]) < 1e-3
x, y = ms.sr.xcenter[0], ms.sr.ycenter
xt, yt = sr.transform(x, y)
assert np.sum(yt - sr.ycentergrid[:, 0]) < 1e-3
# test inverse transform
x0, y0 = 9.99, 2.49
x1, y1 = sr.transform(x0, y0)
x2, y2 = sr.transform(x1, y1, inverse=True)
assert np.abs(x2-x0) < 1e-6
    assert np.abs(y2-y0) < 1e-6
# test input using ul vs ll
xll, yll = sr.xll, sr.yll
sr2 = flopy.utils.SpatialReference(delr=ms.dis.delr.array,
delc=ms.dis.delc.array, lenuni=3,
xll=xll, yll=yll, rotation=20)
assert sr2.xul == sr.xul
assert sr2.yul == sr.yul
assert np.array_equal(sr.xcentergrid, sr2.xcentergrid)
assert np.array_equal(sr.ycentergrid, sr2.ycentergrid)
ms.sr.lenuni = 1
assert ms.sr.lenuni == 1
ms.sr.units = "feet"
assert ms.sr.units == "feet"
ms.sr = sr
assert ms.sr == sr
assert ms.sr.lenuni != ms.dis.lenuni
try:
ms.sr.units = "junk"
except:
pass
else:
raise Exception("should have failed")
ms.start_datetime = "1-1-2016"
assert ms.start_datetime == "1-1-2016"
assert ms.dis.start_datetime == "1-1-2016"
ms.model_ws = tpth
ms.write_input()
ms1 = flopy.modflow.Modflow.load(ms.namefile, model_ws=ms.model_ws)
assert ms1.sr == ms.sr
assert ms1.dis.sr == ms.dis.sr
assert ms1.start_datetime == ms.start_datetime
assert ms1.sr.units == ms.sr.units
assert ms1.dis.lenuni == ms1.sr.lenuni
#assert ms1.sr.lenuni != sr.lenuni
ms1.sr = sr
assert ms1.sr == ms.sr
def test_sr_scaling():
nlay, nrow, ncol = 1, 10, 5
delr, delc = 250, 500
xll, yll = 286.80, 29.03
print(np.__version__)
# test scaling of length units
ms2 = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(ms2, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr,
delc=delc)
ms2.sr = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,
delc=ms2.dis.delc.array, lenuni=3,
xll=xll, yll=yll, rotation=0)
ms2.sr.epsg = 26715
ms2.dis.export(os.path.join(spth, 'dis2.shp'))
ms3 = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(ms3, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr,
delc=delc)
ms3.sr = flopy.utils.SpatialReference(delr=ms3.dis.delr.array,
delc=ms2.dis.delc.array, lenuni=2,
length_multiplier=2.,
xll=xll, yll=yll, rotation=0)
ms3.dis.export(os.path.join(spth, 'dis3.shp'), epsg=26715)
# check that the origin(s) are maintained
assert np.array_equal(ms3.sr.get_vertices(nrow - 1, 0)[1],
[ms3.sr.xll, ms3.sr.yll])
assert np.allclose(ms3.sr.get_vertices(nrow - 1, 0)[1],
ms2.sr.get_vertices(nrow - 1, 0)[1])
# check that the upper left corner is computed correctly
# in this case, length_multiplier overrides the given units
def check_size(sr):
xur, yur = sr.get_vertices(0, ncol - 1)[3]
assert np.abs(xur - (xll + sr.length_multiplier * delr * ncol)) < 1e-4
assert np.abs(yur - (yll + sr.length_multiplier * delc * nrow)) < 1e-4
check_size(ms3.sr)
# run the same tests but with units specified instead of a length multiplier
ms2 = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(ms2, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr, delc=delc,
lenuni=1 # feet; should have no effect on SR
# (model not supplied to SR)
)
ms2.sr = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,
delc=ms2.dis.delc.array,
lenuni=2, # meters
epsg=26715, # meters, listed on spatialreference.org
xll=xll, yll=yll, rotation=0)
assert ms2.sr.model_length_units == 'meters'
assert ms2.sr.length_multiplier == 1.
ms2.sr.lenuni = 1 # feet; test dynamic setting
assert ms2.sr.model_length_units == 'feet'
check_size(ms2.sr)
assert ms2.sr.length_multiplier == .3048
ms2.sr.lenuni = 3 # centimeters
assert ms2.sr.model_length_units == 'centimeters'
check_size(ms2.sr)
assert ms2.sr.length_multiplier == 0.01
ms2.sr.lenuni = 2 # meters
check_size(ms2.sr)
ms2.sr.units = 'meters'
ms2.sr.proj4_str = '+proj=utm +zone=16 +datum=NAD83 +units=us-ft +no_defs'
assert ms2.sr.proj4_str == '+proj=utm +zone=16 +datum=NAD83 +units=us-ft +no_defs'
assert ms2.sr.units == 'feet'
assert ms2.sr.length_multiplier == 1/.3048
check_size(ms2.sr)
ms2.sr.epsg = 6610 # meters, not listed on spatialreference.org but understood by pyproj
assert ms2.sr.units == 'meters'
assert ms2.sr.proj4_str is not None
check_size(ms2.sr)
def test_dynamic_xll_yll():
nlay, nrow, ncol = 1, 10, 5
delr, delc = 250, 500
xll, yll = 286.80, 29.03
# test scaling of length units
ms2 = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(ms2, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr,
delc=delc)
sr1 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,
delc=ms2.dis.delc.array, lenuni=2,
xll=xll, yll=yll, rotation=30)
xul, yul = sr1.xul, sr1.yul
sr1.length_multiplier = 1.0 / 3.281
assert sr1.xll == xll
assert sr1.yll == yll
sr2 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,
delc=ms2.dis.delc.array, lenuni=2,
xul=xul, yul=yul, rotation=30)
sr2.length_multiplier = 1.0 / 3.281
assert sr2.xul == xul
assert sr2.yul == yul
# test resetting of attributes
sr3 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,
delc=ms2.dis.delc.array, lenuni=2,
xll=xll, yll=yll, rotation=30)
# check that xul, yul and xll, yll are being recomputed
sr3.xll += 10.
sr3.yll += 21.
assert np.abs(sr3.xul - (xul + 10.)) < 1e-6
assert np.abs(sr3.yul - (yul + 21.)) < 1e-6
sr4 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,
delc=ms2.dis.delc.array, lenuni=2,
xul=xul, yul=yul, rotation=30)
assert sr4.origin_loc == 'ul'
sr4.xul += 10.
sr4.yul += 21.
assert np.abs(sr4.xll - (xll + 10.)) < 1e-6
assert np.abs(sr4.yll - (yll + 21.)) < 1e-6
sr4.rotation = 0.
assert np.abs(sr4.xul - (xul + 10.)) < 1e-6 # these shouldn't move because ul has priority
assert np.abs(sr4.yul - (yul + 21.)) < 1e-6
assert np.abs(sr4.xll - sr4.xul) < 1e-6
assert np.abs(sr4.yll - (sr4.yul - sr4.yedge[0])) < 1e-6
sr4.xll = 0.
sr4.yll = 10.
assert sr4.origin_loc == 'll'
assert sr4.xul == 0.
assert sr4.yul == sr4.yedge[0] + 10.
sr4.xul = xul
sr4.yul = yul
assert sr4.origin_loc == 'ul'
sr4.rotation = 30.
assert np.abs(sr4.xll - xll) < 1e-6
assert np.abs(sr4.yll - yll) < 1e-6
sr5 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,
delc=ms2.dis.delc.array, lenuni=2,
xll=xll, yll=yll,
rotation=0, epsg=26915)
sr5.lenuni = 1
assert sr5.length_multiplier == .3048
assert sr5.yul == sr5.yll + sr5.yedge[0] * sr5.length_multiplier
sr5.lenuni = 2
assert sr5.length_multiplier == 1.
assert sr5.yul == sr5.yll + sr5.yedge[0]
sr5.proj4_str = '+proj=utm +zone=16 +datum=NAD83 +units=us-ft +no_defs'
assert sr5.units == 'feet'
assert sr5.length_multiplier == 1/.3048
def test_namfile_readwrite():
nlay, nrow, ncol = 1, 30, 5
delr, delc = 250, 500
xll, yll = 272300, 5086000
fm = flopy.modflow
m = fm.Modflow(modelname='junk', model_ws=os.path.join('temp', 't007'))
dis = fm.ModflowDis(m, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr,
delc=delc)
m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,
delc=m.dis.delc.array, lenuni=3,
length_multiplier=.3048,
xll=xll, yll=yll, rotation=30)
# test reading and writing of SR information to namfile
m.write_input()
m2 = fm.Modflow.load('junk.nam', model_ws=os.path.join('temp', 't007'))
assert abs(m2.sr.xll - xll) < 1e-2
assert abs(m2.sr.yll - yll) < 1e-2
assert m2.sr.rotation == 30
assert abs(m2.sr.length_multiplier - .3048) < 1e-10
model_ws = os.path.join("..", "examples", "data", "freyberg_multilayer_transient")
ml = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, verbose=False,
check=False, exe_name="mfnwt")
assert ml.sr.xul == 619653
assert ml.sr.yul == 3353277
assert ml.sr.rotation == 15.
def test_read_usgs_model_reference():
nlay, nrow, ncol = 1, 30, 5
delr, delc = 250, 500
#xll, yll = 272300, 5086000
model_ws = os.path.join('temp', 't007')
shutil.copy('../examples/data/usgs.model.reference', model_ws)
fm = flopy.modflow
m = fm.Modflow(modelname='junk', model_ws=model_ws)
# feet and days
dis = fm.ModflowDis(m, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr,
delc=delc, lenuni=1, itmuni=4)
m.write_input()
# test reading of SR information from usgs.model.reference
m2 = fm.Modflow.load('junk.nam', model_ws=os.path.join('temp', 't007'))
from flopy.utils.reference import SpatialReference
d = SpatialReference.read_usgs_model_reference_file(os.path.join('temp', 't007', 'usgs.model.reference'))
assert m2.sr.xul == d['xul']
assert m2.sr.yul == d['yul']
assert m2.sr.rotation == d['rotation']
assert m2.sr.lenuni == d['lenuni']
assert m2.sr.epsg == d['epsg']
# have to delete this, otherwise it will mess up other tests
if os.path.exists(os.path.join(tpth, 'usgs.model.reference')):
os.remove(os.path.join(tpth, 'usgs.model.reference'))
def test_rotation():
m = flopy.modflow.Modflow(rotation=20.)
dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=40, ncol=20,
delr=250.,
delc=250., top=10, botm=0)
xul, yul = 500000, 2934000
m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,
delc=m.dis.delc.array,
xul=xul, yul=yul, rotation=45.)
xll, yll = m.sr.xll, m.sr.yll
assert np.abs(m.dis.sr.xgrid[0, 0] - xul) < 1e-4
assert np.abs(m.dis.sr.ygrid[0, 0] - yul) < 1e-4
m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,
delc=m.dis.delc.array,
xul=xul, yul=yul, rotation=-45.)
assert m.dis.sr.xgrid[0, 0] == xul
assert m.dis.sr.ygrid[0, 0] == yul
xll2, yll2 = m.sr.xll, m.sr.yll
m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,
delc=m.dis.delc.array,
xll=xll2, yll=yll2, rotation=-45.)
assert m.dis.sr.xgrid[0, 0] == xul
assert m.dis.sr.ygrid[0, 0] == yul
m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,
delc=m.dis.delc.array,
xll=xll, yll=yll, rotation=45.)
assert m.dis.sr.xgrid[0, 0] == xul
assert m.dis.sr.ygrid[0, 0] == yul
def test_sr_with_Map():
import matplotlib.pyplot as plt
m = flopy.modflow.Modflow(rotation=20.)
dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=40, ncol=20,
delr=250.,
delc=250., top=10, botm=0)
# transformation assigned by arguments
xul, yul, rotation = 500000., 2934000., 45.
modelmap = flopy.plot.ModelMap(model=m, xul=xul, yul=yul,
rotation=rotation)
lc = modelmap.plot_grid()
xll, yll = modelmap.sr.xll, modelmap.sr.yll
plt.close()
def check_vertices():
xllp, yllp = lc._paths[0].vertices[0]
xulp, yulp = lc._paths[0].vertices[1]
assert np.abs(xllp - xll) < 1e-6
assert np.abs(yllp - yll) < 1e-6
assert np.abs(xulp - xul) < 1e-6
assert np.abs(yulp - yul) < 1e-6
check_vertices()
modelmap = flopy.plot.ModelMap(model=m, xll=xll, yll=yll,
rotation=rotation)
lc = modelmap.plot_grid()
check_vertices()
plt.close()
# transformation in m.sr
sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,
delc=m.dis.delc.array,
xll=xll, yll=yll, rotation=rotation)
m.sr = copy.deepcopy(sr)
modelmap = flopy.plot.ModelMap(model=m)
lc = modelmap.plot_grid()
check_vertices()
plt.close()
# transformation assign from sr instance
m.sr._reset()
m.sr.set_spatialreference()
modelmap = flopy.plot.ModelMap(model=m, sr=sr)
lc = modelmap.plot_grid()
check_vertices()
plt.close()
# test plotting of line with specification of xul, yul in Dis/Model Map
mf = flopy.modflow.Modflow()
# Model domain and grid definition
dis = flopy.modflow.ModflowDis(mf, nlay=1, nrow=10, ncol=20, delr=1., delc=1., xul=100, yul=210)
#fig, ax = plt.subplots()
verts = [[101., 201.], [119., 209.]]
modelxsect = flopy.plot.ModelCrossSection(model=mf, line={'line': verts},
xul=mf.dis.sr.xul, yul=mf.dis.sr.yul)
linecollection = modelxsect.plot_grid()
plt.close()
def test_netcdf_classmethods():
import os
import flopy
# Do not fail if netCDF4 not installed
try:
import netCDF4
import pyproj
    except ImportError:
return
nam_file = "freyberg.nam"
model_ws = os.path.join('..', 'examples', 'data',
'freyberg_multilayer_transient')
ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False,
verbose=True, load_only=[])
f = ml.export(os.path.join(npth, "freyberg.nc"))
v1_set = set(f.nc.variables.keys())
fnc = os.path.join(npth, "freyberg.new.nc")
new_f = flopy.export.NetCdf.zeros_like(f, output_filename=fnc)
v2_set = set(new_f.nc.variables.keys())
diff = v1_set.symmetric_difference(v2_set)
assert len(diff) == 0, str(diff)
# def test_netcdf_overloads():
# import os
# import flopy
# nam_file = "freyberg.nam"
# model_ws = os.path.join('..', 'examples', 'data', 'freyberg_multilayer_transient')
# ml = flopy.modflow.Modflow.load(nam_file,model_ws=model_ws,check=False,
# verbose=False,load_only=[])
#
# f = ml.export(os.path.join("temp","freyberg.nc"))
# fzero = flopy.export.NetCdf.zeros_like(f)
# assert fzero.nc.variables["model_top"][:].sum() == 0
# print(f.nc.variables["model_top"][0,:])
# fplus1 = f + 1
# assert fplus1.nc.variables["model_top"][0,0] == f.nc.variables["model_top"][0,0] + 1
# assert (f + fplus1).nc.variables["model_top"][0,0] ==\
# f.nc.variables["model_top"][0,0] + \
# fplus1.nc.variables["model_top"][0,0]
#
# fminus1 = f - 1
# assert fminus1.nc.variables["model_top"][0,0] == f.nc.variables["model_top"][0,0] - 1
# assert (f - fminus1).nc.variables["model_top"][0,0]==\
# f.nc.variables["model_top"][0,0] - \
# fminus1.nc.variables["model_top"][0,0]
#
# ftimes2 = f * 2
# assert ftimes2.nc.variables["model_top"][0,0] == f.nc.variables["model_top"][0,0] * 2
# assert (f * ftimes2).nc.variables["model_top"][0,0] ==\
# f.nc.variables["model_top"][0,0] * \
# ftimes2.nc.variables["model_top"][0,0]
#
# fdiv2 = f / 2
# assert fdiv2.nc.variables["model_top"][0,0] == f.nc.variables["model_top"][0,0] / 2
# assert (f / fdiv2).nc.variables["model_top"][0,0] == \
# f.nc.variables["model_top"][0,0] / \
# fdiv2.nc.variables["model_top"][0,0]
#
# assert f.nc.variables["ibound"][0,0,0] == 1
def test_shapefile_ibound():
import os
import flopy
try:
import shapefile
    except ImportError:
return
shape_name = os.path.join(spth, "test.shp")
nam_file = "freyberg.nam"
model_ws = os.path.join('..', 'examples', 'data',
'freyberg_multilayer_transient')
ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False,
verbose=True, load_only=[])
ml.export(shape_name)
shp = shapefile.Reader(shape_name)
field_names = [item[0] for item in shp.fields][1:]
ib_idx = field_names.index("ibound_001")
txt = "should be int instead of {0}".format(type(shp.record(0)[ib_idx]))
assert type(shp.record(0)[ib_idx]) == int, txt
def test_shapefile():
for namfile in namfiles:
yield export_shapefile, namfile
return
def test_netcdf():
for namfile in namfiles:
yield export_netcdf, namfile
return
def build_netcdf():
for namfile in namfiles:
export_netcdf(namfile)
return
def build_sfr_netcdf():
namfile = 'testsfr2.nam'
export_netcdf(namfile)
return
if __name__ == '__main__':
#test_shapefile()
# test_shapefile_ibound()
# test_netcdf_overloads()
#test_netcdf_classmethods()
# build_netcdf()
# build_sfr_netcdf()
#test_sr()
#test_mbase_sr()
#test_rotation()
test_sr_with_Map()
#test_sr_scaling()
#test_read_usgs_model_reference()
#test_dynamic_xll_yll()
#test_namfile_readwrite()
# test_free_format_flag()
# test_export_output()
#for namfile in namfiles:
# for namfile in ["fhb.nam"]:
# export_netcdf(namfile)
#test_freyberg_export()
pass
|
|
import os
import six
import pytest
from doit import version
from doit.cmdparse import CmdParseError, CmdParse
from doit.exceptions import InvalidCommand, InvalidDodoFile
from doit.dependency import FileChangedChecker
from doit.task import Task
from doit.cmd_base import version_tuple, Command, DoitCmdBase
from doit.cmd_base import ModuleTaskLoader, DodoTaskLoader
from doit.cmd_base import check_tasks_exist, tasks_and_deps_iter, subtasks_iter
def test_version_tuple():
assert [1,2,3] == version_tuple([1,2,3])
assert [1,2,3] == version_tuple('1.2.3')
assert [1,2,3] == version_tuple(six.u('1.2.3'))
assert [0,2,0] == version_tuple('0.2.0')
assert [0,2,-1] == version_tuple('0.2.dev1')
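# Option definitions reused by SampleCmd below. Each dict follows the schema
# expected by doit's CmdParse: 'name' (parameter name), 'short'/'long' (flag
# spellings; empty strings keep the option out of the help text), 'inverse'
# (negating long flag for booleans), 'type', 'default' and 'help', where
# '%(default)s' is interpolated with the effective default value.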
opt_bool = {'name': 'flag',
'short':'f',
'long': 'flag',
'inverse':'no-flag',
'type': bool,
'default': False,
'help': 'help for opt1'}
opt_rare = {'name': 'rare',
'long': 'rare-bool',
'type': bool,
'default': False,
'help': 'help for opt2 [default: %(default)s]'}
opt_int = {'name': 'num',
'short':'n',
'long': 'number',
'type': int,
'default': 5,
'help': 'help for opt3 [default: %(default)s]'}
opt_no = {'name': 'no',
'short':'',
'long': '',
'type': int,
'default': 5,
'help': 'user cant modify me'}
class SampleCmd(Command):
doc_purpose = 'PURPOSE'
doc_usage = 'USAGE'
doc_description = 'DESCRIPTION'
cmd_options = [opt_bool, opt_rare, opt_int, opt_no]
@staticmethod
def execute(params, args):
return params, args
class TestCommand(object):
def test_configure(self):
config = {'GLOBAL':{'foo':1, 'bar':'2'},
'whatever':{'xxx': 'yyy'},
'samplecmd': {'foo':4},
}
cmd = SampleCmd(config=config)
assert cmd.config == config
assert cmd.config_vals == {'foo':4, 'bar':'2'}
def test_call_value_cmd_line_arg(self):
cmd = SampleCmd()
params, args = cmd.parse_execute(['-n','7','ppp'])
assert ['ppp'] == args
assert 7 == params['num']
def test_call_value_option_default(self):
cmd = SampleCmd()
params, args = cmd.parse_execute([])
assert 5 == params['num']
def test_call_value_overwritten_default(self):
cmd = SampleCmd(config={'GLOBAL':{'num': 20}})
params, args = cmd.parse_execute([])
assert 20 == params['num']
def test_help(self):
cmd = SampleCmd(config={'GLOBAL':{'num': 20}})
text = cmd.help()
assert 'PURPOSE' in text
assert 'USAGE' in text
assert 'DESCRIPTION' in text
assert '-f' in text
assert '--rare-bool' in text
assert 'help for opt1' in text
assert opt_no['name'] in [o.name for o in cmd.get_options()]
        # options without short and long names are not displayed
assert 'user cant modify me' not in text
# default value is displayed
assert "help for opt2 [default: False]" in text
# overwritten default
assert "help for opt3 [default: 20]" in text
def test_failCall(self):
cmd = SampleCmd()
pytest.raises(CmdParseError, cmd.parse_execute, ['-x','35'])
class TestModuleTaskLoader(object):
def test_load_tasks(self):
cmd = Command()
members = {'task_xxx1': lambda : {'actions':[]},
'task_no': 'strings are not tasks',
'blabla': lambda :None,
'DOIT_CONFIG': {'verbose': 2},
}
loader = ModuleTaskLoader(members)
task_list, config = loader.load_tasks(cmd, {}, [])
assert ['xxx1'] == [t.name for t in task_list]
assert {'verbose': 2} == config
class TestDodoTaskLoader(object):
def test_load_tasks(self, restore_cwd):
os.chdir(os.path.dirname(__file__))
cmd = Command()
params = {'dodoFile': 'loader_sample.py',
'cwdPath': None,
'seek_file': False,
}
loader = DodoTaskLoader()
task_list, config = loader.load_tasks(cmd, params, [])
assert ['xxx1', 'yyy2'] == [t.name for t in task_list]
assert {'verbose': 2} == config
class TestDoitCmdBase(object):
class MyCmd(DoitCmdBase):
doc_purpose = "fake for testing"
doc_usage = "[TASK ...]"
doc_description = None
opt_my = {
'name': 'my_opt',
'short':'m',
'long': 'mine',
'type': str,
'default': 'xxx',
'help': "my option"
}
cmd_options = (opt_my,)
def _execute(self, my_opt):
return my_opt
# command with lower level execute() method
def test_new_cmd(self):
class MyRawCmd(self.MyCmd):
def execute(self, params, args):
return params['my_opt']
members = {'task_xxx1': lambda : {'actions':[]},}
loader = ModuleTaskLoader(members)
mycmd = MyRawCmd(task_loader=loader, cmds={'foo':None, 'bar':None})
assert mycmd.loader.cmd_names == ['bar', 'foo']
assert 'min' == mycmd.parse_execute(['--mine', 'min'])
# loader gets a reference to config
def test_loader_config(self, depfile_name):
mycmd = self.MyCmd(config={'foo':{'bar':'x'}})
assert mycmd.loader.config['foo'] == {'bar':'x'}
# command with _execute() method
def test_execute(self, depfile_name):
members = {'task_xxx1': lambda : {'actions':[]},}
loader = ModuleTaskLoader(members)
mycmd = self.MyCmd(task_loader=loader)
assert 'min' == mycmd.parse_execute([
'--db-file', depfile_name,
'--mine', 'min'])
# command with _execute() method
def test_minversion(self, depfile_name, monkeypatch):
members = {
'task_xxx1': lambda : {'actions':[]},
'DOIT_CONFIG': {'minversion': '5.2.3'},
}
loader = ModuleTaskLoader(members)
# version ok
monkeypatch.setattr(version, 'VERSION', '7.5.8')
mycmd = self.MyCmd(task_loader=loader)
assert 'xxx' == mycmd.parse_execute(['--db-file', depfile_name])
# version too old
monkeypatch.setattr(version, 'VERSION', '5.2.1')
mycmd = self.MyCmd(task_loader=loader)
pytest.raises(InvalidDodoFile, mycmd.parse_execute, [])
def testInvalidChecker(self):
mycmd = self.MyCmd(task_loader=ModuleTaskLoader({}))
params, args = CmdParse(mycmd.get_options()).parse([])
params['check_file_uptodate'] = 'i dont exist'
pytest.raises(InvalidCommand, mycmd.execute, params, args)
def testCustomChecker(self, depfile_name):
class MyChecker(FileChangedChecker):
pass
mycmd = self.MyCmd(task_loader=ModuleTaskLoader({}))
params, args = CmdParse(mycmd.get_options()).parse([])
params['check_file_uptodate'] = MyChecker
params['dep_file'] = depfile_name
mycmd.execute(params, args)
assert isinstance(mycmd.dep_manager.checker, MyChecker)
def testPluginBackend(self, depfile_name):
mycmd = self.MyCmd(task_loader=ModuleTaskLoader({}),
config={'BACKEND': {'j2': 'doit.dependency:JsonDB'}})
params, args = CmdParse(mycmd.get_options()).parse(['--backend', 'j2'])
params['dep_file'] = depfile_name
mycmd.execute(params, args)
assert mycmd.dep_manager.db_class is mycmd._backends['j2']
def testPluginLoader(self, depfile_name):
entry_point = {'mod': 'tests.sample_plugin:MyLoader'}
mycmd = self.MyCmd(config={'GLOBAL': {'loader': 'mod'},
'LOADER': entry_point})
assert mycmd.loader.__class__.__name__ == 'MyLoader'
task_list, dodo_config = mycmd.loader.load_tasks(mycmd, {}, [])
assert task_list[0].name == 'sample_task'
assert dodo_config == {'verbosity': 2}
class TestCheckTasksExist(object):
def test_None(self):
check_tasks_exist({}, None)
# nothing is raised
def test_invalid(self):
pytest.raises(InvalidCommand, check_tasks_exist, {}, 't2')
def test_valid(self):
tasks = {
't1': Task("t1", [""] ),
't2': Task("t2", [""], task_dep=['t1']),
}
check_tasks_exist(tasks, ['t2'])
# nothing is raised
class TestTaskAndDepsIter(object):
def test_dep_iter(self):
tasks = {
't1': Task("t1", [""] ),
't2': Task("t2", [""], task_dep=['t1']),
't3': Task("t3", [""], setup=['t1']),
't4': Task("t4", [""], task_dep=['t3']),
}
def names(sel_tasks, repeated=False):
task_list = tasks_and_deps_iter(tasks, sel_tasks, repeated)
return [t.name for t in task_list]
# no deps
assert ['t1'] == names(['t1'])
# with task_dep
assert ['t2', 't1'] == names(['t2'])
# with setup
assert ['t3', 't1'] == names(['t3'])
# two levels
assert ['t4', 't3', 't1'] == names(['t4'])
# select 2
assert set(['t2', 't1']) == set(names(['t1', 't2']))
# repeat deps
got = names(['t1', 't2'], True)
assert 3 == len(got)
assert 't1' == got[-1]
class TestSubtaskIter(object):
def test_sub_iter(self):
tasks = {
't1': Task("t1", [""] ),
't2': Task("t2", [""], task_dep=['t1', 't2:a', 't2:b']),
't2:a': Task("t2:a", [""], is_subtask=True),
't2:b': Task("t2:b", [""], is_subtask=True),
}
def names(task_name):
return [t.name for t in subtasks_iter(tasks, tasks[task_name])]
assert [] == names('t1')
assert ['t2:a', 't2:b'] == names('t2')
|
|
"""Automata.py
Manipulation of and conversions between regular expressions,
deterministic finite automata, and nondeterministic finite automata.
D. Eppstein, UC Irvine, November 2003.
"""
from __future__ import generators
from Util import arbitrary_item
import sys
import operator
import unittest
from sets import Set,ImmutableSet
from PartitionRefinement import PartitionRefinement
from Sequence import Sequence
class LanguageError(Exception): pass
class RegExpError(Exception): pass
# maintain Python 2.2 compatibility
if 'True' not in globals():
globals()['True'] = not None
globals()['False'] = not True
def Language(A):
"""Convert automaton A into an object describing its language.
This is distinct from class RegularLanguage in case we
want to later add other types of automaton and nonregular languages.
"""
return A.language()
class RegularLanguage:
"""Object representing the language recognized by a DFA or NFA.
Available operations are testing whether a string is in the language,
logical combinations, and subset and equality testing.
"""
def __init__(self,arg):
if isinstance(arg,FiniteAutomaton):
self.recognizer = arg
elif isinstance(arg,(str,unicode)):
self.recognizer = RegExp(arg)
else:
raise LanguageError("Unrecognized constructor for RegularLanguage")
def __contains__(self,inputsequence):
return self.recognizer(inputsequence)
def __eq__(self,other):
if not isinstance(other,RegularLanguage):
return None
return self.recognizer.minimize() == other.recognizer.minimize()
def __ne__(self,other):
return not (self == other)
def __le__(self,other):
return not(self &~ other)
def __ge__(self,other):
return not(other &~ self)
def __lt__(self,other):
return self <= other and self != other
def __gt__(self,other):
return self >= other and self != other
def __invert__(self):
"""Complement (with respect to alphabet) of language."""
return Language(self.recognizer.complement())
def __and__(self,other):
"""Intersection of two languages with the same alphabet."""
if not isinstance(other,RegularLanguage):
raise LanguageError("Unable to intersect nonregular language")
return Language(self.recognizer.intersection(other.recognizer))
def __or__(self,other):
"""Union of two languages with the same alphabet."""
if not isinstance(other,RegularLanguage):
raise LanguageError("Unable to intersect nonregular language")
return Language(self.recognizer.union(other.recognizer))
def __xor__(self,other):
"""Symmetric difference of two languages with the same alphabet."""
if not isinstance(other,RegularLanguage):
raise LanguageError("Unable to intersect nonregular language")
return Language(self.recognizer.symmetricDifference(other.recognizer))
    def __nonzero__(self):
        """Is this language nonempty?"""
        for x in self.recognizer.states():
            if self.recognizer.isfinal(x):
                return True
        return False
class FiniteAutomaton:
"""Base class for DFA and NFA. This class should not be instantiated
on its own, but dispatches methods that are appropriate to both types
of automaton by calling .asDFA() or .asNFA() to convert the automaton
to the appropriate type. All automaton instances should include the
following instance variables and methods:
- x.initial: initial state (for DFA) or set of states (for NFA)
- x.alphabet: set of input symbols accepted by the automaton
- x.transition(state,symbol): result of transition function,
either a single state (for a DFA) or set of states (for an NFA)
- x.isfinal(state): whether the state is an accepting state
- x.asDFA(): return an equivalent DFA
- x.asNFA(): return an equivalent NFA
"""
initial = alphabet = transition = isfinal = asDFA = asNFA = None
def __len__(self):
"""How many states does this automaton have?"""
return len(list(self.states()))
def __call__(self,symbols):
"""Test whether sequence of symbols is accepted by the DFA."""
return self.asDFA()(symbols)
def language(self):
"""Form language object for language recognized by automaton."""
return RegularLanguage(self)
def states(self):
"""Generate all states reachable from initial state."""
return self.asNFA().states()
def pprint(self,output=sys.stdout):
"""Pretty-print this automaton to an output stream."""
return self.asNFA().pprint(output)
def minimize(self):
"""Return smallest equivalent DFA."""
return _MinimumDFA(self.asDFA())
def reverse(self):
"""Construct NFA for reversal of original NFA's language."""
return _ReverseNFA(self.asNFA())
def renumber(self,offset=0):
"""Replace complicated state objects by small integers."""
return _RenumberNFA(self.asNFA(),offset=offset)
def RegExp(self):
"""Return equivalent regular expression."""
return self.asNFA().RegExp()
def complement(self):
"""Make automaton recognizing complement of given automaton's language."""
return _ComplementDFA(self.asDFA())
def union(self,other):
"""Make automaton recognizing union of two automata's languages."""
return _ProductDFA(self.asDFA(),other.asDFA(),operator.or_)
    def intersection(self,other):
        """Make automaton recognizing intersection of two automata's languages."""
        return _ProductDFA(self.asDFA(),other.asDFA(),operator.and_)
    def symmetricDifference(self,other):
        """Make automaton recognizing symmetric difference of two automata's languages."""
        return _ProductDFA(self.asDFA(),other.asDFA(),operator.xor)
class DFA(FiniteAutomaton):
"""Base class for deterministic finite automaton. Subclasses are
responsible for filling out the details of the initial state, alphabet,
and transition function.
"""
def asDFA(self):
return self
def asNFA(self):
return _NFAfromDFA(self)
def __call__(self,symbols):
"""Test whether sequence of symbols is accepted by the DFA."""
state = self.initial
for symbol in symbols:
if symbol not in self.alphabet:
raise LanguageError("Symbol " + repr(symbol) +
" not in input alphabet")
state = self.transition(state,symbol)
return self.isfinal(state)
def __eq__(self,other):
"""Report whether these two DFAs have equivalent states."""
if not isinstance(other,DFA) or len(self) != len(other) \
or self.alphabet != other.alphabet:
return False
equivalences = {self.initial:other.initial}
unprocessed = [self.initial]
        while unprocessed:
            x = unprocessed.pop()
            y = equivalences[x]
            # corresponding states must also agree on acceptance
            if self.isfinal(x) != other.isfinal(y):
                return False
for c in self.alphabet:
xc = self.transition(x,c)
yc = other.transition(y,c)
if xc not in equivalences:
equivalences[xc] = yc
unprocessed.append(xc)
elif equivalences[xc] != yc:
return False
return True
def __ne__(self,other):
"""Report whether these two DFAs have equivalent states."""
return not (self == other)
class NFA(FiniteAutomaton):
"""Base class for nondeterministic finite automaton. Subclasses are
responsible for filling out the details of the initial state, alphabet,
and transition function. Note that the NFAs defined here do not allow
epsilon-transitions. Results of self.initial and self.transition are
assumed to be represented as ImmutableSet instances.
"""
def asNFA(self):
return self
def asDFA(self):
return _DFAfromNFA(self)
def states(self):
visited = Set()
unvisited = Set(self.initial)
while unvisited:
state = arbitrary_item(unvisited)
yield state
unvisited.remove(state)
visited.add(state)
for symbol in self.alphabet:
unvisited |= self.transition(state,symbol) - visited
def pprint(self,output=sys.stdout):
"""Pretty-print this NFA to an output stream."""
for state in self.states():
adjectives = []
if state in self.initial:
adjectives.append("initial")
if self.isfinal(state):
adjectives.append("accepting")
if not [c for c in self.alphabet if self.transition(state,c)]:
adjectives.append("terminal")
if not adjectives:
print >>output, state
else:
print >>output, state, "(" + ", ".join(adjectives) + ")"
for c in self.alphabet:
for neighbor in self.transition(state,c):
print >>output, " --[" + str(c) + "]-->", neighbor
def RegExp(self):
"""Convert to regular expression and return as a string.
See Sipser for an explanation of this algorithm."""
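        # State elimination: remove one state s at a time; each remaining
        # pair (x, y) absorbs the paths that went through s, i.e.
        #     expr[x,y] += expr[x,s] (expr[s,s])* expr[s,y]
        # built below by string concatenation, with '+' for union and '*'
        # for Kleene star.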
# create artificial initial and final states
initial = object()
final = object()
states = Set([initial,final]) | Set(self.states())
# 2d matrix of expressions connecting each pair of states
expr = {}
for x in states:
for y in states:
expr[x,y] = None
for x in self.states():
if x in self.initial:
expr[initial,x] = ''
if self.isfinal(x):
expr[x,final] = ''
expr[x,x] = ''
for x in self.states():
for c in self.alphabet:
for y in self.transition(x,c):
if expr[x,y]:
expr[x,y] += '+' + str(c)
else:
expr[x,y] = str(c)
# eliminate states one at a time
for s in self.states():
states.remove(s)
for x in states:
for y in states:
if expr[x,s] is not None and expr[s,y] is not None:
xsy = []
if expr[x,s]:
xsy += self._parenthesize(expr[x,s])
if expr[s,s]:
xsy += self._parenthesize(expr[s,s],True) + ['*']
if expr[s,y]:
xsy += self._parenthesize(expr[s,y])
if expr[x,y] is not None:
xsy += ['+',expr[x,y] or '()']
expr[x,y] = ''.join(xsy)
return expr[initial,final]
def _parenthesize(self,expr,starring=False):
"""Return list of strings with or without parens for use in RegExp.
This is only for the purpose of simplifying the expressions returned,
by omitting parentheses or other expression features when unnecessary;
it would always be correct simply to return ['(',expr,')'].
"""
if len(expr) == 1 or (not starring and '+' not in expr):
return [expr]
elif starring and expr.endswith('+()'):
return ['(',expr[:-3],')'] # +epsilon redundant when starring
else:
return ['(',expr,')']
class _DFAfromNFA(DFA):
"""Conversion of NFA to DFA. We create a DFA state for each set
of NFA states. A DFA state is final if it contains at least one
    final NFA state, and the transition function for a DFA state is the
union of the transition functions of the NFA states it contains.
"""
def __init__(self,N):
self.initial = N.initial
self.alphabet = N.alphabet
self.NFA = N
def transition(self,stateset,symbol):
output = Set()
for state in stateset:
output |= self.NFA.transition(state,symbol)
return ImmutableSet(output)
def isfinal(self,stateset):
for state in stateset:
if self.NFA.isfinal(state):
return True
return False
class _NFAfromDFA(NFA):
"""Conversion of DFA to NFA. We convert the initial state and the
results of each transition function into single-element sets.
"""
def __init__(self,D):
self.initial = ImmutableSet([D.initial])
self.alphabet = D.alphabet
self.DFA = D
def transition(self,state,symbol):
return ImmutableSet([self.DFA.transition(state,symbol)])
def isfinal(self,state):
return self.DFA.isfinal(state)
Empty = ImmutableSet()
class RegExp(NFA):
"""Convert regular expression to NFA."""
def __init__(self,expr):
self.expr = expr
self.pos = 0
self.nstates = 0
self.expect = {}
self.successor = {}
self.alphabet = Set()
self.initial,penultimate,epsilon = self.expression()
final = self.newstate(None)
for state in penultimate:
self.successor[state].add(final)
self.final = ImmutableSet([final])
if epsilon:
self.final = self.final | self.initial
def transition(self,state,c):
"""Implement NFA transition function."""
if c != self.expect[state]:
return Empty
else:
return self.successor[state]
def isfinal(self,state):
"""Implement NFA acceptance test."""
return state in self.final
# Recursive-descent parser for regular expressions.
# Each function uses self.pos as a pointer into self.expr,
# updates self.expect and self.successor,
# and returns a tuple (initial,penultimate,epsilon), where
# initial = the initial states of the subexpression
# penultimate = states one step away from an accepting state
# epsilon = true if the subexpression accepts the empty string
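    # For example (illustrative), parsing "ab" allocates state 0 expecting 'a'
    # and state 1 expecting 'b'; term() links 0 -> 1 and returns
    # (initial={0}, penultimate={1}, epsilon=False), after which __init__ adds
    # a fresh accepting state reachable from state 1.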
def epsilon(self):
"""Parse an empty string and return an empty automaton."""
return Empty,Empty,True
def newstate(self,expect):
"""Allocate a new state in which we expect to see the given letter."""
state = self.nstates
self.successor[state] = Set()
self.expect[state] = expect
self.nstates += 1
return state
def base(self):
"""Parse a subexpression that can be starred: single letter or group."""
if self.pos == len(self.expr) or self.expr[self.pos] == ')':
return self.epsilon()
if self.expr[self.pos] == '(':
self.pos += 1
ret = self.expression()
if self.pos == len(self.expr) or self.expr[self.pos] != ')':
raise LanguageError("Close paren expected at char " + str(self.pos))
self.pos += 1
return ret
if self.expr[self.pos] == '\\':
self.pos += 1
if self.pos == len(self.expr):
raise RegExpError("Character expected after backslash")
self.alphabet.add(self.expr[self.pos])
state = self.newstate(self.expr[self.pos])
self.pos += 1
state = ImmutableSet([state])
return state,state,False
def factor(self):
"""Parse a catenable expression: base or starred base."""
initial,penultimate,epsilon = self.base()
while self.pos < len(self.expr) and self.expr[self.pos] == '*':
self.pos += 1
for state in penultimate:
self.successor[state] |= initial
epsilon = True
return initial,penultimate,epsilon
def term(self):
"""Parse a summable expression: factor or concatenation."""
initial,penultimate,epsilon = self.factor()
while self.pos < len(self.expr) and self.expr[self.pos] not in ')+':
Fi,Fp,Fe = self.factor()
for state in penultimate:
self.successor[state] |= Fi
if epsilon:
initial = initial | Fi
if Fe:
penultimate = penultimate | Fp
else:
penultimate = Fp
epsilon = epsilon and Fe
return initial,penultimate,epsilon
def expression(self):
"""Parse a whole regular expression or grouped subexpression."""
initial,penultimate,epsilon = self.term()
while self.pos < len(self.expr) and self.expr[self.pos] == '+':
self.pos += 1
Ti,Tp,Te = self.term()
initial = initial | Ti
penultimate = penultimate | Tp
epsilon = epsilon or Te
return initial,penultimate,epsilon
class LookupNFA(NFA):
"""Construct NFA with precomputed lookup table of transitions."""
def __init__(self,alphabet,initial,ttable,final):
self.alphabet = alphabet
self.initial = ImmutableSet(initial)
self.ttable = ttable
self.final = ImmutableSet(final)
def transition(self,state,symbol):
return ImmutableSet(self.ttable[state,symbol])
def isfinal(self,state):
return state in self.final
def _RenumberNFA(N,offset=0):
"""Replace NFA state objects with small integers."""
replacements = {}
for x in N.states():
replacements[x] = offset
offset += 1
initial = [replacements[x] for x in N.initial]
ttable = {}
for state in N.states():
for symbol in N.alphabet:
ttable[replacements[state],symbol] = [replacements[x]
for x in N.transition(state,symbol)]
final = [replacements[x] for x in N.states() if N.isfinal(x)]
return LookupNFA(N.alphabet,initial,ttable,final)
class _ProductDFA(DFA):
"""DFA that simulates D1 and D2 and combines their outputs with op."""
def __init__(self,D1,D2,op):
if D1.alphabet != D2.alphabet:
raise LanguageError("DFAs have incompatible alphabets")
self.alphabet = D1.alphabet
self.initial = (D1.initial,D2.initial)
self.D1 = D1
self.D2 = D2
self.op = op
def transition(self,state,symbol):
s1,s2 = state
return self.D1.transition(s1,symbol), \
self.D2.transition(s2,symbol)
def isfinal(self,state):
s1,s2 = state
f1 = self.D1.isfinal(s1) and 1 or 0
f2 = self.D2.isfinal(s2) and 1 or 0
return self.op(f1,f2)
def _ReverseNFA(N):
"""Construct NFA for reversal of original NFA's language."""
initial = [s for s in N.states() if N.isfinal(s)]
ttable = dict([((s,c),[]) for s in N.states() for c in N.alphabet])
for s in N.states():
for c in N.alphabet:
for t in N.transition(s,c):
ttable[t,c].append(s)
return LookupNFA(N.alphabet,initial,ttable,N.initial)
class _ComplementDFA(DFA):
"""DFA for complementary language."""
def __init__(self,D):
self.DFA = D
self.initial = D.initial
self.alphabet = D.alphabet
def transition(self,state,symbol):
return self.DFA.transition(state,symbol)
def isfinal(self,state):
return not self.DFA.isfinal(state)
class _MinimumDFA(DFA):
"""Construct equivalent DFA with minimum number of states,
using Hopcroft's O(ns log n) partition-refinement algorithm.
"""
def __init__(self,D):
# refine partition of states by reversed neighborhoods
N = D.reverse()
P = PartitionRefinement(D.states())
P.refine([s for s in D.states() if D.isfinal(s)])
unrefined = Sequence(P,key=id)
while unrefined:
part = arbitrary_item(unrefined)
unrefined.remove(part)
for symbol in D.alphabet:
neighbors = Set()
for state in part:
neighbors |= N.transition(state,symbol)
for new,old in P.refine(neighbors):
if old in unrefined or len(new) < len(old):
unrefined.append(new)
else:
unrefined.append(old)
# convert partition to DFA
P.freeze()
self.partition = P
self.initial = P[D.initial]
self.alphabet = D.alphabet
self.DFA = D
def transition(self,state,symbol):
rep = arbitrary_item(state)
return self.partition[self.DFA.transition(rep,symbol)]
def isfinal(self,state):
rep = arbitrary_item(state)
return self.DFA.isfinal(rep)
# If called as standalone routine, run some unit tests
class RegExpTest(unittest.TestCase):
# tuples (L,[strings in L],[strings not in L])
languages = [
(RegularLanguage("0"), ["0"], ["","00"]),
(RegularLanguage("(10+0)*"), ["","0","010"], ["1"]),
(RegularLanguage("(0+1)*1(0+1)(0+1)"), ["000100"], ["0011"]),
]
def testMembership(self):
"""membership tests for RegularLanguage(expression)"""
for L,Li,Lx in self.languages:
for S in Li:
self.assert_(S in L)
for S in Lx:
self.assert_(S not in L)
def testComplement(self):
"""membership tests for ~RegularLanguage"""
for L,Li,Lx in self.languages:
L = ~L
for S in Lx:
self.assert_(S in L)
for S in Li:
self.assert_(S not in L)
def testEquivalent(self):
"""test that converting NFA->expr->NFA produces same language"""
for L1,Li,Lx in self.languages:
L2 = RegularLanguage(L1.recognizer.RegExp())
self.assertEqual(L1,L2)
def testInequivalent(self):
"""test that different regular languages are recognized as different"""
for i in range(len(self.languages)):
for j in range(i):
self.assertNotEqual(self.languages[i][0],
self.languages[j][0])
if __name__ == "__main__":
unittest.main()
|
|
from collections import namedtuple
import mxnet as mx
from stt_layer_batchnorm import batchnorm
GRUState = namedtuple("GRUState", ["h"])
GRUParam = namedtuple("GRUParam", ["gates_i2h_weight", "gates_i2h_bias",
"gates_h2h_weight", "gates_h2h_bias",
"trans_i2h_weight", "trans_i2h_bias",
"trans_h2h_weight", "trans_h2h_bias"])
GRUModel = namedtuple("GRUModel", ["rnn_exec", "symbol",
"init_states", "last_states",
"seq_data", "seq_labels", "seq_outputs",
"param_blocks"])
def gru(num_hidden, indata, prev_state, param, seqidx, layeridx, dropout=0., is_batchnorm=False, gamma=None, beta=None, name=None):
"""
GRU Cell symbol
Reference:
* Chung, Junyoung, et al. "Empirical evaluation of gated recurrent neural
networks on sequence modeling." arXiv preprint arXiv:1412.3555 (2014).
"""
if dropout > 0.:
indata = mx.sym.Dropout(data=indata, p=dropout)
i2h = mx.sym.FullyConnected(data=indata,
weight=param.gates_i2h_weight,
bias=param.gates_i2h_bias,
num_hidden=num_hidden * 2,
name="t%d_l%d_gates_i2h" % (seqidx, layeridx))
if is_batchnorm:
if name is not None:
i2h = batchnorm(net=i2h, gamma=gamma, beta=beta, name="%s_batchnorm" % name)
else:
i2h = batchnorm(net=i2h, gamma=gamma, beta=beta)
h2h = mx.sym.FullyConnected(data=prev_state.h,
weight=param.gates_h2h_weight,
bias=param.gates_h2h_bias,
num_hidden=num_hidden * 2,
name="t%d_l%d_gates_h2h" % (seqidx, layeridx))
gates = i2h + h2h
slice_gates = mx.sym.SliceChannel(gates, num_outputs=2,
name="t%d_l%d_slice" % (seqidx, layeridx))
update_gate = mx.sym.Activation(slice_gates[0], act_type="sigmoid")
reset_gate = mx.sym.Activation(slice_gates[1], act_type="sigmoid")
# The transform part of GRU is a little magic
htrans_i2h = mx.sym.FullyConnected(data=indata,
weight=param.trans_i2h_weight,
bias=param.trans_i2h_bias,
num_hidden=num_hidden,
name="t%d_l%d_trans_i2h" % (seqidx, layeridx))
h_after_reset = prev_state.h * reset_gate
htrans_h2h = mx.sym.FullyConnected(data=h_after_reset,
weight=param.trans_h2h_weight,
bias=param.trans_h2h_bias,
num_hidden=num_hidden,
name="t%d_l%d_trans_h2h" % (seqidx, layeridx))
h_trans = htrans_i2h + htrans_h2h
h_trans_active = mx.sym.Activation(h_trans, act_type="tanh")
next_h = prev_state.h + update_gate * (h_trans_active - prev_state.h)
return GRUState(h=next_h)
def gru_unroll(net, num_gru_layer, seq_len, num_hidden_gru_list, dropout=0., is_batchnorm=False, prefix="",
direction="forward", is_bucketing=False):
if num_gru_layer > 0:
param_cells = []
last_states = []
for i in range(num_gru_layer):
param_cells.append(GRUParam(gates_i2h_weight=mx.sym.Variable(prefix + "l%d_i2h_gates_weight" % i),
gates_i2h_bias=mx.sym.Variable(prefix + "l%d_i2h_gates_bias" % i),
gates_h2h_weight=mx.sym.Variable(prefix + "l%d_h2h_gates_weight" % i),
gates_h2h_bias=mx.sym.Variable(prefix + "l%d_h2h_gates_bias" % i),
trans_i2h_weight=mx.sym.Variable(prefix + "l%d_i2h_trans_weight" % i),
trans_i2h_bias=mx.sym.Variable(prefix + "l%d_i2h_trans_bias" % i),
trans_h2h_weight=mx.sym.Variable(prefix + "l%d_h2h_trans_weight" % i),
trans_h2h_bias=mx.sym.Variable(prefix + "l%d_h2h_trans_bias" % i)))
state = GRUState(h=mx.sym.Variable(prefix + "l%d_init_h" % i))
last_states.append(state)
assert (len(last_states) == num_gru_layer)
        # declare batchnorm params (gamma, beta): per layer when bucketing, otherwise per timestep
if is_batchnorm:
batchnorm_gamma = []
batchnorm_beta = []
if is_bucketing:
for l in range(num_gru_layer):
batchnorm_gamma.append(mx.sym.Variable(prefix + "l%d_i2h_gamma" % l))
batchnorm_beta.append(mx.sym.Variable(prefix + "l%d_i2h_beta" % l))
else:
for seqidx in range(seq_len):
batchnorm_gamma.append(mx.sym.Variable(prefix + "t%d_i2h_gamma" % seqidx))
batchnorm_beta.append(mx.sym.Variable(prefix + "t%d_i2h_beta" % seqidx))
hidden_all = []
for seqidx in range(seq_len):
if direction == "forward":
k = seqidx
hidden = net[k]
elif direction == "backward":
k = seq_len - seqidx - 1
hidden = net[k]
else:
raise Exception("direction should be whether forward or backward")
# stack GRU
for i in range(num_gru_layer):
if i == 0:
dp_ratio = 0.
else:
dp_ratio = dropout
if is_batchnorm:
if is_bucketing:
next_state = gru(num_hidden_gru_list[i], indata=hidden,
prev_state=last_states[i],
param=param_cells[i],
seqidx=k, layeridx=i, dropout=dp_ratio,
is_batchnorm=is_batchnorm,
gamma=batchnorm_gamma[i],
beta=batchnorm_beta[i],
name=prefix + ("t%d_l%d" % (seqidx, i))
)
else:
next_state = gru(num_hidden_gru_list[i], indata=hidden,
prev_state=last_states[i],
param=param_cells[i],
seqidx=k, layeridx=i, dropout=dp_ratio,
is_batchnorm=is_batchnorm,
gamma=batchnorm_gamma[k],
beta=batchnorm_beta[k],
name=prefix + ("t%d_l%d" % (seqidx, i))
)
else:
next_state = gru(num_hidden_gru_list[i], indata=hidden,
prev_state=last_states[i],
param=param_cells[i],
seqidx=k, layeridx=i, dropout=dp_ratio,
is_batchnorm=is_batchnorm,
name=prefix)
hidden = next_state.h
last_states[i] = next_state
# decoder
if dropout > 0.:
hidden = mx.sym.Dropout(data=hidden, p=dropout)
if direction == "forward":
hidden_all.append(hidden)
elif direction == "backward":
hidden_all.insert(0, hidden)
else:
raise Exception("direction should be whether forward or backward")
net = hidden_all
return net
def bi_gru_unroll(net, num_gru_layer, seq_len, num_hidden_gru_list, dropout=0., is_batchnorm=False, is_bucketing=False):
if num_gru_layer > 0:
net_forward = gru_unroll(net=net,
num_gru_layer=num_gru_layer,
seq_len=seq_len,
num_hidden_gru_list=num_hidden_gru_list,
dropout=dropout,
is_batchnorm=is_batchnorm,
prefix="forward_",
direction="forward",
is_bucketing=is_bucketing)
net_backward = gru_unroll(net=net,
num_gru_layer=num_gru_layer,
seq_len=seq_len,
num_hidden_gru_list=num_hidden_gru_list,
dropout=dropout,
is_batchnorm=is_batchnorm,
prefix="backward_",
direction="backward",
is_bucketing=is_bucketing)
hidden_all = []
for i in range(seq_len):
hidden_all.append(mx.sym.Concat(*[net_forward[i], net_backward[i]], dim=1))
net = hidden_all
return net
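# Illustrative usage (names and sizes are assumptions, not taken from the
# original training script):
#     seq_len = 100
#     net = [mx.sym.Variable("t%d_data" % t) for t in range(seq_len)]
#     net = bi_gru_unroll(net, num_gru_layer=2, seq_len=seq_len,
#                         num_hidden_gru_list=[256, 256], dropout=0.2,
#                         is_batchnorm=True, is_bucketing=True)
# The result is a list of seq_len symbols, each the concatenation (dim=1) of
# the forward and backward hidden states for that timestep.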
def bi_gru_unroll_two_input_two_output(net1, net2, num_gru_layer, seq_len, num_hidden_gru_list, dropout=0.,
is_batchnorm=False, is_bucketing=False):
if num_gru_layer > 0:
net_forward = gru_unroll(net=net1,
num_gru_layer=num_gru_layer,
seq_len=seq_len,
num_hidden_gru_list=num_hidden_gru_list,
dropout=dropout,
is_batchnorm=is_batchnorm,
prefix="forward_",
direction="forward",
is_bucketing=is_bucketing)
net_backward = gru_unroll(net=net2,
num_gru_layer=num_gru_layer,
seq_len=seq_len,
num_hidden_gru_list=num_hidden_gru_list,
dropout=dropout,
is_batchnorm=is_batchnorm,
prefix="backward_",
direction="backward",
is_bucketing=is_bucketing)
return net_forward, net_backward
else:
return net1, net2
|
|
"""
A tentative module for pushing data through branching pipelines.
"""
import csv
from tempfile import NamedTemporaryFile
from operator import itemgetter
from itertools import islice
from collections import defaultdict
import pickle
from .util import asindices, HybridRow, shortlistmergesorted
import petl.transform
import collections
class PipelineComponent(object):
def __init__(self):
self.default_receivers = list()
self.keyed_receivers = defaultdict(list)
def pipe(self, *args):
assert 1 <= len(args) <= 2, '1 or 2 arguments expected'
if len(args) == 1:
receiver = args[0]
self.default_receivers.append(receiver)
return receiver
elif len(args) == 2:
key, receiver = args
self.keyed_receivers[key].append(receiver)
return receiver
def __or__(self, other):
if isinstance(other, tuple):
return self.pipe(*other)
else:
return self.pipe(other)
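    # Illustrative composition (mirrors the docstring examples further below):
    #     p = partition('fruit')
    #     p | ('orange', tocsv('oranges.csv'))   # keyed receiver, same as p.pipe(...)
    #     p | tocsv('all_fruit.csv')             # default (unkeyed) receiver
    #     p.push(sometable)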
def _connect_receivers(self, fields):
default_connections = [r.connect(fields) for r in self.default_receivers]
keyed_connections = dict()
for k in self.keyed_receivers:
keyed_connections[k] = [r.connect(fields) for r in self.keyed_receivers[k]]
return default_connections, keyed_connections
def push(self, source, limit=None):
it = iter(source)
fields = next(it)
c = self.connect(fields)
for row in islice(it, limit):
c.accept(tuple(row))
c.close()
class PipelineConnection(object):
def __init__(self, default_connections, keyed_connections, fields):
self.default_connections = default_connections
self.keyed_connections = keyed_connections
self.fields = fields
def close(self):
for c in self.default_connections:
c.close()
for k in self.keyed_connections:
for c in self.keyed_connections[k]:
c.close()
def broadcast(self, *args):
assert 1 <= len(args) <= 2, 'expected 1 or 2 arguments'
if len(args) == 1:
row = args[0]
for c in self.default_connections:
c.accept(tuple(row))
elif len(args) == 2:
key, row = args
if key in self.keyed_connections:
for c in self.keyed_connections[key]:
c.accept(tuple(row))
def tocsv(filename, dialect=csv.excel, **kwargs):
"""
Push rows to a CSV file. E.g.::
>>> from petl.push import tocsv
>>> p = tocsv('example.csv')
>>> p.push(sometable)
"""
return ToCsvComponent(filename, dialect, **kwargs)
def totsv(filename, dialect=csv.excel_tab, **kwargs):
"""
Push rows to a tab-delimited file. E.g.::
>>> from petl.push import totsv
>>> p = totsv('example.tsv')
>>> p.push(sometable)
"""
return ToCsvComponent(filename, dialect, **kwargs)
class ToCsvComponent(PipelineComponent):
def __init__(self, filename, dialect, **kwargs):
super(ToCsvComponent, self).__init__()
self.filename = filename
self.dialect = dialect
self.kwargs = kwargs
def connect(self, fields):
default_connections, keyed_connections = self._connect_receivers(fields)
return ToCsvConnection(default_connections, keyed_connections, fields,
self.filename, self.dialect, self.kwargs)
class ToCsvConnection(PipelineConnection):
def __init__(self, default_connections, keyed_connections, fields, filename, dialect, kwargs):
super(ToCsvConnection, self).__init__(default_connections, keyed_connections, fields)
self.file = open(filename, 'wb')
self.writer = csv.writer(self.file, dialect=dialect, **kwargs)
self.writer.writerow(fields)
def accept(self, row):
self.writer.writerow(row)
# forward rows on the default pipe (behave like tee)
self.broadcast(row)
def close(self):
self.file.flush()
self.file.close()
super(ToCsvConnection, self).close()
def topickle(filename, protocol=-1):
"""
Push rows to a pickle file. E.g.::
>>> from petl.push import topickle
>>> p = topickle('example.pickle')
>>> p.push(sometable)
"""
return ToPickleComponent(filename, protocol)
class ToPickleComponent(PipelineComponent):
def __init__(self, filename, protocol):
super(ToPickleComponent, self).__init__()
self.filename = filename
self.protocol = protocol
def connect(self, fields):
default_connections, keyed_connections = self._connect_receivers(fields)
return ToPickleConnection(default_connections, keyed_connections, fields,
self.filename, self.protocol)
class ToPickleConnection(PipelineConnection):
def __init__(self, default_connections, keyed_connections, fields, filename, protocol):
super(ToPickleConnection, self).__init__(default_connections, keyed_connections, fields)
self.file = open(filename, 'wb')
self.protocol = protocol
pickle.dump(fields, self.file, self.protocol)
def accept(self, row):
pickle.dump(row, self.file, self.protocol)
# forward rows on the default pipe (behave like tee)
self.broadcast(row)
def close(self):
self.file.flush()
self.file.close()
super(ToPickleConnection, self).close()
def partition(discriminator):
"""
Partition rows based on values of a field or results of applying a
function on the row. E.g.::
>>> from petl.push import partition, tocsv
>>> p = partition('fruit')
>>> p.pipe('orange', tocsv('oranges.csv'))
>>> p.pipe('banana', tocsv('bananas.csv'))
>>> p.push(sometable)
In the example above, rows where the value of the 'fruit' field
equals 'orange' are piped to the 'oranges.csv' file, and rows
where the 'fruit' field equals 'banana' are piped to the
'bananas.csv' file.
"""
return PartitionComponent(discriminator)
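# A hedged sketch of a callable discriminator (field and file names are
# hypothetical); PartitionConnection wraps each row as a HybridRow, so values
# can be looked up by field name as well as by index:
#
#   p = partition(lambda row: 'long' if len(row['name']) > 5 else 'short')
#   p.pipe('long', tocsv('long_names.csv'))
#   p.pipe('short', tocsv('short_names.csv'))
#   p.push(sometable)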
class PartitionComponent(PipelineComponent):
def __init__(self, discriminator):
super(PartitionComponent, self).__init__()
self.discriminator = discriminator
def connect(self, fields):
default_connections, keyed_connections = self._connect_receivers(fields)
return PartitionConnection(default_connections, keyed_connections, fields, self.discriminator)
class PartitionConnection(PipelineConnection):
def __init__(self, default_connections, keyed_connections, fields, discriminator):
super(PartitionConnection, self).__init__(default_connections, keyed_connections, fields)
if isinstance(discriminator, collections.Callable):
self.discriminator = discriminator
else: # assume field or fields
self.discriminator = itemgetter(*asindices(fields, discriminator))
def accept(self, row):
row = HybridRow(row, self.fields)
key = self.discriminator(row)
self.broadcast(key, row)
def sort(key=None, reverse=False, buffersize=None):
"""
Sort rows based on some key field or fields. E.g.::
>>> from petl.push import sort, tocsv
>>> p = sort('foo')
>>> p.pipe(tocsv('sorted_by_foo.csv'))
>>> p.push(sometable)
"""
return SortComponent(key=key, reverse=reverse, buffersize=buffersize)
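# Usage sketch (assumed, not from the original docstring): once more than
# `buffersize` rows arrive, each chunk is sorted and spilled to a temporary
# pickle file, then merged on close(); a small buffer trades memory for I/O.
#
#   p = sort(key=('foo', 'bar'), reverse=True, buffersize=10000)
#   p.pipe(tocsv('sorted.csv'))
#   p.push(sometable)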
class SortComponent(PipelineComponent):
def __init__(self, key=None, reverse=False, buffersize=None):
super(SortComponent, self).__init__()
self.key = key
self.reverse = reverse
self.buffersize = buffersize
def connect(self, fields):
default_connections, keyed_connections = self._connect_receivers(fields)
return SortConnection(default_connections, keyed_connections, fields,
self.key, self.reverse, self.buffersize)
class SortConnection(PipelineConnection):
def __init__(self, default_connections, keyed_connections, fields, key, reverse, buffersize):
super(SortConnection, self).__init__(default_connections, keyed_connections, fields)
self.getkey = None
if key is not None:
# convert field selection into field indices
indices = asindices(fields, key)
# now use field indices to construct a _getkey function
# N.B., this will probably raise an exception on short rows
self.getkey = itemgetter(*indices)
self.reverse = reverse
if buffersize is None:
self.buffersize = petl.transform.defaultbuffersize
else:
self.buffersize = buffersize
self.cache = list()
self.chunkfiles = list()
def accept(self, row):
row = tuple(row)
if len(self.cache) < self.buffersize:
self.cache.append(row)
else:
# sort and dump the chunk
self.cache.sort(key=self.getkey, reverse=self.reverse)
f = NamedTemporaryFile() # TODO need not be named
for r in self.cache:
pickle.dump(r, f, protocol=-1)
f.flush()
f.seek(0)
self.chunkfiles.append(f)
self.cache = [row]
def close(self):
# sort anything remaining in the cache
self.cache.sort(key=self.getkey, reverse=self.reverse)
if self.chunkfiles:
chunkiters = [iterchunk(f) for f in self.chunkfiles]
chunkiters.append(self.cache) # make sure any left in cache are included
for row in shortlistmergesorted(self.getkey, self.reverse, *chunkiters):
self.broadcast(row)
else:
for row in self.cache:
self.broadcast(row)
super(SortConnection, self).close()
def iterchunk(f):
try:
while True:
yield pickle.load(f)
except EOFError:
pass
def duplicates(key):
"""
Report rows with duplicate key values. E.g.::
>>> from petl.push import duplicates, tocsv
>>> p = duplicates('foo')
>>> p.pipe(tocsv('foo_dups.csv'))
>>> p.pipe('remainder', tocsv('foo_uniq.csv'))
>>> p.push(sometable)
N.B., assumes data are already sorted by the given key.
"""
return DuplicatesComponent(key)
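# Illustrative example of the two output channels (file names hypothetical):
# rows sharing a key value go to the default pipe, everything else to the
# 'remainder' pipe, and the input must already be sorted by that key.
#
#   p = duplicates(key=('foo', 'bar'))
#   p.pipe(tocsv('dups.csv'))                 # duplicate rows
#   p.pipe('remainder', tocsv('uniq.csv'))    # unique rows
#   p.push(sometable)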
class DuplicatesComponent(PipelineComponent):
def __init__(self, key):
super(DuplicatesComponent, self).__init__()
self.key = key
def connect(self, fields):
default_connections, keyed_connections = self._connect_receivers(fields)
return DuplicatesConnection(default_connections, keyed_connections, fields, self.key)
class DuplicatesConnection(PipelineConnection):
def __init__(self, default_connections, keyed_connections, fields, key):
super(DuplicatesConnection, self).__init__(default_connections, keyed_connections, fields)
# convert field selection into field indices
indices = asindices(fields, key)
# now use field indices to construct a _getkey function
# N.B., this may raise an exception on short rows, depending on
# the field selection
self.getkey = itemgetter(*indices)
# initial state
self.previous = None
self.previous_is_duplicate = False
def _broadcast_duplicate(self, row):
self.broadcast(row)
def _broadcast_unique(self, row):
self.broadcast('remainder', row)
def accept(self, row):
if self.previous is None:
self.previous = row
else:
# TODO repeat calculation of key could be removed?
kprev = self.getkey(self.previous)
kcurr = self.getkey(row)
if kprev == kcurr:
if not self.previous_is_duplicate:
self._broadcast_duplicate(self.previous)
self.previous_is_duplicate = True
self._broadcast_duplicate(row)
else:
if not self.previous_is_duplicate:
# forward unique row
self._broadcast_unique(self.previous)
# reset
self.previous_is_duplicate = False
self.previous = row
def close(self):
if not self.previous_is_duplicate:
# forward unique row
self._broadcast_unique(self.previous)
super(DuplicatesConnection, self).close()
def unique(key):
"""
Report rows with unique key values. E.g.::
>>> from petl.push import unique, tocsv
>>> p = unique('foo')
>>> p.pipe(tocsv('foo_uniq.csv'))
>>> p.pipe('remainder', tocsv('foo_dups.csv'))
>>> p.push(sometable)
N.B., assumes data are already sorted by the given key. See also
:func:`duplicates`.
"""
return UniqueComponent(key)
class UniqueComponent(DuplicatesComponent):
def __init__(self, key):
super(UniqueComponent, self).__init__(key)
def connect(self, fields):
default_connections, keyed_connections = self._connect_receivers(fields)
return UniqueConnection(default_connections, keyed_connections, fields, self.key)
class UniqueConnection(DuplicatesConnection):
def __init__(self, default_connections, keyed_connections, fields, key):
super(UniqueConnection, self).__init__(default_connections, keyed_connections, fields, key)
def _broadcast_duplicate(self, row):
self.broadcast('remainder', row)
def _broadcast_unique(self, row):
self.broadcast(row) # unique on default pipe
def diff():
"""
Find rows that differ between two tables. E.g.::
>>> from petl.push import diff, tocsv
>>> p = diff()
>>> p.pipe('+', tocsv('added.csv'))
>>> p.pipe('-', tocsv('subtracted.csv'))
>>> p.pipe(tocsv('common.csv'))
>>> p.push(sometable, someothertable)
"""
return DiffComponent()
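# Hedged usage sketch with inline tables (header row first); the merge below
# walks both inputs in order, so it assumes they are sorted identically:
#
#   ta = [('foo',), ('a',), ('b',), ('c',)]
#   tb = [('foo',), ('b',), ('c',), ('d',)]
#   p = diff()
#   p.pipe('-', tocsv('only_in_a.csv'))   # ('a',)
#   p.pipe('+', tocsv('only_in_b.csv'))   # ('d',)
#   p.pipe(tocsv('in_both.csv'))          # ('b',), ('c',)
#   p.push(ta, tb)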
class DiffComponent(PipelineComponent):
def __init__(self):
super(DiffComponent, self).__init__()
def push(self, ta, tb, limit=None):
ita = iter(ta)
itb = iter(tb)
aflds = [str(f) for f in next(ita)]
next(itb) # ignore b fields
default_connections, keyed_connections = self._connect_receivers(aflds)
def _broadcast(*args):
if len(args) == 1:
for c in default_connections:
c.accept(args[0])
else:
key, row = args
if key in keyed_connections:
for c in keyed_connections[key]:
c.accept(row)
try:
a = tuple(next(ita))
except StopIteration:
# a is empty, everything in b is added
for b in itb:
_broadcast('+', b)
else:
try:
b = tuple(next(itb))
except StopIteration:
# b is empty, everything in a is subtracted
_broadcast('-', a)
for a in ita:
_broadcast('-', a)
else:
while a is not None or b is not None:
if b is None or (a is not None and a < b):
_broadcast('-', a)
# advance a
try:
a = tuple(next(ita))
except StopIteration:
a = None
elif a == b:
_broadcast(a) # default channel
# advance both
try:
a = tuple(next(ita))
except StopIteration:
a = None
try:
b = tuple(next(itb))
except StopIteration:
b = None
else:
_broadcast('+', b)
# advance b
try:
b = tuple(next(itb))
except StopIteration:
b = None
# TODO standard components (one in, one out)...
# totext
# tosqlite3
# todb
# toxml
# tojson
# todicts
# tolist
# rename
# setheader
# extendheader
# pushheader
# skip
# skipcomments
# rowslice
# head
# tail
# cut
# cutout
# select
# selectop
# selecteq
# selectne
# selectlt
# selectle
# selectgt
# selectge
# selectrangeopen
# selectrangeopenleft
# selectrangeopenright
# selectrangeclosed
# selectin
# selectnotin
# selectis
# selectisnot
# selectre
# rowselect
# rowlenselect
# fieldselect
# replace
# replaceall
# convert
# convertall
# fieldconvert
# convertnumbers
# resub
# extend
# capture
# split
# unpack
# fieldmap
# rowmap
# rowmapmany
# sort
# aggregate
# rangeaggregate
# rangecounts
# rowreduce
# rangerowreduce
# mergereduce
# melt
# recast
# transpose
# pivot
#
# TODO branching components (one in, many out)...
# conflicts (default pipe is conflicts, 'remainder' is the rest)
#
# TODO special components (many in)...
# cat (no point?)
# joins
# complement (default pipe is complement, 'remainder' is the rest)
# recordcomplement
# recorddiff
# intersection
# mergesort
# merge
#
|
|
import json
import re
from compare import expect, ensure, matcher
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from tardis.tardis_portal.models import \
Experiment, ObjectACL, User, UserProfile
from tardis.tardis_portal.ParameterSetManager import ParameterSetManager
@matcher
def to_match(self, regex):
assert re.search(regex, self.value)
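# Illustrative note (comment only): the @matcher decorator registers
# to_match() so that expect(value).to_match(regex) asserts the regex is found
# in value, as used by the list tests further down, e.g.:
#
#   expect('Title #7').to_match(r'^Title #\d+$')   # passes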
def _create_user_and_login(username='testuser', password='testpass'):
user = User.objects.create_user(username, '', password)
user.save()
client = Client()
client.login(username=username, password=password)
return (user, client)
class TabTestCase(TestCase):
def setUp(self):
user, client = _create_user_and_login()
experiment = Experiment(title='Norwegian Blue',
description='Parrot + 40kV',
created_by=user)
experiment.save()
acl = ObjectACL(content_object=experiment,
pluginId='django_user',
entityId=str(user.id),
isOwner=False,
canRead=True,
canWrite=False,
canDelete=False,
aclOwnershipType=ObjectACL.OWNER_OWNED)
acl.save()
self.client = client
self.experiment = experiment
def testAccessWithoutReadPerms(self):
client = Client()
response = client.get(
reverse('tardis.apps.related_info.views.index',
args=[self.experiment.id]))
expect(response.status_code).to_equal(403)
def testAccessWithReadPerms(self):
response = self.client.get(
reverse('tardis.apps.related_info.views.index',
args=[self.experiment.id]))
expect(response.status_code).to_equal(200)
class ListTestCase(TestCase):
def setUp(self):
user, client = _create_user_and_login()
experiment = Experiment(title='Norwegian Blue',
description='Parrot + 40kV',
created_by=user)
experiment.save()
acl = ObjectACL(content_object=experiment,
pluginId='django_user',
entityId=str(user.id),
isOwner=False,
canRead=True,
canWrite=False,
canDelete=False,
aclOwnershipType=ObjectACL.OWNER_OWNED)
acl.save()
self.client = client
self.experiment = experiment
def testHandlesEmptySet(self):
response = self.client.get(
reverse('tardis.apps.related_info.views.'
+ 'list_or_create_related_info',
args=[self.experiment.id]))
expect(response.status_code).to_equal(200)
expect(response['Content-Type'])\
.to_equal('application/json; charset=utf-8')
expect(response.content).to_equal('[]')
def testHandlesSingleEntry(self):
from ..views import SCHEMA_URI
psm = ParameterSetManager(parentObject=self.experiment,
schema=SCHEMA_URI)
params = {'type': 'website',
'identifier': 'https://www.google.com/',
'title': 'Google',
'notes': 'This is a note.'}
for k, v in params.items():
psm.set_param(k, v)
response = self.client.get(
reverse('tardis.apps.related_info.views.'
+ 'list_or_create_related_info',
args=[self.experiment.id]))
expect(response.status_code).to_equal(200)
expect(response['Content-Type'])\
.to_equal('application/json; charset=utf-8')
objs = json.loads(response.content)
expect(len(objs)).to_be(1)
for k in params.keys():
expect(objs[0][k]).to_equal(params[k])
def testHandlesMultipleEntries(self):
from ..views import SCHEMA_URI
param_list = ({'type': 'website',
'identifier': 'https://www.example.test/%d' % i,
'title': 'Title #%d' % i,
'notes': 'This is note #%d.' % i} for i in range(10))
for params in param_list:
psm = ParameterSetManager(parentObject=self.experiment,
schema=SCHEMA_URI)
for k, v in params.items():
psm.set_param(k, v)
response = self.client.get(
reverse('tardis.apps.related_info.views.'
+ 'list_or_create_related_info',
args=[self.experiment.id]))
expect(response.status_code).to_equal(200)
expect(response['Content-Type'])\
.to_equal('application/json; charset=utf-8')
objs = json.loads(response.content)
expect(len(objs)).to_be(10)
for obj in objs:
expect(obj['type']).to_equal('website')
expect(obj['identifier']).to_match(r'www.example.test/\d+$')
expect(obj['title']).to_match(r'^Title #\d+$')
expect(obj['notes']).to_match(r'note #\d+\.$')
class GetTestCase(TestCase):
def setUp(self):
user, client = _create_user_and_login()
experiment = Experiment(title='Norwegian Blue',
description='Parrot + 40kV',
created_by=user)
experiment.save()
acl = ObjectACL(content_object=experiment,
pluginId='django_user',
entityId=str(user.id),
isOwner=False,
canRead=True,
canWrite=False,
canDelete=False,
aclOwnershipType=ObjectACL.OWNER_OWNED)
acl.save()
self.client = client
self.experiment = experiment
def testHandlesNotFound(self):
response = self.client.get(
reverse('tardis.apps.related_info.views.'
+ 'get_or_update_or_delete_related_info',
args=[self.experiment.id, 0]))
expect(response.status_code).to_equal(404)
def testHandlesFound(self):
from ..views import SCHEMA_URI
psm = ParameterSetManager(parentObject=self.experiment,
schema=SCHEMA_URI)
params = {'type': 'website',
'identifier': 'https://www.google.com/',
'title': 'Google',
'notes': 'This is a note.'}
for k, v in params.items():
psm.set_param(k, v)
response = self.client.get(
reverse('tardis.apps.related_info.views.'
+ 'get_or_update_or_delete_related_info',
args=[self.experiment.id, psm.parameterset.id]))
expect(response.status_code).to_equal(200)
obj = json.loads(response.content)
for k in params.keys():
expect(obj[k]).to_equal(params[k])
class CreateTestCase(TestCase):
def setUp(self):
user, client = _create_user_and_login()
experiment = Experiment(title='Norwegian Blue',
description='Parrot + 40kV',
created_by=user)
experiment.save()
acl = ObjectACL(content_object=experiment,
pluginId='django_user',
entityId=str(user.id),
isOwner=False,
canRead=True,
canWrite=True,
canDelete=False,
aclOwnershipType=ObjectACL.OWNER_OWNED)
acl.save()
self.acl = acl
self.client = client
self.experiment = experiment
def testMustHaveWrite(self):
self.acl.canWrite = False
self.acl.save()
params = {'type': 'website',
'identifier': 'https://www.google.com/',
'title': 'Google',
'notes': 'This is a note.'}
response = self.client.post(
reverse('tardis.apps.related_info.views.'
+ 'list_or_create_related_info',
args=[self.experiment.id]),
data=json.dumps(params),
content_type='application/json')
expect(response.status_code).to_equal(403)
def testCanCreate(self):
params = {'type': 'website',
'identifier': 'https://www.google.com/',
'title': 'Google',
'notes': 'This is a note.'}
response = self.client.post(
reverse('tardis.apps.related_info.views.'
+ 'list_or_create_related_info',
args=[self.experiment.id]),
data=json.dumps(params),
content_type='application/json')
# Check that content reports as created, returns the created object
expect(response.status_code).to_equal(201)
obj = json.loads(response.content)
ensure(isinstance(obj['id'], int), True,
message='Created object should have an ID.')
for k in params.keys():
expect(obj[k]).to_equal(params[k])
# Check that creation really did persist
response = self.client.get(
reverse('tardis.apps.related_info.views.'
+ 'get_or_update_or_delete_related_info',
args=[self.experiment.id, obj['id']]))
expect(response.status_code).to_equal(200)
def testDetectsBadInput(self):
def do_post(params):
return self.client.post(
reverse('tardis.apps.related_info.views.'
+ 'list_or_create_related_info',
args=[self.experiment.id]),
data=json.dumps(params),
content_type='application/json')
# We need an identifier
params = {'type': 'website'}
response = do_post(params)
expect(response.status_code).to_equal(400)
# We need a type
params = {'identifier': 'http://www.google.com/'}
response = do_post(params)
expect(response.status_code).to_equal(400)
# With both a type and an identifier, creation should succeed
params = {'type': 'website', 'identifier': 'http://www.google.com/'}
response = do_post(params)
expect(response.status_code).to_equal(201)
class UpdateTestCase(TestCase):
def setUp(self):
user, client = _create_user_and_login()
experiment = Experiment(title='Norwegian Blue',
description='Parrot + 40kV',
created_by=user)
experiment.save()
acl = ObjectACL(content_object=experiment,
pluginId='django_user',
entityId=str(user.id),
isOwner=False,
canRead=True,
canWrite=True,
canDelete=False,
aclOwnershipType=ObjectACL.OWNER_OWNED)
acl.save()
self.acl = acl
self.client = client
self.experiment = experiment
def _create_initial_entry(self):
params = {'type': 'website',
'identifier': 'https://www.google.com/',
'title': 'Google',
'notes': 'This is a note.'}
response = self.client.post(reverse('tardis.apps.related_info.views.'
+ 'list_or_create_related_info',
args=[self.experiment.id]),
data=json.dumps(params),
content_type='application/json')
expect(response.status_code).to_equal(201)
return json.loads(response.content)
def testMustHaveWrite(self):
related_info_id = self._create_initial_entry()['id']
self.acl.canWrite = False
self.acl.save()
params = {'type': 'website',
'identifier': 'https://www.google.com/'}
response = self.client.put(
reverse('tardis.apps.related_info.views.'
+ 'get_or_update_or_delete_related_info',
args=[self.experiment.id, related_info_id]),
data=json.dumps(params),
content_type='application/json')
expect(response.status_code).to_equal(403)
def testDetectsBadInput(self):
def do_put(params):
return self.client.put(
reverse('tardis.apps.related_info.views.'
+ 'get_or_update_or_delete_related_info',
args=[self.experiment.id,
self._create_initial_entry()['id']]),
data=json.dumps(params),
content_type='application/json')
# We need an identifier
params = {'type': 'website'}
response = do_put(params)
expect(response.status_code).to_equal(400)
# We need a type
params = {'identifier': 'http://www.google.com/'}
response = do_put(params)
expect(response.status_code).to_equal(400)
# With both a type and an identifier, the update should succeed
params = {'type': 'website',
'identifier': 'http://www.google.com/'}
response = do_put(params)
expect(response.status_code).to_equal(201)
class DeleteTestCase(TestCase):
def setUp(self):
user, client = _create_user_and_login()
experiment = Experiment(title='Norwegian Blue',
description='Parrot + 40kV',
created_by=user)
experiment.save()
acl = ObjectACL(content_object=experiment,
pluginId='django_user',
entityId=str(user.id),
isOwner=False,
canRead=True,
canWrite=True,
canDelete=False,
aclOwnershipType=ObjectACL.OWNER_OWNED)
acl.save()
self.acl = acl
self.client = client
self.experiment = experiment
def _create_initial_entry(self):
params = {'type': 'website',
'identifier': 'https://www.google.com/',
'title': 'Google',
'notes': 'This is a note.'}
response = self.client.post(reverse('tardis.apps.related_info.views.'
+ 'list_or_create_related_info',
args=[self.experiment.id]),
data=json.dumps(params),
content_type='application/json')
expect(response.status_code).to_equal(201)
return json.loads(response.content)
def testMustHaveWrite(self):
related_info_id = self._create_initial_entry()['id']
self.acl.canWrite = False
self.acl.save()
response = self.client.delete(
reverse('tardis.apps.related_info.views.'
+ 'get_or_update_or_delete_related_info',
args=[self.experiment.id, related_info_id]))
expect(response.status_code).to_equal(403)
def testCanDelete(self):
response = self.client.delete(
reverse('tardis.apps.related_info.views.'
+ 'get_or_update_or_delete_related_info',
args=[self.experiment.id,
self._create_initial_entry()['id']]))
expect(response.status_code).to_equal(200)
obj = json.loads(response.content)
expect(len(obj.keys())).to_be_greater_than(1)
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class VPNaaSTestJSON(base.BaseAdminNetworkTest):
_interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
List, Show, Create, Delete, and Update VPN Service
List, Show, Create, Delete, and Update IKE policy
List, Show, Create, Delete, and Update IPSec policy
"""
@classmethod
def resource_setup(cls):
if not test.is_extension_enabled('vpnaas', 'network'):
msg = "vpnaas extension not enabled."
raise cls.skipException(msg)
super(VPNaaSTestJSON, cls).resource_setup()
cls.ext_net_id = CONF.network.public_network_id
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router(
data_utils.rand_name("router"),
external_network_id=CONF.network.public_network_id)
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
cls.vpnservice = cls.create_vpnservice(cls.subnet['id'],
cls.router['id'])
cls.ikepolicy = cls.create_ikepolicy(
data_utils.rand_name("ike-policy-"))
cls.ipsecpolicy = cls.create_ipsecpolicy(
data_utils.rand_name("ipsec-policy-"))
def _delete_ike_policy(self, ike_policy_id):
# Deletes an IKE policy and verifies that it has been deleted
ike_list = list()
all_ike = self.client.list_ikepolicies()
for ike in all_ike['ikepolicies']:
ike_list.append(ike['id'])
if ike_policy_id in ike_list:
self.client.delete_ikepolicy(ike_policy_id)
# Asserting that the policy is not found in list after deletion
ikepolicies = self.client.list_ikepolicies()
ike_id_list = list()
for i in ikepolicies['ikepolicies']:
ike_id_list.append(i['id'])
self.assertNotIn(ike_policy_id, ike_id_list)
def _delete_ipsec_policy(self, ipsec_policy_id):
# Deletes an IPSec policy if it exists
try:
self.client.delete_ipsecpolicy(ipsec_policy_id)
except exceptions.NotFound:
pass
def _assertExpected(self, expected, actual):
# Check that the expected keys/values exist in the actual response body
for key, value in expected.iteritems():
self.assertIn(key, actual)
self.assertEqual(value, actual[key])
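# e.g. self._assertExpected({'name': 'x'}, {'name': 'x', 'id': 7}) passes,
# while a missing key or a differing value fails the assertion.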
def _delete_vpn_service(self, vpn_service_id):
self.client.delete_vpnservice(vpn_service_id)
# Assert that the vpn service is not found in the list after deletion
body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
self.assertNotIn(vpn_service_id, vpn_services)
def _get_tenant_id(self):
"""
Returns the tenant_id of the client's current user
"""
# TODO(jroovers) This is a temporary workaround to get the tenant_id
# of the current client. Replace this once tenant_isolation for
# neutron is fixed.
body = self.client.show_network(self.network['id'])
return body['network']['tenant_id']
@test.attr(type='smoke')
def test_admin_create_ipsec_policy_for_tenant(self):
tenant_id = self._get_tenant_id()
# Create IPSec policy for the newly created tenant
name = data_utils.rand_name('ipsec-policy')
body = (self.admin_client.
create_ipsecpolicy(name=name, tenant_id=tenant_id))
ipsecpolicy = body['ipsecpolicy']
self.assertIsNotNone(ipsecpolicy['id'])
self.addCleanup(self.admin_client.delete_ipsecpolicy,
ipsecpolicy['id'])
# Assert that created ipsec policy is found in API list call
body = self.client.list_ipsecpolicies()
ipsecpolicies = [policy['id'] for policy in body['ipsecpolicies']]
self.assertIn(ipsecpolicy['id'], ipsecpolicies)
@test.attr(type='smoke')
def test_admin_create_vpn_service_for_tenant(self):
tenant_id = self._get_tenant_id()
# Create vpn service for the newly created tenant
network2 = self.create_network()
subnet2 = self.create_subnet(network2)
router2 = self.create_router(data_utils.rand_name('router-'),
external_network_id=self.ext_net_id)
self.create_router_interface(router2['id'], subnet2['id'])
name = data_utils.rand_name('vpn-service')
body = self.admin_client.create_vpnservice(
subnet_id=subnet2['id'],
router_id=router2['id'],
name=name,
admin_state_up=True,
tenant_id=tenant_id)
vpnservice = body['vpnservice']
self.assertIsNotNone(vpnservice['id'])
self.addCleanup(self.admin_client.delete_vpnservice, vpnservice['id'])
# Assert that created vpnservice is found in API list call
body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
self.assertIn(vpnservice['id'], vpn_services)
@test.attr(type='smoke')
def test_admin_create_ike_policy_for_tenant(self):
tenant_id = self._get_tenant_id()
# Create IKE policy for the newly created tenant
name = data_utils.rand_name('ike-policy')
body = (self.admin_client.
create_ikepolicy(name=name, ike_version="v1",
encryption_algorithm="aes-128",
auth_algorithm="sha1",
tenant_id=tenant_id))
ikepolicy = body['ikepolicy']
self.assertIsNotNone(ikepolicy['id'])
self.addCleanup(self.admin_client.delete_ikepolicy, ikepolicy['id'])
# Assert that created ike policy is found in API list call
body = self.client.list_ikepolicies()
ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
self.assertIn(ikepolicy['id'], ikepolicies)
@test.attr(type='smoke')
def test_list_vpn_services(self):
# Verify the VPN service exists in the list of all VPN services
body = self.client.list_vpnservices()
vpnservices = body['vpnservices']
self.assertIn(self.vpnservice['id'], [v['id'] for v in vpnservices])
@test.attr(type='smoke')
def test_create_update_delete_vpn_service(self):
# Creates a VPN service and sets up deletion
network1 = self.create_network()
subnet1 = self.create_subnet(network1)
router1 = self.create_router(data_utils.rand_name('router-'),
external_network_id=self.ext_net_id)
self.create_router_interface(router1['id'], subnet1['id'])
name = data_utils.rand_name('vpn-service1')
body = self.client.create_vpnservice(subnet_id=subnet1['id'],
router_id=router1['id'],
name=name,
admin_state_up=True)
vpnservice = body['vpnservice']
self.addCleanup(self._delete_vpn_service, vpnservice['id'])
# Assert that the created vpnservice is found in the vpnservices list
body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
self.assertIsNotNone(vpnservice['id'])
self.assertIn(vpnservice['id'], vpn_services)
# TODO(raies): implement logic to update vpnservice
# VPNaaS client function to update is implemented.
# But precondition is that current state of vpnservice
# should be "ACTIVE" not "PENDING*"
@test.attr(type='smoke')
def test_show_vpn_service(self):
# Verifies the details of a vpn service
body = self.client.show_vpnservice(self.vpnservice['id'])
vpnservice = body['vpnservice']
self.assertEqual(self.vpnservice['id'], vpnservice['id'])
self.assertEqual(self.vpnservice['name'], vpnservice['name'])
self.assertEqual(self.vpnservice['description'],
vpnservice['description'])
self.assertEqual(self.vpnservice['router_id'], vpnservice['router_id'])
self.assertEqual(self.vpnservice['subnet_id'], vpnservice['subnet_id'])
self.assertEqual(self.vpnservice['tenant_id'], vpnservice['tenant_id'])
valid_status = ["ACTIVE", "DOWN", "BUILD", "ERROR", "PENDING_CREATE",
"PENDING_UPDATE", "PENDING_DELETE"]
self.assertIn(vpnservice['status'], valid_status)
@test.attr(type='smoke')
def test_list_ike_policies(self):
# Verify the ike policy exists in the list of all IKE policies
body = self.client.list_ikepolicies()
ikepolicies = body['ikepolicies']
self.assertIn(self.ikepolicy['id'], [i['id'] for i in ikepolicies])
@test.attr(type='smoke')
def test_create_update_delete_ike_policy(self):
# Creates a IKE policy
name = data_utils.rand_name('ike-policy')
body = (self.client.create_ikepolicy(
name=name,
ike_version="v1",
encryption_algorithm="aes-128",
auth_algorithm="sha1"))
ikepolicy = body['ikepolicy']
self.assertIsNotNone(ikepolicy['id'])
self.addCleanup(self._delete_ike_policy, ikepolicy['id'])
# Update IKE Policy
new_ike = {'name': data_utils.rand_name("New-IKE"),
'description': "Updated ike policy",
'encryption_algorithm': "aes-256",
'ike_version': "v2",
'pfs': "group14",
'lifetime': {'units': "seconds", 'value': 2000}}
self.client.update_ikepolicy(ikepolicy['id'], **new_ike)
# Confirm that update was successful by verifying using 'show'
body = self.client.show_ikepolicy(ikepolicy['id'])
ike_policy = body['ikepolicy']
for key, value in new_ike.iteritems():
self.assertIn(key, ike_policy)
self.assertEqual(value, ike_policy[key])
# Verification of ike policy delete
self.client.delete_ikepolicy(ikepolicy['id'])
body = self.client.list_ikepolicies()
ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
self.assertNotIn(ike_policy['id'], ikepolicies)
@test.attr(type='smoke')
def test_show_ike_policy(self):
# Verifies the details of a ike policy
body = self.client.show_ikepolicy(self.ikepolicy['id'])
ikepolicy = body['ikepolicy']
self.assertEqual(self.ikepolicy['id'], ikepolicy['id'])
self.assertEqual(self.ikepolicy['name'], ikepolicy['name'])
self.assertEqual(self.ikepolicy['description'],
ikepolicy['description'])
self.assertEqual(self.ikepolicy['encryption_algorithm'],
ikepolicy['encryption_algorithm'])
self.assertEqual(self.ikepolicy['auth_algorithm'],
ikepolicy['auth_algorithm'])
self.assertEqual(self.ikepolicy['tenant_id'],
ikepolicy['tenant_id'])
self.assertEqual(self.ikepolicy['pfs'],
ikepolicy['pfs'])
self.assertEqual(self.ikepolicy['phase1_negotiation_mode'],
ikepolicy['phase1_negotiation_mode'])
self.assertEqual(self.ikepolicy['ike_version'],
ikepolicy['ike_version'])
@test.attr(type='smoke')
def test_list_ipsec_policies(self):
# Verify the ipsec policy exists in the list of all ipsec policies
body = self.client.list_ipsecpolicies()
ipsecpolicies = body['ipsecpolicies']
self.assertIn(self.ipsecpolicy['id'], [i['id'] for i in ipsecpolicies])
@test.attr(type='smoke')
def test_create_update_delete_ipsec_policy(self):
# Creates an ipsec policy
ipsec_policy_body = {'name': data_utils.rand_name('ipsec-policy'),
'pfs': 'group5',
'encryption_algorithm': "aes-128",
'auth_algorithm': 'sha1'}
resp_body = self.client.create_ipsecpolicy(**ipsec_policy_body)
ipsecpolicy = resp_body['ipsecpolicy']
self.addCleanup(self._delete_ipsec_policy, ipsecpolicy['id'])
self._assertExpected(ipsec_policy_body, ipsecpolicy)
# Verification of ipsec policy update
new_ipsec = {'description': 'Updated ipsec policy',
'pfs': 'group2',
'name': data_utils.rand_name("New-IPSec"),
'encryption_algorithm': "aes-256",
'lifetime': {'units': "seconds", 'value': '2000'}}
body = self.client.update_ipsecpolicy(ipsecpolicy['id'],
**new_ipsec)
updated_ipsec_policy = body['ipsecpolicy']
self._assertExpected(new_ipsec, updated_ipsec_policy)
# Verification of ipsec policy delete
self.client.delete_ipsecpolicy(ipsecpolicy['id'])
self.assertRaises(exceptions.NotFound,
self.client.delete_ipsecpolicy, ipsecpolicy['id'])
@test.attr(type='smoke')
def test_show_ipsec_policy(self):
# Verifies the details of an ipsec policy
body = self.client.show_ipsecpolicy(self.ipsecpolicy['id'])
ipsecpolicy = body['ipsecpolicy']
self._assertExpected(self.ipsecpolicy, ipsecpolicy)
|
|
# Import statements
import sys
sys.path.append("/home/pi/Documents/Robots/slcypi/MA") ### ADD PATH
sys.path.append("/home/pi/Documents/Robots/slcypi/HAT_Python3") ### ADD PATH
import cv2
import numpy as np
import matplotlib.pyplot as plt
from Tank import Tank
from ImageAnalysis import ImageAnalysis
import picamera
import picamera.array
import time
import pygame
from scipy import ndimage
from time import sleep
# Settings
WIDTH = 320
HEIGHT = 240
# Initialize Tank
robot = Tank()
robot.correctDirections(True,True,True)
# Initialize ImageAnalysis
IA = ImageAnalysis()
IA.filterLower = np.array([25,35,70])
IA.filterUpper = np.array([65,255,205])
# Initialize Pygame
pygame.init()
pygame.display.set_caption('My Robot')
screen = pygame.display.set_mode((WIDTH,HEIGHT),0)
# Start settings
auto = False
done = False
viewOptions = ["noFilter","colorFilter","lineDetection"]
viewNr = 1
startTime = time.time()
def toggleView(viewNr):
viewNr = viewNr + 1
if viewNr > 2:
viewNr = 0
return(viewNr)
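# Small usage note (comment only): toggleView cycles the index through the
# three viewOptions entries, wrapping back to 0 after the last one, e.g.
#   toggleView(1) -> 2; toggleView(2) -> 0; toggleView(0) -> 1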
with picamera.PiCamera() as camera:
with picamera.array.PiRGBArray(camera) as stream:
camera.resolution = (WIDTH, HEIGHT)
while done == False:
# Image capture
camera.capture(stream, 'bgr', use_video_port=True)
bgr = stream.array
# Image process
res, mask = IA.colorFilter(bgr, False, False)
if viewOptions[viewNr] == "noFilter":
res = bgr
if viewOptions[viewNr] == "lineDetection":
res = IA.edgeDetection(bgr)
# Image transpose
res = cv2.transpose(res)
mask = np.transpose(mask)
# Image display
sface = pygame.surfarray.make_surface(res)
screen.blit(sface,(0,0))
pygame.display.update()
# User events
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
# Exit on escape
if (event.key == pygame.K_ESCAPE):
done = True
# View toggle
if event.key == (pygame.K_v):
viewNr = toggleView(viewNr)
# Drive commands
if event.key == (pygame.K_UP):
robot.driveSync(1)
if event.key == (pygame.K_DOWN):
robot.driveSync(-1)
if (event.key == pygame.K_LEFT):
robot.rotateSync(1,45)
if (event.key == pygame.K_RIGHT):
robot.rotateSync(-1,45)
if (event.key == pygame.K_q):
auto = True
if (event.key == pygame.K_w):
auto = False
robot.driveSync(0)
robot.rotateSync(0)
if (event.key == pygame.K_7):
IA.filterUpper[0] = IA.filterUpper[0] + 5
print(IA.filterUpper)
if (event.key == pygame.K_u):
IA.filterUpper[0] = IA.filterUpper[0] - 5
print(IA.filterUpper)
if (event.key == pygame.K_j):
IA.filterLower[0] = IA.filterLower[0] + 5
print(IA.filterLower)
if (event.key == pygame.K_m):
IA.filterLower[0] = IA.filterLower[0] - 5
print(IA.filterLower)
if (event.key == pygame.K_8):
IA.filterUpper[1] = IA.filterUpper[1] + 5
print(IA.filterUpper)
if (event.key == pygame.K_i):
IA.filterUpper[1] = IA.filterUpper[1] - 5
print(IA.filterUpper)
if (event.key == pygame.K_k):
IA.filterLower[1] = IA.filterLower[1] + 5
print(IA.filterLower)
if (event.key == pygame.K_COMMA):
IA.filterLower[1] = IA.filterLower[1] - 5
print(IA.filterLower)
if (event.key == pygame.K_9):
IA.filterUpper[2] = IA.filterUpper[2] + 5
print(IA.filterUpper)
if (event.key == pygame.K_o):
IA.filterUpper[2] = IA.filterUpper[2] - 5
print(IA.filterUpper)
if (event.key == pygame.K_l):
IA.filterLower[2] = IA.filterLower[2] + 5
print(IA.filterLower)
if (event.key == pygame.K_PERIOD):
IA.filterLower[2] = IA.filterLower[2] - 5
print(IA.filterLower)
if event.type == pygame.KEYUP:
if event.key == (pygame.K_UP):
robot.driveSync(0)
if event.key == (pygame.K_DOWN):
robot.driveSync(0)
if (event.key == pygame.K_LEFT):
robot.rotateSync(0)
if (event.key == pygame.K_RIGHT):
robot.rotateSync(0)
# Autonomous
if auto == True:
# Analyze line
aRes = IA.blockAnalyze(mask)
print(aRes)
dir = aRes[0]
count = aRes[1]
# Drive
if abs(dir) > 0.20:
rotateSpeed = 60
if abs(dir) > 0.5:
rotateSpeed = 80
if dir > 0:
print("Rotate -1")
robot.rotateSync(-1, rotateSpeed)
sleep(0.05)
robot.rotateSync(0)
else:
print("Rotate 1")
robot.rotateSync(1, rotateSpeed)
sleep(0.05)
robot.rotateSync(0)
if dir > -999:
relCount = (1 - abs(dir)) * count
if count > 800:
driveSpeed = 50
if count > 10000:
driveSpeed = int(relCount / 10000 * 50)
if driveSpeed > 45 :
robot.driveSync(1, driveSpeed)
else:
robot.driveSync(0)
else:
robot.driveSync(0)
# Handle stream
stream.seek(0)
stream.truncate()
# Compute fps
lapseTime = (time.time() - startTime)
startTime = time.time()
if lapseTime > 0:
fps = 1.0 / lapseTime
print("fps: " + str(fps))
robot.stop()
pygame.quit()
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import fileinput
import os
import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
import urllib2, base64, httplib
from StringIO import StringIO as BytesIO
from datetime import datetime
from resource_management.core.resources.system import File, Directory, Execute
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
from resource_management.core.source import DownloadSource, InlineTemplate
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.ranger_functions import Rangeradmin
from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
from resource_management.libraries.functions.decorator import safe_retry
from resource_management.core.utils import PasswordString
from resource_management.core.shell import as_sudo
import re
import time
import socket
def password_validation(password, key):
import params
if password.strip() == "":
raise Fail("Blank password is not allowed for {0} property. Please enter valid password.".format(key))
if re.search("[\\\`'\"]",password):
raise Fail("{0} password contains one of the unsupported special characters like \" ' \ `".format(key))
else:
Logger.info("Password validated")
def setup_kms_db(stack_version=None):
import params
if params.has_ranger_admin:
kms_home = params.kms_home
version = params.version
if stack_version is not None:
kms_home = format("{stack_root}/{stack_version}/ranger-kms")
version = stack_version
password_validation(params.kms_master_key_password, 'KMS master key')
copy_jdbc_connector(stack_version=version)
env_dict = {'RANGER_KMS_HOME':kms_home, 'JAVA_HOME': params.java_home}
if params.db_flavor.lower() == 'sqla':
env_dict = {'RANGER_KMS_HOME':kms_home, 'JAVA_HOME': params.java_home, 'LD_LIBRARY_PATH':params.ld_library_path}
dba_setup = format('ambari-python-wrap {kms_home}/dba_script.py -q')
db_setup = format('ambari-python-wrap {kms_home}/db_setup.py')
if params.create_db_user:
Logger.info('Setting up Ranger KMS DB and DB User')
Execute(dba_setup, environment=env_dict, logoutput=True, user=params.kms_user, tries=5, try_sleep=10)
else:
Logger.info('Separate DBA property not set. Assuming Ranger KMS DB and DB User exist!')
Execute(db_setup, environment=env_dict, logoutput=True, user=params.kms_user, tries=5, try_sleep=10)
def setup_java_patch():
import params
if params.has_ranger_admin:
kms_home = params.kms_home
setup_java_patch = format('ambari-python-wrap {kms_home}/db_setup.py -javapatch')
env_dict = {'RANGER_KMS_HOME':kms_home, 'JAVA_HOME': params.java_home}
if params.db_flavor.lower() == 'sqla':
env_dict = {'RANGER_KMS_HOME':kms_home, 'JAVA_HOME': params.java_home, 'LD_LIBRARY_PATH':params.ld_library_path}
Execute(setup_java_patch, environment=env_dict, logoutput=True, user=params.kms_user, tries=5, try_sleep=10)
kms_lib_path = format('{kms_home}/ews/webapp/lib/')
files = os.listdir(kms_lib_path)
hadoop_jar_files = []
for x in files:
if x.startswith('hadoop-common') and x.endswith('.jar'):
hadoop_jar_files.append(x)
if len(hadoop_jar_files) != 0:
for f in hadoop_jar_files:
Execute((format('{java_home}/bin/jar'),'-uf', format('{kms_home}/ews/webapp/lib/{f}'), format('{kms_home}/ews/webapp/META-INF/services/org.apache.hadoop.crypto.key.KeyProviderFactory')),
user=params.kms_user)
File(format('{kms_home}/ews/webapp/lib/{f}'), owner=params.kms_user, group=params.kms_group)
def do_keystore_setup(cred_provider_path, credential_alias, credential_password):
import params
if cred_provider_path is not None:
java_bin = format('{java_home}/bin/java')
file_path = format('jceks://file{cred_provider_path}')
cmd = (java_bin, '-cp', params.cred_lib_path, 'org.apache.ranger.credentialapi.buildks', 'create', credential_alias, '-value', PasswordString(credential_password), '-provider', file_path)
Execute(cmd,
environment={'JAVA_HOME': params.java_home},
logoutput=True,
sudo=True,
)
File(cred_provider_path,
owner = params.kms_user,
group = params.kms_group,
mode = 0640
)
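# Illustrative call (path, alias and password are placeholders mirroring the
# real invocations further down in kms()): each call stores one alias/value
# pair in the JCEKS file referenced by cred_provider_path.
#
#   do_keystore_setup('/etc/ranger/kms/rangerkms.jceks',
#                     'ranger.db.encrypt.key.password', 'StrongPassword1!')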
def kms(upgrade_type=None):
import params
if params.has_ranger_admin:
Directory(params.kms_conf_dir,
owner = params.kms_user,
group = params.kms_group,
create_parents = True
)
Directory("/etc/security/serverKeys",
create_parents = True,
cd_access = "a"
)
Directory("/etc/ranger/kms",
create_parents = True,
cd_access = "a"
)
copy_jdbc_connector()
File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
mode = 0644,
)
cp = format("{check_db_connection_jar}")
if params.db_flavor.lower() == 'sqla':
cp = cp + os.pathsep + format("{kms_home}/ews/webapp/lib/sajdbc4.jar")
else:
path_to_jdbc = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")
if not os.path.isfile(path_to_jdbc):
path_to_jdbc = format("{kms_home}/ews/webapp/lib/") + \
params.default_connectors_map[params.db_flavor.lower()] if params.db_flavor.lower() in params.default_connectors_map else None
if not os.path.isfile(path_to_jdbc):
path_to_jdbc = format("{kms_home}/ews/webapp/lib/") + "*"
error_message = "Error! Sorry, but we can't find jdbc driver with default name " + params.default_connectors_map[params.db_flavor] + \
" in ranger kms lib dir. So, db connection check can fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc} on server host.'"
Logger.error(error_message)
cp = cp + os.pathsep + path_to_jdbc
db_connection_check_command = format(
"{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_kms_jdbc_connection_url}' {db_user} {db_password!p} {ranger_kms_jdbc_driver}")
env_dict = {}
if params.db_flavor.lower() == 'sqla':
env_dict = {'LD_LIBRARY_PATH':params.ld_library_path}
Execute(db_connection_check_command, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10, environment=env_dict)
if params.xa_audit_db_is_enabled and params.driver_source is not None and not params.driver_source.endswith("/None"):
if params.xa_previous_jdbc_jar and os.path.isfile(params.xa_previous_jdbc_jar):
File(params.xa_previous_jdbc_jar, action='delete')
File(params.downloaded_connector_path,
content = DownloadSource(params.driver_source),
mode = 0644
)
Execute(('cp', '--remove-destination', params.downloaded_connector_path, params.driver_target),
path=["/bin", "/usr/bin/"],
sudo=True)
File(params.driver_target, mode=0644)
Directory(os.path.join(params.kms_home, 'ews', 'webapp', 'WEB-INF', 'classes', 'lib'),
mode=0755,
owner=params.kms_user,
group=params.kms_group
)
Execute(('cp',format('{kms_home}/ranger-kms-initd'),'/etc/init.d/ranger-kms'),
not_if=format('ls /etc/init.d/ranger-kms'),
only_if=format('ls {kms_home}/ranger-kms-initd'),
sudo=True)
File('/etc/init.d/ranger-kms',
mode = 0755
)
Directory(format('{kms_home}/'),
owner = params.kms_user,
group = params.kms_group,
recursive_ownership = True,
)
Directory(params.ranger_kms_pid_dir,
mode=0755,
owner = params.kms_user,
group = params.user_group,
cd_access = "a",
create_parents=True
)
if params.stack_supports_pid:
File(format('{kms_conf_dir}/ranger-kms-env-piddir.sh'),
content = format("export RANGER_KMS_PID_DIR_PATH={ranger_kms_pid_dir}\nexport KMS_USER={kms_user}"),
owner = params.kms_user,
group = params.kms_group,
mode=0755
)
Directory(params.kms_log_dir,
owner = params.kms_user,
group = params.kms_group,
cd_access = 'a',
create_parents=True,
mode=0755
)
File(format('{kms_conf_dir}/ranger-kms-env-logdir.sh'),
content = format("export RANGER_KMS_LOG_DIR={kms_log_dir}"),
owner = params.kms_user,
group = params.kms_group,
mode=0755
)
Execute(('ln','-sf', format('{kms_home}/ranger-kms'),'/usr/bin/ranger-kms'),
not_if=format('ls /usr/bin/ranger-kms'),
only_if=format('ls {kms_home}/ranger-kms'),
sudo=True)
File('/usr/bin/ranger-kms', mode = 0755)
Execute(('ln','-sf', format('{kms_home}/ranger-kms'),'/usr/bin/ranger-kms-services.sh'),
not_if=format('ls /usr/bin/ranger-kms-services.sh'),
only_if=format('ls {kms_home}/ranger-kms'),
sudo=True)
File('/usr/bin/ranger-kms-services.sh', mode = 0755)
Execute(('ln','-sf', format('{kms_home}/ranger-kms-initd'),format('{kms_home}/ranger-kms-services.sh')),
not_if=format('ls {kms_home}/ranger-kms-services.sh'),
only_if=format('ls {kms_home}/ranger-kms-initd'),
sudo=True)
File(format('{kms_home}/ranger-kms-services.sh'), mode = 0755)
Directory(params.kms_log_dir,
owner = params.kms_user,
group = params.kms_group,
mode = 0775
)
do_keystore_setup(params.credential_provider_path, params.jdbc_alias, params.db_password)
do_keystore_setup(params.credential_provider_path, params.masterkey_alias, params.kms_master_key_password)
if params.stack_support_kms_hsm and params.enable_kms_hsm:
do_keystore_setup(params.credential_provider_path, params.hms_partition_alias, unicode(params.hms_partition_passwd))
if params.stack_supports_ranger_kms_ssl and params.ranger_kms_ssl_enabled:
do_keystore_setup(params.ranger_kms_cred_ssl_path, params.ranger_kms_ssl_keystore_alias, params.ranger_kms_ssl_passwd)
# remove plain-text password from xml configs
dbks_site_copy = {}
dbks_site_copy.update(params.config['configurations']['dbks-site'])
for prop in params.dbks_site_password_properties:
if prop in dbks_site_copy:
dbks_site_copy[prop] = "_"
XmlConfig("dbks-site.xml",
conf_dir=params.kms_conf_dir,
configurations=dbks_site_copy,
configuration_attributes=params.config['configuration_attributes']['dbks-site'],
owner=params.kms_user,
group=params.kms_group,
mode=0644
)
ranger_kms_site_copy = {}
ranger_kms_site_copy.update(params.config['configurations']['ranger-kms-site'])
if params.stack_supports_ranger_kms_ssl:
# remove plain-text password from xml configs
for prop in params.ranger_kms_site_password_properties:
if prop in ranger_kms_site_copy:
ranger_kms_site_copy[prop] = "_"
XmlConfig("ranger-kms-site.xml",
conf_dir=params.kms_conf_dir,
configurations=ranger_kms_site_copy,
configuration_attributes=params.config['configuration_attributes']['ranger-kms-site'],
owner=params.kms_user,
group=params.kms_group,
mode=0644
)
XmlConfig("kms-site.xml",
conf_dir=params.kms_conf_dir,
configurations=params.config['configurations']['kms-site'],
configuration_attributes=params.config['configuration_attributes']['kms-site'],
owner=params.kms_user,
group=params.kms_group,
mode=0644
)
File(os.path.join(params.kms_conf_dir, "kms-log4j.properties"),
owner=params.kms_user,
group=params.kms_group,
content=InlineTemplate(params.kms_log4j),
mode=0644
)
if params.security_enabled:
# core-site.xml linking required by setup for HDFS encryption
XmlConfig("core-site.xml",
conf_dir=params.kms_conf_dir,
configurations=params.config['configurations']['core-site'],
configuration_attributes=params.config['configuration_attributes']['core-site'],
owner=params.kms_user,
group=params.kms_group,
mode=0644
)
else:
File(format('{kms_conf_dir}/core-site.xml'), action="delete")
def copy_jdbc_connector(stack_version=None):
import params
if params.jdbc_jar_name is None and params.driver_curl_source.endswith("/None"):
error_message = "Error! Sorry, but we can't find jdbc driver related to {0} database to download from {1}. \
Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc} on server host.'".format(params.db_flavor, params.jdk_location)
Logger.error(error_message)
if params.driver_curl_source and not params.driver_curl_source.endswith("/None"):
if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
File(params.previous_jdbc_jar, action='delete')
kms_home = params.kms_home
if stack_version is not None:
kms_home = format("{stack_root}/{stack_version}/ranger-kms")
driver_curl_target = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")
File(params.downloaded_custom_connector,
content = DownloadSource(params.driver_curl_source),
mode = 0644
)
Directory(os.path.join(kms_home, 'ews', 'lib'),
mode=0755
)
if params.db_flavor.lower() == 'sqla':
Execute(('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir), sudo = True)
Execute(('cp', '--remove-destination', params.jar_path_in_archive, os.path.join(kms_home, 'ews', 'webapp', 'lib')),
path=["/bin", "/usr/bin/"],
sudo=True)
Directory(params.jdbc_libs_dir,
cd_access="a",
create_parents=True)
Execute(as_sudo(['yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir], auto_escape=False),
path=["/bin", "/usr/bin/"])
File(os.path.join(kms_home, 'ews', 'webapp', 'lib', 'sajdbc4.jar'), mode=0644)
else:
Execute(('cp', '--remove-destination', params.downloaded_custom_connector, os.path.join(kms_home, 'ews', 'webapp', 'lib')),
path=["/bin", "/usr/bin/"],
sudo=True)
File(os.path.join(kms_home, 'ews', 'webapp', 'lib', params.jdbc_jar_name), mode=0644)
ModifyPropertiesFile(format("{kms_home}/install.properties"),
properties = params.config['configurations']['kms-properties'],
owner = params.kms_user
)
if params.db_flavor.lower() == 'sqla':
ModifyPropertiesFile(format("{kms_home}/install.properties"),
properties = {'SQL_CONNECTOR_JAR': format('{kms_home}/ews/webapp/lib/sajdbc4.jar')},
owner = params.kms_user,
)
else:
ModifyPropertiesFile(format("{kms_home}/install.properties"),
properties = {'SQL_CONNECTOR_JAR': format('{driver_curl_target}')},
owner = params.kms_user,
)
def enable_kms_plugin():
import params
if params.has_ranger_admin:
ranger_flag = False
if params.stack_supports_ranger_kerberos and params.security_enabled:
if not is_empty(params.rangerkms_principal) and params.rangerkms_principal != '':
ranger_flag = check_ranger_service_support_kerberos(params.kms_user, params.rangerkms_keytab, params.rangerkms_principal)
else:
ranger_flag = check_ranger_service_support_kerberos(params.kms_user, params.spengo_keytab, params.spnego_principal)
else:
ranger_flag = check_ranger_service()
if not ranger_flag:
Logger.error('Error in Get/Create service for Ranger Kms.')
current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
File(format('{kms_conf_dir}/ranger-security.xml'),
owner = params.kms_user,
group = params.kms_group,
mode = 0644,
content = format('<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>')
)
Directory([os.path.join('/etc', 'ranger', params.repo_name), os.path.join('/etc', 'ranger', params.repo_name, 'policycache')],
owner = params.kms_user,
group = params.kms_group,
mode=0775,
create_parents = True
)
File(os.path.join('/etc', 'ranger', params.repo_name, 'policycache',format('kms_{repo_name}.json')),
owner = params.kms_user,
group = params.kms_group,
mode = 0644
)
# remove plain-text password from xml configs
plugin_audit_properties_copy = {}
plugin_audit_properties_copy.update(params.config['configurations']['ranger-kms-audit'])
if params.plugin_audit_password_property in plugin_audit_properties_copy:
plugin_audit_properties_copy[params.plugin_audit_password_property] = "crypted"
XmlConfig("ranger-kms-audit.xml",
conf_dir=params.kms_conf_dir,
configurations=plugin_audit_properties_copy,
configuration_attributes=params.config['configuration_attributes']['ranger-kms-audit'],
owner=params.kms_user,
group=params.kms_group,
mode=0744)
XmlConfig("ranger-kms-security.xml",
conf_dir=params.kms_conf_dir,
configurations=params.config['configurations']['ranger-kms-security'],
configuration_attributes=params.config['configuration_attributes']['ranger-kms-security'],
owner=params.kms_user,
group=params.kms_group,
mode=0744)
# remove plain-text password from xml configs
ranger_kms_policymgr_ssl_copy = {}
ranger_kms_policymgr_ssl_copy.update(params.config['configurations']['ranger-kms-policymgr-ssl'])
for prop in params.kms_plugin_password_properties:
if prop in ranger_kms_policymgr_ssl_copy:
ranger_kms_policymgr_ssl_copy[prop] = "crypted"
XmlConfig("ranger-policymgr-ssl.xml",
conf_dir=params.kms_conf_dir,
configurations=ranger_kms_policymgr_ssl_copy,
configuration_attributes=params.config['configuration_attributes']['ranger-kms-policymgr-ssl'],
owner=params.kms_user,
group=params.kms_group,
mode=0744)
if params.xa_audit_db_is_enabled:
cred_setup = params.cred_setup_prefix + ('-f', params.credential_file, '-k', 'auditDBCred', '-v', PasswordString(params.xa_audit_db_password), '-c', '1')
Execute(cred_setup, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)
cred_setup = params.cred_setup_prefix + ('-f', params.credential_file, '-k', 'sslKeyStore', '-v', PasswordString(params.ssl_keystore_password), '-c', '1')
Execute(cred_setup, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)
cred_setup = params.cred_setup_prefix + ('-f', params.credential_file, '-k', 'sslTrustStore', '-v', PasswordString(params.ssl_truststore_password), '-c', '1')
Execute(cred_setup, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)
File(params.credential_file,
owner = params.kms_user,
group = params.kms_group,
mode = 0640
)
# create ranger kms audit directory
if params.xa_audit_hdfs_is_enabled and params.has_namenode and params.has_hdfs_client_on_node:
params.HdfsResource("/ranger/audit",
type="directory",
action="create_on_execute",
owner=params.hdfs_user,
group=params.hdfs_user,
mode=0755,
recursive_chmod=True
)
params.HdfsResource("/ranger/audit/kms",
type="directory",
action="create_on_execute",
owner=params.kms_user,
group=params.kms_group,
mode=0750,
recursive_chmod=True
)
params.HdfsResource(None, action="execute")
if params.xa_audit_hdfs_is_enabled and len(params.namenode_host) > 1:
Logger.info('Audit to Hdfs enabled in NameNode HA environment, creating hdfs-site.xml')
XmlConfig("hdfs-site.xml",
conf_dir=params.kms_conf_dir,
configurations=params.config['configurations']['hdfs-site'],
configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
owner=params.kms_user,
group=params.kms_group,
mode=0644
)
else:
File(format('{kms_conf_dir}/hdfs-site.xml'), action="delete")
def setup_kms_jce():
import params
if params.jce_name is not None:
Directory(params.jce_source_dir,
create_parents = True
)
jce_target = format('{jce_source_dir}/{jce_name}')
File(jce_target,
content = DownloadSource(format('{jdk_location}/{jce_name}')),
mode = 0644,
)
File([format("{java_home}/jre/lib/security/local_policy.jar"), format("{java_home}/jre/lib/security/US_export_policy.jar")],
action = "delete",
)
unzip_cmd = ("unzip", "-o", "-j", "-q", jce_target, "-d", format("{java_home}/jre/lib/security"))
Execute(unzip_cmd,
only_if = format("test -e {java_home}/jre/lib/security && test -f {jce_target}"),
path = ['/bin/','/usr/bin'],
sudo = True
)
else:
Logger.warning("Required jce policy zip is not available, need to setup manually")
def check_ranger_service():
import params
policymgr_mgr_url = params.policymgr_mgr_url
if policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url)
ambari_username_password_for_ranger = format("{ambari_ranger_admin}:{ambari_ranger_password}")
response_code = ranger_adm_obj.check_ranger_login_urllib2(policymgr_mgr_url)
if response_code is not None and response_code == 200:
user_resp_code = ranger_adm_obj.create_ambari_admin_user(params.ambari_ranger_admin, params.ambari_ranger_password, params.admin_uname_password)
if user_resp_code is not None and user_resp_code == 200:
get_repo_flag = get_repo(policymgr_mgr_url, params.repo_name, ambari_username_password_for_ranger)
if not get_repo_flag:
return create_repo(policymgr_mgr_url, json.dumps(params.kms_ranger_plugin_repo), ambari_username_password_for_ranger)
else:
return True
else:
return False
else:
Logger.error('Ranger service is not reachable')
return False
@safe_retry(times=5, sleep_time=8, backoff_factor=1.5, err_class=Fail, return_on_fail=False)
def create_repo(url, data, usernamepassword):
try:
base_url = url + '/service/public/v2/api/service'
base64string = base64.encodestring('{0}'.format(usernamepassword)).replace('\n', '')
headers = {
'Accept': 'application/json',
"Content-Type": "application/json"
}
request = urllib2.Request(base_url, data, headers)
request.add_header("Authorization", "Basic {0}".format(base64string))
result = urllib2.urlopen(request, timeout=20)
response_code = result.getcode()
response = json.loads(json.JSONEncoder().encode(result.read()))
if response_code == 200:
Logger.info('Repository created Successfully')
return True
else:
Logger.info('Repository not created')
return False
except urllib2.URLError, e:
if isinstance(e, urllib2.HTTPError):
raise Fail("Error creating service. Http status code - {0}. \n {1}".format(e.code, e.read()))
else:
raise Fail("Error creating service. Reason - {0}.".format(e.reason))
except socket.timeout as e:
raise Fail("Error creating service. Reason - {0}".format(e))
@safe_retry(times=5, sleep_time=8, backoff_factor=1.5, err_class=Fail, return_on_fail=False)
def get_repo(url, name, usernamepassword):
try:
base_url = url + '/service/public/v2/api/service?serviceName=' + name + '&serviceType=kms&isEnabled=true'
request = urllib2.Request(base_url)
base64string = base64.encodestring(usernamepassword).replace('\n', '')
request.add_header("Content-Type", "application/json")
request.add_header("Accept", "application/json")
request.add_header("Authorization", "Basic {0}".format(base64string))
result = urllib2.urlopen(request, timeout=20)
response_code = result.getcode()
response = json.loads(result.read())
if response_code == 200 and len(response) > 0:
for repo in response:
        if repo.has_key('name') and repo.get('name').lower() == name.lower():
          Logger.info('KMS repository exists')
return True
else:
          Logger.info('KMS repository does not exist')
return False
else:
      Logger.info('KMS repository does not exist')
return False
except urllib2.URLError, e:
if isinstance(e, urllib2.HTTPError):
raise Fail("Error getting {0} service. Http status code - {1}. \n {2}".format(name, e.code, e.read()))
else:
raise Fail("Error getting {0} service. Reason - {1}.".format(name, e.reason))
except socket.timeout as e:
raise Fail("Error creating service. Reason - {0}".format(e))
def check_ranger_service_support_kerberos(user, keytab, principal):
import params
policymgr_mgr_url = params.policymgr_mgr_url
if policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url)
response_code = ranger_adm_obj.check_ranger_login_curl(user, keytab, principal, policymgr_mgr_url, True)
if response_code is not None and response_code[0] == 200:
get_repo_name_response = ranger_adm_obj.get_repository_by_name_curl(user, keytab, principal, params.repo_name, 'kms', 'true', is_keyadmin = True)
if get_repo_name_response is not None:
      Logger.info('KMS repository {0} exists'.format(get_repo_name_response['name']))
return True
else:
create_repo_response = ranger_adm_obj.create_repository_curl(user, keytab, principal, params.repo_name, json.dumps(params.kms_ranger_plugin_repo), None, is_keyadmin = True)
if create_repo_response is not None and len(create_repo_response) > 0:
return True
else:
return False
else:
Logger.error('Ranger service is not reachable')
return False
def update_password_configs():
import params
ModifyPropertiesFile(format("{kms_home}/install.properties"),
properties = {'db_root_password': '_', 'db_password': '_', 'KMS_MASTER_KEY_PASSWD': '_', 'REPOSITORY_CONFIG_PASSWORD': '_'},
owner = params.kms_user,
)
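# Note: update_password_configs() above intentionally overwrites the password
# properties in install.properties with the placeholder '_' so that plaintext
# credentials do not remain on disk once the credential provider has been set up.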
|
|
import numpy as np
import time
import sys
import subprocess
import os
import random
from NCRFAE.neurocrf_autoencoder_EM import *
import gzip
import cPickle
from NCRFAE.utils import *
import argparse
def run(parsed_args):
lfilename = parsed_args.labeled_set
per_labeled = parsed_args.percent_labeled
per_unlabeled = parsed_args.percent_unlabeled
em_weight_unlabeled = parsed_args.em_weight_unlabeled
lg_head = lfilename.split('/')[1]
folder = lg_head + '_semi_EM' + '_results'
if not os.path.exists(folder):
os.mkdir(folder)
# load the dataset
f = gzip.open(lfilename,'rb')
train_set, valid_set, test_set, dic = cPickle.load(f)
print lfilename + " loaded."
idx2label = dict((k, v) for v, k in dic['labels2idx'].iteritems())
idx2word = dict((k, v) for v, k in dic['words2idx'].iteritems())
idx2char = dict((k, v) for v, k in dic['chars2idx'].iteritems())
idx2vec = dic['idx2vec']
train_lex, train_char, train_y = train_set
valid_lex, valid_char, valid_y = valid_set
test_lex, test_char, test_y = test_set
vocsize = len(idx2vec)
charsize = len(idx2char)
# number of classes
n_classes = len(idx2label)
# print n_classes
char_embeddingdim = parsed_args.char_emb_dim
embeddingdim = parsed_args.emb_dimension
hiddensize = parsed_args.hiddensize
char_hiddensize = parsed_args.char_hiddensize
randomseed = parsed_args.seed
windowsize = parsed_args.context_win
dropout_rate = parsed_args.dropout_rate
# initialize a random number generator
rng = np.random.RandomState(randomseed)
# word embeddings
if parsed_args.random_emb:
        # add one for PADDING at the end or beginning (the dummy word); word vectors are parameters as well
embeddings = 0.2 * rng.uniform(-1.0, 1.0, (vocsize+1, embeddingdim)).astype(np.float32)
else:
# using Mikolov's embeddings
embeddings = np.zeros((vocsize+1, embeddingdim), dtype=np.float32)
for idx, value in idx2vec.iteritems():
embeddings[idx] = value
# char embeddings
char_embeddings = 0.2 * rng.uniform(-1.0, 1.0, (charsize+1, char_embeddingdim)).astype(np.float32)
    # instantiate the model
classifier = CRF_Auto_Encoder(rng, embeddings, char_embeddings, hiddensize, char_hiddensize, embeddingdim, char_embeddingdim, windowsize, n_classes, vocsize+1, dropout_rate = dropout_rate)
classifier.compile()
# semi-supervised learning starting from here
training_idxs = np.arange(len(train_lex))
# train with early stopping on validation set
best_res = -np.inf # infinity
#divide the training set into labeled data and unlabeled data
n_threshold_labeled = len(train_lex)/100*per_labeled
n_threshold_unlabeled = n_threshold_labeled + len(train_lex)/100*per_unlabeled
#initialize parameters of decoder by using labeled dataset
temp_theta_table = np.zeros((classifier.n_classes, classifier.dic_size))
for idx, i in enumerate(training_idxs):
if i < n_threshold_labeled:
for x, y in zip(train_lex[i], train_y[i]): # x, y are indices of word, label
temp_theta_table[y, x] += 1
temp_theta_table = npsoftmax(temp_theta_table)
classifier.decoder_update_func(temp_theta_table)
for e in xrange(parsed_args.nepochs):
# shuffle
rng.shuffle(training_idxs)
current_epoch = e
# training the encoder
tic = time.time()
for idx, i in enumerate(training_idxs):
trainx = contextwin(train_lex[i], windowsize)
trainx_char = contextwin_char(train_char[i], windowsize)
trainy = train_y[i]
if i < n_threshold_labeled:
cost_value, predicted_value = classifier.train_xy_func(trainx, trainx_char, trainy)
elif i >= n_threshold_labeled and i < n_threshold_unlabeled:
cost_value, predicted_value = classifier.train_x_func(trainx, trainx_char)
else:
continue
if parsed_args.verbose:
print '[Semi-supervised learning] per %2.2f%% epoch %i >> %2.2f%%' % (1*per_labeled, e, (idx+1)*100./len(train_lex)), 'completed in %.2f (sec) <<\r' % (time.time()-tic),
sys.stdout.flush()
new_theta_table = np.zeros((classifier.n_classes, classifier.dic_size))
# directly optimize the decoder
for idx, i in enumerate(training_idxs):
if i < n_threshold_labeled:
for x, y in zip(train_lex[i], train_y[i]): # x, y are indices of word, label
new_theta_table[y, x] += 1
elif i >= n_threshold_labeled and i < n_threshold_unlabeled:
trainx = contextwin(train_lex[i], windowsize)
trainx_char = contextwin_char(train_char[i], windowsize)
alpha_table, beta_table, Z = classifier.forward_backward_func(trainx, trainx_char)
for t in xrange(train_lex[i].shape[0]):
expected_count = alpha_table[t] * beta_table[t] / Z * em_weight_unlabeled
v_id = train_lex[i][t]
new_theta_table[:,v_id] += expected_count
else:
continue
new_theta_table = npsoftmax(new_theta_table)
classifier.decoder_update_func(new_theta_table)
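        # Sketch of the M-step performed above (comments only, for orientation):
        # each decoder entry theta[y, v] is re-estimated from hard counts on the
        # labeled sentences plus EM-weighted expected counts on the unlabeled ones,
        # using the forward-backward posterior gamma_t(y) = alpha_t(y) * beta_t(y) / Z,
        # i.e. roughly
        #   theta[y, v] is proportional to count_labeled(y, v) + em_weight_unlabeled * sum_t gamma_t(y) * [x_t == v]
        # before npsoftmax normalises the table.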
# evaluation // back into the real world : id -> words
# validation
tic = time.time()
predictions_valid = []
for i in xrange(len(valid_lex)):
validx = contextwin(valid_lex[i], windowsize)
validx_char = contextwin_char(valid_char[i], windowsize)
temp = classifier.infer_func(validx, validx_char).astype(np.int32)
validpred = temp[1:-1]
predictions_valid.append(map(lambda u: idx2label[u], validpred))
if parsed_args.verbose:
print '[Testing on validation set] per %2.2f%% epoch %i >> %2.2f%%' % (1*per_labeled, e, (i+1)*100./len(valid_lex)), 'completed in %.2f (sec) <<\r' % (time.time()-tic),
sys.stdout.flush()
groundtruth_valid = [map(lambda u: idx2label[u], y) for y in valid_y]
words_valid = [map(lambda u: idx2word[u], w) for w in valid_lex]
# compute the accuracy using pos
res_valid = poseval(predictions_valid, groundtruth_valid, words_valid, folder + '/' + str(per_labeled) + '_current.valid.txt')
if res_valid['wordacc'] > best_res:
# testing
tic = time.time()
predictions_test = []
for i in xrange(len(test_lex)):
testx = contextwin(test_lex[i], windowsize)
testx_char = contextwin_char(test_char[i], windowsize)
temp = classifier.infer_func(testx, testx_char).astype(np.int32) # a list of integers
testpred = temp[1:-1]
predictions_test.append(map(lambda u: idx2label[u], testpred))
if parsed_args.verbose:
print '[Testing on testing set] per %2.2f%% epoch %i >> %2.2f%%' % (1*per_labeled, e, (i+1)*100./len(test_lex)), 'completed in %.2f (sec) <<\r' % (time.time()-tic),
sys.stdout.flush()
groundtruth_test = [map(lambda u: idx2label[u], y) for y in test_y]
words_test = [map(lambda u: idx2word[u], w) for w in test_lex]
res_test = poseval(predictions_test, groundtruth_test, words_test, folder + '/' + str(per_labeled) + '_current.test.txt')
classifier.save(folder, '_' + str(current_epoch) + '.model')
best_res = res_valid['wordacc']
if parsed_args.verbose:
print 'NEW BEST: epoch', e, 'valid acc', res_valid['wordacc'], 'best test acc', res_test['wordacc'], ' '*20
print ''
vsacc, vwacc = res_valid['sentenceacc'], res_valid['wordacc']
tsacc, twacc = res_test['sentenceacc'], res_test['wordacc']
best_epoch = e
subprocess.call(['mv', folder + '/' + str(per_labeled) + '_current.valid.txt', folder + '/' + str(per_labeled) + '_best.valid.txt'])
print("semi-supervised")
subprocess.call(['mv', folder + '/' + str(per_labeled) + '_current.test.txt', folder + '/' + str(per_labeled) + '_best.test.txt'])
print 'BEST RESULT: epoch', best_epoch, 'with the model', folder, 'with percent of labeled data', per_labeled, 'percent of un-labeled data', per_unlabeled
print 'valid word accuracy', vwacc, 'best test word accuracy', twacc
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("--labeled_set", type=str, help="The labeled dataset")
argparser.add_argument("--percent_labeled", type=int, default=50, help="Percentage of labeled data")
argparser.add_argument("--percent_unlabeled", type=int, default=50, help="Percentage of unlabeled data")
argparser.add_argument("--em_weight_unlabeled", type=float, default=1.00, help="weight for unlabled data in EM")
argparser.add_argument("--verbose", type=bool, default=False, help="Verbose output")
argparser.add_argument("--seed", type=int, default=2017, help="Set up the random seed")
argparser.add_argument("--random_emb", type=bool, default=False, help="Use the randomized word embedding")
argparser.add_argument("--emb_dimension", type=int, default=200, help="Word embedding dimension")
argparser.add_argument("--char_emb_dim", type=int, default=15, help="Char embedding dimension")
argparser.add_argument("--context_win", type=int, default=3, help="Context window size")
argparser.add_argument("--hiddensize", type=int, default=20, help="Number of nodes in the hidden layer")
argparser.add_argument("--char_hiddensize", type=int, default=20, help="Number of nodes in the hidden layer for char layer")
argparser.add_argument("--nepochs", type=int, default=25, help="Maximum number of epochs")
argparser.add_argument("--dropout_rate", type=float, default=0.5, help="Dropout rate for the dropout layer")
parsed_args = argparser.parse_args()
run(parsed_args)
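# Example invocation (a sketch; the script and dataset file names are hypothetical
# placeholders, while the flags mirror the argparse options defined above):
#
#   python train_semi_em.py --labeled_set data/pos_dataset.pkl.gz \
#       --percent_labeled 50 --percent_unlabeled 50 \
#       --em_weight_unlabeled 1.0 --nepochs 25 --verbose True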
|
|
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
import struct
import sys
import tempfile
import unittest
import six
import pytest
from mock import Mock, patch
from behave.formatter._registry import make_formatters
from behave.formatter import pretty
from behave.formatter.base import StreamOpener
from behave.model import Tag, Feature, Scenario, Step
from behave.model_core import Status
from behave.matchers import Match
class TestGetTerminalSize(unittest.TestCase):
def setUp(self):
try:
self.ioctl_patch = patch("fcntl.ioctl")
self.ioctl = self.ioctl_patch.start()
except ImportError:
self.ioctl_patch = None
self.ioctl = None
self.zero_struct = struct.pack("HHHH", 0, 0, 0, 0)
def tearDown(self):
if self.ioctl_patch:
self.ioctl_patch.stop()
def test_windows_fallback(self): # pylint: disable=no-self-use
platform = sys.platform
sys.platform = "windows"
assert pretty.get_terminal_size() == (80, 24)
sys.platform = platform
def test_termios_fallback(self): # pylint: disable=no-self-use
try:
import termios
return
except ImportError:
pass
assert pretty.get_terminal_size() == (80, 24)
def test_exception_in_ioctl(self):
try:
import termios
except ImportError:
return
def raiser(*args, **kwargs): # pylint: disable=unused-argument
raise Exception("yeehar!")
self.ioctl.side_effect = raiser
assert pretty.get_terminal_size() == (80, 24)
self.ioctl.assert_called_with(0, termios.TIOCGWINSZ, self.zero_struct)
def test_happy_path(self):
try:
import termios
except ImportError:
return
self.ioctl.return_value = struct.pack("HHHH", 17, 23, 5, 5)
assert pretty.get_terminal_size() == (23, 17)
self.ioctl.assert_called_with(0, termios.TIOCGWINSZ, self.zero_struct)
def test_zero_size_fallback(self):
try:
import termios
except ImportError:
return
self.ioctl.return_value = self.zero_struct
assert pretty.get_terminal_size() == (80, 24)
self.ioctl.assert_called_with(0, termios.TIOCGWINSZ, self.zero_struct)
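# Note on the struct layout these tests assume: TIOCGWINSZ fills a ``struct
# winsize`` packed as "HHHH" = (rows, cols, xpixel, ypixel), which is why
# packing (17, 23, 5, 5) in test_happy_path is expected to come back from
# get_terminal_size() as (cols=23, rows=17).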
def _tf():
"""Open a temp file that looks a bunch like stdout."""
if six.PY3:
# in python3 it's got an encoding and accepts new-style strings
return tempfile.TemporaryFile(mode="w", encoding="UTF-8")
# pre-python3 it's not got an encoding and accepts encoded data
# (old-style strings)
return tempfile.TemporaryFile(mode="w")
class FormatterTests(unittest.TestCase):
formatter_name = "plain" # SANE DEFAULT, overwritten by concrete classes
def setUp(self):
self.config = Mock()
self.config.color = True
self.config.outputs = [StreamOpener(stream=sys.stdout)]
self.config.format = [self.formatter_name]
_line = 0
@property
def line(self):
self._line += 1
return self._line
def _formatter(self, file_object, config): # pylint: disable=no-self-use
stream_opener = StreamOpener(stream=file_object)
f = make_formatters(config, [stream_opener])[0]
f.uri("<string>")
return f
def _feature(self, keyword=u"k\xe9yword", name=u"name", tags=None,
location=u"location", # pylint: disable=unused-argument
description=None, scenarios=None, background=None):
if tags is None:
tags = [u"spam", u"ham"]
if description is None:
description = [u"description"]
if scenarios is None:
scenarios = []
line = self.line
tags = [Tag(name, line) for name in tags]
return Feature("<string>", line, keyword, name, tags=tags,
description=description, scenarios=scenarios,
background=background)
def _scenario(self, keyword=u"k\xe9yword", name=u"name", tags=None, steps=None):
if tags is None:
tags = []
if steps is None:
steps = []
line = self.line
tags = [Tag(name, line) for name in tags]
return Scenario("<string>", line, keyword, name, tags=tags, steps=steps)
def _step(self, keyword=u"k\xe9yword", step_type="given", name=u"name",
text=None, table=None):
line = self.line
return Step("<string>", line, keyword, step_type, name, text=text,
table=table)
def _match(self, arguments=None): # pylint: disable=no-self-use
def dummy():
pass
return Match(dummy, arguments)
def test_feature(self):
# this test does not actually check the result of the formatting; it
# just exists to make sure that formatting doesn't explode in the face of
# unicode and stuff
p = self._formatter(_tf(), self.config)
f = self._feature()
p.feature(f)
def test_scenario(self):
p = self._formatter(_tf(), self.config)
f = self._feature()
p.feature(f)
s = self._scenario()
p.scenario(s)
def test_step(self):
p = self._formatter(_tf(), self.config)
f = self._feature()
p.feature(f)
scenario = self._scenario()
p.scenario(scenario)
s = self._step()
p.step(s)
p.match(self._match([]))
s.status = Status.passed
p.result(s)
class TestPretty(FormatterTests):
formatter_name = "pretty"
class TestPlain(FormatterTests):
formatter_name = "plain"
class TestJson(FormatterTests):
formatter_name = "json"
class TestTagsCount(FormatterTests):
formatter_name = "tags"
def test_tag_counts(self):
p = self._formatter(_tf(), self.config)
s = self._scenario(tags=[u"ham", u"foo"])
f = self._feature(scenarios=[s]) # feature.tags= ham, spam
p.feature(f)
p.scenario(s)
assert p.tag_counts == {"ham": [f, s], "spam": [f], "foo": [s]}
class MultipleFormattersTests(FormatterTests):
formatters = []
def setUp(self):
self.config = Mock()
self.config.color = True
self.config.outputs = [StreamOpener(stream=sys.stdout)
for i in self.formatters]
self.config.format = self.formatters
def _formatters(self, file_object, config): # pylint: disable=no-self-use
stream_opener = StreamOpener(stream=file_object)
formatters = make_formatters(config, [stream_opener])
for f in formatters:
f.uri("<string>")
return formatters
def test_feature(self):
# this test does not actually check the result of the formatting; it
# just exists to make sure that formatting doesn't explode in the face of
# unicode and stuff
formatters = self._formatters(_tf(), self.config)
f = self._feature()
for p in formatters:
p.feature(f)
def test_scenario(self):
formatters = self._formatters(_tf(), self.config)
f = self._feature()
for p in formatters:
p.feature(f)
s = self._scenario()
p.scenario(s)
def test_step(self):
formatters = self._formatters(_tf(), self.config)
f = self._feature()
for p in formatters:
p.feature(f)
scenario = self._scenario()
p.scenario(scenario)
s = self._step()
p.step(s)
p.match(self._match([]))
s.status = Status.passed
p.result(s)
class TestPrettyAndPlain(MultipleFormattersTests):
formatters = ["pretty", "plain"]
class TestPrettyAndJSON(MultipleFormattersTests):
formatters = ["pretty", "json"]
class TestJSONAndPlain(MultipleFormattersTests):
formatters = ["json", "plain"]
|
|
from django.test import TestCase
from ..models import TaxSaveInputs, WorkerNodesCounter
from ..models import convert_to_floats
from ..helpers import (expand_1D, expand_2D, expand_list, package_up_vars,
format_csv, arrange_totals_by_row, default_taxcalc_data)
from ...taxbrain import compute as compute
from ..views import convert_val
import taxcalc
from taxcalc import Policy
import pytest
FBY = 2015
@pytest.mark.django_db
def test_compute():
assert compute
compute.DROPQ_WORKERS = [1,2,3,4,5,6,7,8,9,10]
compute.NUM_BUDGET_YEARS = 5
wnc, created = WorkerNodesCounter.objects.get_or_create(singleton_enforce=1)
dropq_worker_offset = wnc.current_offset
hostnames = compute.DROPQ_WORKERS[dropq_worker_offset:
dropq_worker_offset + compute.NUM_BUDGET_YEARS]
assert hostnames == [1,2,3,4,5]
wnc.current_offset = (dropq_worker_offset + compute.NUM_BUDGET_YEARS) % len(compute.DROPQ_WORKERS)
wnc.save()
assert wnc.current_offset == 5
dropq_worker_offset = wnc.current_offset
hostnames = compute.DROPQ_WORKERS[dropq_worker_offset:
dropq_worker_offset + compute.NUM_BUDGET_YEARS]
assert hostnames == [6,7,8,9,10]
wnc.current_offset = (dropq_worker_offset + compute.NUM_BUDGET_YEARS) % len(compute.DROPQ_WORKERS)
wnc.save()
assert wnc.current_offset == 0
dropq_worker_offset = wnc.current_offset
hostnames = compute.DROPQ_WORKERS[dropq_worker_offset:
dropq_worker_offset+ compute.NUM_BUDGET_YEARS]
assert hostnames == [1,2,3,4,5]
#Reset to original values
compute.DROPQ_WORKERS = ['localhost:5050']
wnc.current_offset = 0
wnc.save()
compute.NUM_BUDGET_YEARS = 2
def test_convert_val():
field = u'*,*,130000'
out = [convert_val(x) for x in field.split(',')]
exp = ['*', '*', 130000.0]
assert out == exp
field = u'False'
out = [convert_val(x) for x in field.split(',')]
exp = [False]
assert out == exp
field = u'0.12,0.13,0.14'
out = [convert_val(x) for x in field.split(',')]
exp = [0.12, 0.13, 0.14]
assert out == exp
def cycler(max):
count = 0
while True:
yield count
count = (count + 1) % max
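# e.g. cycler(3) yields 0, 1, 2, 0, 1, 2, ... and is used below to generate
# deterministic dummy values for the CSV formatting test.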
class TaxInputTests(TestCase):
def test_convert(self):
values = {"II_brk2_0": [36000., 38000., 40000.],
"II_brk2_1": [72250., 74000.],
"II_brk2_2": [36500.]
}
ans = package_up_vars(values, first_budget_year=FBY)
pp = Policy(start_year=2013)
pp.set_year(FBY)
# irates are rates for 2015, 2016, and 2017
irates = pp.indexing_rates_for_update(param_name='II_brk2', calyear=FBY,
num_years_to_expand=3)
# User choices propagate through to all future years
# The user has specified part of the parameter up to 2017.
# So, we choose to fill in the propagated value, which is
# either inflated or not.
f2_2016 = int(36500 * (1.0 + irates[0]))
f3_2016 = int(50200 * (1.0 + irates[0]))
f4_2016 = int(74900 * (1.0 + irates[0]))
f5_2016 = int(37450 * (1.0 + irates[0]))
f1_2017 = int(74000 * (1.0 + irates[1]))
f2_2017 = int(f2_2016 * (1.0 + irates[1]))
exp = [[36000, 72250, 36500, 50200, 74900, 37450],
[38000, 74000, f2_2016, 50400, 75300, 37650],
[40000, f1_2017, f2_2017, None, None, None]]
assert ans['_II_brk2'] == exp
assert len(ans) == 1
def test_package_up_vars_wildcards(self):
values = {"AMT_tthd": ['*','*',204000.]}
ans = package_up_vars(values, first_budget_year=FBY)
exp = [185400., 186300., 204000.]
assert ans['_AMT_tthd'] == exp
def test_package_up_vars_CTC(self):
values = {"CTC_c": [2000.0]}
ans = package_up_vars(values, first_budget_year=FBY)
exp = [2000.0]
assert ans['_CTC_c'] == exp
def test_package_up_vars_with_cpi(self):
values = {"CTC_c_cpi": True}
ans = package_up_vars(values, first_budget_year=FBY)
assert ans == {'_CTC_c_cpi': True}
def test_convert_4_budget_years(self):
values = {"II_brk2_0": [36000., 38000., 40000., 41000],
"II_brk2_1": [72250., 74000.],
"II_brk2_2": [36500.]
}
ans = package_up_vars(values, first_budget_year=FBY)
pp = Policy(start_year=2013)
pp.set_year(FBY)
# irates are rates for 2015, 2016, and 2017
irates = pp.indexing_rates_for_update(param_name='II_brk2', calyear=FBY,
num_years_to_expand=4)
# User choices propagate through to all future years
# The user has specified part of the parameter up to 2017.
# So, we choose to fill in the propagated value, which is
# either inflated or not.
f2_2016 = int(36500 * (1.0 + irates[0]))
f3_2016 = int(50200 * (1.0 + irates[0]))
f4_2016 = int(74900 * (1.0 + irates[0]))
f5_2016 = int(37450 * (1.0 + irates[0]))
f1_2017 = int(74000 * (1.0 + irates[1]))
f2_2017 = int(f2_2016 * (1.0 + irates[1]))
f1_2018 = int(f1_2017 * (1.0 + irates[2]))
f2_2018 = int(f2_2017 * (1.0 + irates[2]))
exp = [[36000, 72250, 36500, 50200, 74900, 37450],
[38000, 74000, f2_2016, 50400, 75300, 37650],
[40000, f1_2017, f2_2017, None, None, None],
[41000, f1_2018, f2_2018, None, None, None]]
assert ans['_II_brk2'] == exp
def test_convert_multiple_items(self):
values = {"II_brk2_0": [36000., 38000., 40000., 41000],
"II_brk2_1": [72250., 74000.],
"II_brk2_2": [36500.]
}
values['II_em'] = [4000]
ans = package_up_vars(values, first_budget_year=FBY)
defaults = taxcalc.policy.Policy.default_data(start_year=FBY)
pp = Policy(start_year=2013)
pp.set_year(FBY)
# irates are rates for 2015, 2016, and 2017
irates = pp.indexing_rates_for_update(param_name='II_brk2', calyear=FBY,
num_years_to_expand=4)
# User choices propagate through to all future years
# The user has specified part of the parameter up to 2017.
# So, we choose to fill in the propagated value, which is
# either inflated or not.
f2_2016 = int(36500 * (1.0 + irates[0]))
f3_2016 = int(50200 * (1.0 + irates[0]))
f4_2016 = int(74900 * (1.0 + irates[0]))
f5_2016 = int(37450 * (1.0 + irates[0]))
f1_2017 = int(74000 * (1.0 + irates[1]))
f2_2017 = int(f2_2016 * (1.0 + irates[1]))
f1_2018 = int(f1_2017 * (1.0 + irates[2]))
f2_2018 = int(f2_2017 * (1.0 + irates[2]))
exp = [[36000, 72250, 36500, 50200, 74900, 37450],
[38000, 74000, f2_2016, 50400, 75300, 37650],
[40000, f1_2017, f2_2017, None, None, None],
[41000, f1_2018, f2_2018, None, None, None]]
assert ans['_II_brk2'] == exp
# For scalar parameter values, we still have that all user
# choices propagate up through whatever is specified as
# a default. We know that _II_em is specified up to 2016, so
# package up vars needs to overwrite those default and return
# 2015 and 2016 values
exp_em = [4000, int(4000 *(1 + irates[0]))]
assert ans['_II_em'] == exp_em
assert len(ans) == 2
def test_convert_non_cpi_inflated(self):
values = {"FEI_ec_c": [100000.]}
ans = package_up_vars(values, first_budget_year=FBY)
defaults = taxcalc.policy.Policy.default_data(start_year=2015)
pp = Policy(start_year=2013)
pp.set_year(FBY)
# irates are rates for 2015, 2016, and 2017
irates = pp.indexing_rates_for_update(param_name='FEI_ec_c', calyear=FBY,
num_years_to_expand=2)
# User choices propagate through to all future years
# The user has specified the parameter just for 2015, but
# the defaults JSON file has values up to 2016. We should
# give back values up to 2016, with user choice propagating
f2_2016 = 100000
exp = [100000, f2_2016]
assert ans['_FEI_ec_c'] == exp
def test_package_up_eitc(self):
values = {'EITC_rt_2': [0.44], 'EITC_rt_0': [0.08415], 'EITC_rt_1': [0.374, 0.39],
'EITC_rt_3': [0.495], 'EITC_prt_1': [0.17578],
'EITC_prt_0': [0.08415, 0.09], 'EITC_prt_3': [0.23166],
'EITC_prt_2': [0.23166]}
ans = package_up_vars(values, first_budget_year=FBY)
assert ans == {'_EITC_rt': [[0.08415, 0.374, 0.44, 0.495],
[0.08415, 0.39, 0.44, 0.495]],
'_EITC_prt': [[0.08415, 0.17578, 0.23166, 0.23166],
[0.09, 0.17578, 0.23166, 0.23166]]}
def test_package_up_vars_Behavioral_params(self):
user_values = {'FICA_ss_trt': [0.11],
'BE_inc': [0.04]}
ans = package_up_vars(user_values, first_budget_year=FBY)
assert ans['_BE_inc'] == [0.04]
def test_package_up_vars_multi_year(self):
user_values = {'SS_Earnings_c': [118500, 999999]}
ans = package_up_vars(user_values, first_budget_year=2016)
assert ans['_SS_Earnings_c'] == [118500.0, 999999.0]
def test_expand1d(self):
x = [1, 2, 3]
assert expand_1D(x, 5) == [1, 2, 3, None, None]
def test_expand2d(self):
x = [[1, 2, 3], [4, 5, 6]]
exp = [[1, 2, 3], [4, 5, 6], [None, None, None]]
assert expand_2D(x, 3) == exp
def test_expand_list_1(self):
x = [1, 2, 3]
assert expand_list(x, 5) == [1, 2, 3, None, None]
    def test_expand_list_2(self):
x = [[1, 2, 3], [4, 5, 6]]
exp = [[1, 2, 3], [4, 5, 6], [None, None, None]]
assert expand_list(x, 3) == exp
def test_format_csv(self):
c = cycler(40)
tab_types = [u'mY_bin', u'mX_bin', u'mY_dec', u'mX_dec', u'df_dec',
u'df_bin', u'fiscal_tots']
bin_keys = [u'thirty_forty_2', u'thirty_forty_0', u'thirty_forty_1',
u'seventyfive_hundred_2',
u'forty_fifty_2', u'forty_fifty_1', u'forty_fifty_0',
u'ten_twenty_2',
u'ten_twenty_0', u'ten_twenty_1', u'hundred_twohundred_0',
u'hundred_twohundred_1',
u'seventyfive_hundred_1', u'seventyfive_hundred_0',
u'twenty_thirty_0', u'twenty_thirty_1', u'twenty_thirty_2',
u'fifty_seventyfive_2', u'fifty_seventyfive_1',
u'fifty_seventyfive_0', u'twohundred_fivehundred_2',
u'twohundred_fivehundred_0', u'twohundred_fivehundred_1',
u'thousand_up_2', u'thousand_up_0', u'thousand_up_1',
u'less_than_10_2', u'fivehundred_thousand_2',
u'fivehundred_thousand_0', u'fivehundred_thousand_1',
u'hundred_twohundred_2', u'less_than_10_1', u'less_than_10_0',
u'all_1', u'all_0', u'all_2']
dec_keys = [u'perc20-30_0', u'perc20-30_1', u'perc20-30_2', u'perc50-60_0',
u'perc50-60_1', u'perc50-60_2', u'perc40-50_0', u'perc40-50_1',
u'perc40-50_2', u'perc90-100_0', u'perc90-100_1',
u'perc90-100_2', u'perc30-40_0', u'perc30-40_1',
u'perc30-40_2', u'perc0-10_1', u'perc0-10_0', u'perc0-10_2',
u'perc70-80_0', u'perc70-80_1', u'perc70-80_2', u'all_1',
u'all_0', u'all_2', u'perc80-90_0', u'perc80-90_1',
u'perc80-90_2', u'perc10-20_0', u'perc10-20_1', u'perc10-20_2',
u'perc60-70_0', u'perc60-70_1', u'perc60-70_2']
tot_keys = [u'combined_tax', u'ind_tax', u'payroll_tax']
tax_results = {}
tax_results[u'fiscal_tots'] = {k:[1,2,3] for k in tot_keys}
tax_results[u'mY_bin'] = { k:[next(c)] for k in bin_keys}
tax_results[u'mX_bin'] = { k:[next(c)] for k in bin_keys}
tax_results[u'df_bin'] = { k:[next(c)] for k in bin_keys}
tax_results[u'mY_dec'] = { k:[next(c)] for k in dec_keys}
tax_results[u'mX_dec'] = { k:[next(c)] for k in dec_keys}
tax_results[u'df_dec'] = { k:[next(c)] for k in dec_keys}
ans = format_csv(tax_results, u'42', first_budget_year=FBY)
assert ans[0] == ['#URL: http://www.ospc.org/taxbrain/42/']
def test_arrange_totals_by_row(self):
total_row_names = ["ind_tax", "payroll_tax", "combined_tax"]
tots = {'ind_tax_0': "1", 'ind_tax_1': "2", 'ind_tax_2': "3",
'payroll_tax_0': "4", 'payroll_tax_1': "5", 'payroll_tax_2': "6",
'combined_tax_0': "7", 'combined_tax_1': "8", 'combined_tax_2': "9"}
ans = arrange_totals_by_row(tots, total_row_names)
exp = {'ind_tax': ["1", "2", "3"], 'payroll_tax': ["4", "5", "6"], 'combined_tax': ["7", "8", "9"]}
assert ans == exp
def test_default_taxcalc_data(self):
import math
dd = default_taxcalc_data(taxcalc.policy.Policy, start_year=2017)
dd_raw = taxcalc.policy.Policy.default_data(start_year=2017)
dd_meta = default_taxcalc_data(taxcalc.policy.Policy, start_year=2017, metadata=True)
floored_std_aged = list(map(math.floor, dd['_STD_Aged'][0]))
assert dd['_STD_Aged'] == [floored_std_aged]
assert dd_meta['_STD_Aged']['value'] == [floored_std_aged]
floored_ii_em_ps = list(map(math.floor, dd['_II_em_ps'][0]))
assert dd['_II_em_ps'] == [floored_ii_em_ps]
assert dd_meta['_II_em_ps']['value'] == [floored_ii_em_ps]
floored_ii_em = [math.floor(dd['_II_em'][0])]
assert dd['_II_em'] == floored_ii_em
assert dd_meta['_II_em']['value'] == floored_ii_em
assert dd_raw['_II_rt6'] == dd['_II_rt6']
|
|
"""
Searching for names with given scope and name. This is very central in Jedi and
Python. The name resolution is quite complicated with descriptors,
``__getattribute__``, ``__getattr__``, ``global``, etc.
If you want to understand name resolution, please read the first few chapters
in http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/.
Flow checks
+++++++++++
Flow checks are not really mature. There's only a check for ``isinstance``. It
would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
Unfortunately everything else is ignored (e.g. a == '' would be easy to
check for -> a is a string). There's big potential in these checks.
"""
from jedi.parser.python import tree
from jedi import debug
from jedi.common import unite
from jedi import settings
from jedi.evaluate import representation as er
from jedi.evaluate.instance import AbstractInstanceContext
from jedi.evaluate import compiled
from jedi.evaluate import pep0484
from jedi.evaluate import iterable
from jedi.evaluate import imports
from jedi.evaluate import analysis
from jedi.evaluate import flow_analysis
from jedi.evaluate import param
from jedi.evaluate import helpers
from jedi.evaluate.filters import get_global_filters
from jedi.evaluate.context import ContextualizedName, ContextualizedNode
class NameFinder(object):
def __init__(self, evaluator, context, name_context, name_or_str, position=None):
self._evaluator = evaluator
# Make sure that it's not just a syntax tree node.
self._context = context
self._name_context = name_context
self._name = name_or_str
if isinstance(name_or_str, tree.Name):
self._string_name = name_or_str.value
else:
self._string_name = name_or_str
self._position = position
self._found_predefined_types = None
@debug.increase_indent
def find(self, filters, attribute_lookup):
"""
        :param bool attribute_lookup: Tells the logic whether we're accessing the
            attribute or the contents of e.g. a function.
"""
names = self.filter_name(filters)
if self._found_predefined_types is not None and names:
check = flow_analysis.reachability_check(
self._context, self._context.tree_node, self._name)
if check is flow_analysis.UNREACHABLE:
return set()
return self._found_predefined_types
types = self._names_to_types(names, attribute_lookup)
if not names and not types \
and not (isinstance(self._name, tree.Name) and
isinstance(self._name.parent.parent, tree.Param)):
if isinstance(self._name, tree.Name):
if attribute_lookup:
analysis.add_attribute_error(
self._name_context, self._context, self._name)
else:
message = ("NameError: name '%s' is not defined."
% self._string_name)
analysis.add(self._name_context, 'name-error', self._name, message)
return types
def _get_origin_scope(self):
if isinstance(self._name, tree.Name):
scope = self._name
while scope.parent is not None:
# TODO why if classes?
if not isinstance(scope, tree.Scope):
break
scope = scope.parent
return scope
else:
return None
def get_filters(self, search_global=False):
origin_scope = self._get_origin_scope()
if search_global:
return get_global_filters(self._evaluator, self._context, self._position, origin_scope)
else:
return self._context.get_filters(search_global, self._position, origin_scope=origin_scope)
def filter_name(self, filters):
"""
Searches names that are defined in a scope (the different
``filters``), until a name fits.
"""
names = []
if self._context.predefined_names:
# TODO is this ok? node might not always be a tree.Name
node = self._name
while node is not None and not node.is_scope():
node = node.parent
if node.type in ("if_stmt", "for_stmt", "comp_for"):
try:
name_dict = self._context.predefined_names[node]
types = name_dict[self._string_name]
except KeyError:
continue
else:
self._found_predefined_types = types
break
for filter in filters:
names = filter.get(self._name)
if names:
break
debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self._string_name,
self._context, names, self._position)
return list(names)
def _check_getattr(self, inst):
"""Checks for both __getattr__ and __getattribute__ methods"""
# str is important, because it shouldn't be `Name`!
name = compiled.create(self._evaluator, self._string_name)
        # This is a little bit special. `__getattribute__` is executed in Python
        # before `__getattr__`. But: I know of no use case where this could be
        # practical and where Jedi would return wrong types. If you ever find
        # something, let me know!
        # We are inverting this, because a hand-crafted `__getattribute__`
        # could still call another hand-crafted `__getattr__`, but not the
        # other way around.
names = (inst.get_function_slot_names('__getattr__') or
inst.get_function_slot_names('__getattribute__'))
return inst.execute_function_slots(names, name)
def _names_to_types(self, names, attribute_lookup):
types = set()
types = unite(name.infer() for name in names)
debug.dbg('finder._names_to_types: %s -> %s', names, types)
if not names and isinstance(self._context, AbstractInstanceContext):
# handling __getattr__ / __getattribute__
return self._check_getattr(self._context)
# Add isinstance and other if/assert knowledge.
if not types and isinstance(self._name, tree.Name) and \
not isinstance(self._name_context, AbstractInstanceContext):
flow_scope = self._name
base_node = self._name_context.tree_node
if base_node.type == 'comp_for':
return types
while True:
flow_scope = flow_scope.get_parent_scope(include_flows=True)
n = _check_flow_information(self._name_context, flow_scope,
self._name, self._position)
if n is not None:
return n
if flow_scope == base_node:
break
return types
def _name_to_types(evaluator, context, tree_name):
types = []
node = tree_name.get_definition()
typ = node.type
if typ == 'for_stmt':
types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
if types:
return types
if typ == 'with_stmt':
types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
if types:
return types
if typ in ('for_stmt', 'comp_for'):
try:
types = context.predefined_names[node][tree_name.value]
except KeyError:
cn = ContextualizedNode(context, node.children[3])
for_types = iterable.py__iter__types(evaluator, cn.infer(), cn)
c_node = ContextualizedName(context, tree_name)
types = check_tuple_assignments(evaluator, c_node, for_types)
elif typ == 'expr_stmt':
types = _remove_statements(evaluator, context, node, tree_name)
elif typ == 'with_stmt':
types = context.eval_node(node.node_from_name(tree_name))
elif typ in ('import_from', 'import_name'):
types = imports.infer_import(context, tree_name)
elif typ in ('funcdef', 'classdef'):
types = _apply_decorators(evaluator, context, node)
elif typ == 'global_stmt':
context = evaluator.create_context(context, tree_name)
finder = NameFinder(evaluator, context, context, str(tree_name))
filters = finder.get_filters(search_global=True)
# For global_stmt lookups, we only need the first possible scope,
# which means the function itself.
filters = [next(filters)]
types += finder.find(filters, attribute_lookup=False)
elif typ == 'try_stmt':
# TODO an exception can also be a tuple. Check for those.
# TODO check for types that are not classes and add it to
# the static analysis report.
exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
types = unite(
evaluator.execute(t, param.ValuesArguments([]))
for t in exceptions
)
else:
raise ValueError("Should not happen.")
return types
def _apply_decorators(evaluator, context, node):
"""
    Returns the function that should be executed in the end.
    This is also the place where the decorators are processed.
"""
if node.type == 'classdef':
decoratee_context = er.ClassContext(
evaluator,
parent_context=context,
classdef=node
)
else:
decoratee_context = er.FunctionContext(
evaluator,
parent_context=context,
funcdef=node
)
initial = values = set([decoratee_context])
for dec in reversed(node.get_decorators()):
debug.dbg('decorator: %s %s', dec, values)
dec_values = context.eval_node(dec.children[1])
trailer_nodes = dec.children[2:-1]
if trailer_nodes:
# Create a trailer and evaluate it.
trailer = tree.PythonNode('trailer', trailer_nodes)
trailer.parent = dec
dec_values = evaluator.eval_trailer(context, dec_values, trailer)
if not len(dec_values):
debug.warning('decorator not found: %s on %s', dec, node)
return initial
values = unite(dec_value.execute(param.ValuesArguments([values]))
for dec_value in dec_values)
if not len(values):
debug.warning('not possible to resolve wrappers found %s', node)
return initial
debug.dbg('decorator end %s', values)
return values
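# Illustration of the decorator handling above (hypothetical user code): for
#
#     @deco
#     def f(): ...
#
# ``deco`` is evaluated first and then executed with the FunctionContext of
# ``f`` as its single argument; whatever that call returns is what a later
# lookup of ``f`` infers. If a decorator cannot be resolved, the undecorated
# ``initial`` context set is returned as a fallback.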
def _remove_statements(evaluator, context, stmt, name):
"""
This is the part where statements are being stripped.
Due to lazy evaluation, statements like a = func; b = a; b() have to be
evaluated.
"""
types = set()
check_instance = None
pep0484types = \
pep0484.find_type_from_comment_hint_assign(context, stmt, name)
if pep0484types:
return pep0484types
types |= context.eval_stmt(stmt, seek_name=name)
if check_instance is not None:
# class renames
types = set([er.get_instance_el(evaluator, check_instance, a, True)
if isinstance(a, er.Function) else a for a in types])
return types
def _check_flow_information(context, flow, search_name, pos):
""" Try to find out the type of a variable just with the information that
is given by the flows: e.g. It is also responsible for assert checks.::
if isinstance(k, str):
k. # <- completion here
ensures that `k` is a string.
"""
if not settings.dynamic_flow_information:
return None
result = None
if flow.is_scope():
# Check for asserts.
module_node = flow.get_root_node()
try:
names = module_node.used_names[search_name.value]
except KeyError:
return None
names = reversed([
n for n in names
if flow.start_pos <= n.start_pos < (pos or flow.end_pos)
])
for name in names:
ass = tree.search_ancestor(name, 'assert_stmt')
if ass is not None:
result = _check_isinstance_type(context, ass.assertion(), search_name)
if result is not None:
return result
if flow.type in ('if_stmt', 'while_stmt'):
potential_ifs = [c for c in flow.children[1::4] if c != ':']
for if_test in reversed(potential_ifs):
if search_name.start_pos > if_test.end_pos:
return _check_isinstance_type(context, if_test, search_name)
return result
def _check_isinstance_type(context, element, search_name):
try:
assert element.type in ('power', 'atom_expr')
# this might be removed if we analyze and, etc
assert len(element.children) == 2
first, trailer = element.children
assert first.type == 'name' and first.value == 'isinstance'
assert trailer.type == 'trailer' and trailer.children[0] == '('
assert len(trailer.children) == 3
# arglist stuff
arglist = trailer.children[1]
args = param.TreeArguments(context.evaluator, context, arglist, trailer)
param_list = list(args.unpack())
# Disallow keyword arguments
assert len(param_list) == 2
(key1, lazy_context_object), (key2, lazy_context_cls) = param_list
assert key1 is None and key2 is None
call = helpers.call_of_leaf(search_name)
is_instance_call = helpers.call_of_leaf(lazy_context_object.data)
# Do a simple get_code comparison. They should just have the same code,
# and everything will be all right.
assert is_instance_call.get_code(normalized=True) == call.get_code(normalized=True)
except AssertionError:
return None
result = set()
for cls_or_tup in lazy_context_cls.infer():
if isinstance(cls_or_tup, iterable.AbstractSequence) and \
cls_or_tup.array_type == 'tuple':
for lazy_context in cls_or_tup.py__iter__():
for context in lazy_context.infer():
result |= context.execute_evaluated()
else:
result |= cls_or_tup.execute_evaluated()
return result
def check_tuple_assignments(evaluator, contextualized_name, types):
"""
Checks if tuples are assigned.
"""
lazy_context = None
for index, node in contextualized_name.assignment_indexes():
cn = ContextualizedNode(contextualized_name.context, node)
iterated = iterable.py__iter__(evaluator, types, cn)
for _ in range(index + 1):
try:
lazy_context = next(iterated)
except StopIteration:
# We could do this with the default param in next. But this
# would allow this loop to run for a very long time if the
# index number is high. Therefore break if the loop is
# finished.
return set()
types = lazy_context.infer()
return types
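# Illustration for check_tuple_assignments (hypothetical user code): for an
# assignment such as
#
#     a, b = some_call()
#
# the name ``b`` has assignment index 1, so the iterator over the inferred
# right-hand side is advanced twice and the types of the second lazy context
# are returned.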
|
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
""" IMPORTS """
import urllib3
import traceback
from typing import List, Dict, Optional, Tuple, Generator
# disable insecure warnings
urllib3.disable_warnings()
INTEGRATION_NAME = "Cofense Feed"
_RESULTS_PER_PAGE = 50 # Max for Cofense is 100
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
class Client(BaseClient):
"""Implements class for miners of Cofense feed over http/https."""
available_fields = ["all", "malware"]
cofense_to_indicator = {
"IPv4 Address": FeedIndicatorType.IP,
"Domain Name": FeedIndicatorType.Domain,
"URL": FeedIndicatorType.URL,
"Email": FeedIndicatorType.Email,
}
def __init__(
self,
url: str,
auth: Tuple[str, str],
threat_type: Optional[str] = None,
verify: bool = False,
proxy: bool = False,
read_time_out: Optional[float] = 120.0,
tags: list = [],
tlp_color: Optional[str] = None
):
"""Constructor of Client and BaseClient
Arguments:
url {str} -- url for Cofense feed
auth {Tuple[str, str]} -- (username, password)
Keyword Arguments:
threat_type {Optional[str]} -- One of available_fields (default: {None})
verify {bool} -- Should verify certificate. (default: {False})
proxy {bool} -- Should use proxy. (default: {False})
        read_time_out {Optional[float]} -- Read timeout in seconds. (default: {120.0})
tags {list} -- A list of tags to add to the feed.
tlp_color {str} -- Traffic Light Protocol color.
"""
self.read_time_out = read_time_out
self.threat_type = (
threat_type if threat_type in self.available_fields else "all"
)
# Request related attributes
self.suffix = "/apiv1/threat/search/"
self.tags = tags
self.tlp_color = tlp_color
super().__init__(url, verify=verify, proxy=proxy, auth=auth)
def _http_request(self, *args, **kwargs) -> dict:
if "timeout" not in kwargs:
kwargs["timeout"] = (5.0, self.read_time_out)
return super()._http_request(*args, **kwargs)
def build_iterator(
self, begin_time: Optional[int] = None, end_time: Optional[int] = None
) -> Generator:
"""Builds an iterator from given data filtered by start and end times.
Keyword Arguments:
begin_time {Optional[str, int]} --
Where to start fetching.
Timestamp represented in unix format. (default: {None})
end_time {Optional[int]} --
Time to stop fetching (if not supplied, will be time now).
Timestamp represented in unix format. (default: {None}).
Yields:
Dict -- Threat from Cofense
"""
        # If no end time was supplied, fetch up to now.
if not end_time:
end_time = get_now()
payload = {
"beginTimestamp": str(begin_time),
"endTimestamp": str(end_time),
"threatType": self.threat_type,
"resultsPerPage": _RESULTS_PER_PAGE,
}
# For first fetch, there is only start time.
if not begin_time:
payload["beginTimestamp"] = str(end_time)
del payload["endTimestamp"]
demisto.debug(f"{INTEGRATION_NAME} - pulling {begin_time}/{end_time}")
cur_page = 0
total_pages = 1
while cur_page < total_pages:
payload["page"] = cur_page
raw_response = self._http_request("post", self.suffix, params=payload)
data = raw_response.get("data", {})
if data:
if total_pages <= 1:
# Call to get all pages.
total_pages = data.get("page", {}).get("totalPages")
if total_pages is None:
return_error('No "totalPages" in response')
demisto.debug(f"total_pages set to {total_pages}")
threats = data.get("threats", [])
for t in threats:
yield t
demisto.debug(f"{INTEGRATION_NAME} - pulling {cur_page+1}/{total_pages}. page size: {_RESULTS_PER_PAGE}")
cur_page += 1
else:
return_error(f'{INTEGRATION_NAME} - no "data" in response')
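    # Sketch of the request shape produced by build_iterator above (values are
    # illustrative, not taken from a real fetch): each page issues
    #
    #   POST /apiv1/threat/search/?beginTimestamp=1578729151&endTimestamp=1578988351
    #        &threatType=all&resultsPerPage=50&page=0
    #
    # and paging continues while the current page index is below
    # data["page"]["totalPages"] from the first response.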
@classmethod
def _convert_block(cls, block: dict) -> Tuple[str, str]:
"""Gets a Cofense block from blockSet and enriches it.
Args:
block:
Returns:
indicator type, value
"""
indicator_type = block.get("blockType", "")
indicator_type = str(cls.cofense_to_indicator.get(indicator_type))
# Only URL indicator has inside information in data_1
if indicator_type == FeedIndicatorType.URL:
value = block.get("data_1", {}).get("url")
else:
value = block.get("data_1")
# If a domain has '*' in the value it is of type domainGlob
if indicator_type == FeedIndicatorType.Domain and '*' in value:
indicator_type = FeedIndicatorType.DomainGlob
return indicator_type, value
def process_item(self, threat: dict) -> List[dict]:
"""Gets a threat and processes them.
Arguments:
threat {dict} -- A threat from Cofense ("threats" key)
Returns:
list -- List of dicts representing indicators.
Examples:
>>> Client.process_item({"id": 123, "blockSet": [{"data_1": "ip", "blockType": "IPv4 Address"}]})
[{'value': 'ip', 'type': 'IP', 'rawJSON': \
{'data_1': 'ip', 'blockType': 'IPv4 Address', 'value': 'ip', 'type': 'IP', 'threat_id': 123}}]
"""
results = list()
block_set: List[dict] = threat.get("blockSet", [])
threat_id = threat.get("id")
for block in block_set:
indicator, value = self._convert_block(block)
block["value"] = value
block["type"] = indicator
block["threat_id"] = f'[{threat_id}]({threat.get("threatDetailURL")})'
malware_family: dict = block.get("malwareFamily", {})
ip_detail: dict = block.get("ipDetail", {})
if indicator:
indicator_obj = {
"value": value,
"type": indicator,
"rawJSON": block,
"fields": {
"tags": self.tags,
"malwarefamily": malware_family.get("familyName"),
"description": malware_family.get("description"),
"sourceoriginalseverity": block.get("impact"),
"threattypes": {
"threatcategoryconfidence": block.get("confidence"),
"threatcategory": block.get("role")
},
"geocountry": ip_detail.get("countryIsoCode"),
"geolocation": f'{ip_detail.get("latitude", "")},{ip_detail.get("longitude", "")}' if ip_detail
else "",
"asn": ip_detail.get("asn"),
"cofensefeedthreatid": f'[{threat_id}]({threat.get("threatDetailURL")})',
"cofensefeedcontinentcode": ip_detail.get("continentCode"),
"cofensefeedlookupon": ip_detail.get("lookupOn"),
"cofensefeedroledescription": block.get("roleDescription"),
"cofensefeedasnorganization": ip_detail.get("asnOrganization"),
"cofensefeedcontinentname": ip_detail.get("continentName"),
"countryname": ip_detail.get("countryName"),
"cofensefeedinfrastructuretypedescription":
block.get("infrastructureTypeSubclass", {}).get("description"),
"cofensefeedisp": ip_detail.get("isp"),
"organization": ip_detail.get("organization"),
"cofensefeedpostalcode": ip_detail.get("postalCode"),
"cofensefeedsubdivisionisocode": ip_detail.get("subdivisionIsoCode"),
"cofensefeedsubdivisionname": ip_detail.get("subdivisionName"),
"cofensefeedtimezone": ip_detail.get("timeZone")
}
}
if self.tlp_color:
indicator_obj['fields']['trafficlightprotocol'] = self.tlp_color # type: ignore
results.append(indicator_obj)
return results
def process_file_item(self, threat: dict) -> List[dict]:
"""Gets a threat and processes them.
Arguments:
threat {dict} -- A threat from Cofense ("threats" key)
Returns:
list -- List of dicts representing File indicators.
Examples:
>>> Client.process_item({"id": 123, "executableSet": [{"md5Hex": "f57ba3e467c72bbdb44b0a65",
"fileName": "abc.exe"}]})
[{'value': 'f57ba3e467c72bbdb44b0a65', 'type': 'File', 'rawJSON': \
{'md5Hex': 'f57ba3e467c72bbdb44b0a65', 'fileName': 'abc.exe', 'value': 'f57ba3e467c72bbdb44b0a65',
'type': 'File', 'threat_id': 123}}]
"""
results = list()
file_set: List[dict] = threat.get("executableSet", [])
threat_id = threat.get("id")
for file in file_set:
file_type = file.get("type")
indicator_type = FeedIndicatorType.File
value = file.get("md5Hex")
file["value"] = value
file["type"] = indicator_type
file["threat_id"] = f'[{threat_id}]({threat.get("threatDetailURL")})'
file['impact'] = file.get("severityLevel")
malware_family: dict = file.get("malwareFamily", {})
if indicator_type:
indicator_obj = {
"value": value,
"type": indicator_type,
"rawJSON": file,
"fields": {
"tags": self.tags,
"malwarefamily": malware_family.get("familyName"),
"description": malware_family.get("description"),
"sourceoriginalseverity": file.get("severityLevel"),
"cofensefeedthreatid": f'[{threat_id}]({threat.get("threatDetailURL")})',
"cofensefeedfilename": file.get("fileName"),
"filetype": file_type,
"md5": file.get("md5Hex"),
"sha1": file.get("sha1Hex"),
"cofensefeedsha224": file.get("sha224Hex"),
"sha256": file.get("sha256Hex"),
"cofensefeedsha384": file.get("sha384Hex"),
"sha512": file.get("sha512Hex"),
"ssdeep": file.get("ssdeep"),
"fileextension": file.get("fileNameExtension"),
"cofensefeedentereddate": arg_to_datetime(file.get("dateEntered")).strftime(DATE_FORMAT) # type: ignore
if file.get("dateEntered") else None
}
}
if self.tlp_color:
indicator_obj['fields']['trafficlightprotocol'] = self.tlp_color # type: ignore
results.append(indicator_obj)
return results
def test_module(client: Client) -> Tuple[str, dict, dict]:
"""A simple test module
Arguments:
client {Client} -- Client derives from BaseClient
Returns:
str -- "ok" if succeeded, else raises a error.
"""
for _ in client.build_iterator():
return "ok", {}, {}
return "ok", {}, {}
def fetch_indicators_command(
client: Client,
begin_time: Optional[int] = None,
end_time: Optional[int] = None,
limit: Optional[int] = None,
) -> List[Dict]:
"""Fetches the indicators from client.
Arguments:
client {Client} -- Client derives from BaseClient
Keyword Arguments:
begin_time {Optional[int]} -- Time to start fetch from (default: {None})
end_time {Optional[int]} -- Time to stop fetch to (default: {None})
limit {Optional[int]} -- Maximum amount of indicators to fetch. (default: {None})
Returns:
List[Dict] -- List of indicators from threat
"""
indicators = list()
for threat in client.build_iterator(begin_time=begin_time, end_time=end_time):
        # Stop once the requested limit of indicators has been reached.
new_indicators = client.process_item(threat)
new_file_indicators = client.process_file_item(threat)
new_indicators.extend(new_file_indicators)
indicators.extend(new_indicators)
if limit and limit < len(indicators):
indicators = indicators[:limit]
break
return indicators
def build_fetch_times(fetch_time: str, last_fetch: Optional[dict] = None) -> Tuple[int, int]:
"""Build the start and end time of the fetch session.
Args:
fetch_time: fetch time (for example: "3 days")
last_fetch: Last fetch object
Returns:
begin_time, end_time
"""
if isinstance(last_fetch, dict) and last_fetch.get("timestamp"):
begin_time = last_fetch.get("timestamp", 0) # type: int
end_time = get_now()
else: # First fetch
begin_time, end_time = parse_date_range_no_milliseconds(fetch_time)
return begin_time, end_time
def parse_date_range_no_milliseconds(from_time: str) -> Tuple[int, int]:
"""Gets a range back and return time before the string and to now.
Without milliseconds.
Args:
from_time:The date range to be parsed (required)
Returns:
start time, now
Examples:
>>> parse_date_range_no_milliseconds("3 days")
(1578729151, 1578988351)
"""
begin_time, end_time = parse_date_range(from_time, to_timestamp=True)
return int(begin_time / 1000), int(end_time / 1000)
def get_indicators_command(client: Client, args: dict) -> Tuple[str, list]:
"""Getting indicators into Demisto's incident.
Arguments:
client {Client} -- A client object
args {dict} -- Usually demisto.args()
Returns:
Tuple[str, list] -- human_readable, raw_response
"""
limit = int(args.get("limit", 10))
from_time = args.get("from_time", "3 days")
begin_time, end_time = build_fetch_times(from_time)
indicators = fetch_indicators_command(
client, begin_time=begin_time, end_time=end_time, limit=limit)
human_readable = tableToMarkdown(
f"Results from {INTEGRATION_NAME}:",
[indicator.get("rawJSON") for indicator in indicators],
["threat_id", "type", "value", "impact", "confidence", "roleDescription"],
)
return human_readable, indicators
def get_now() -> int:
"""Returns time now without milliseconds
Returns:
int -- time now without milliseconds.
"""
    return int(datetime.now().timestamp())
def main():
"""Main function
"""
params = demisto.params()
# handle params
url = "https://www.threathq.com"
credentials = params.get("credentials", {})
if not credentials:
raise DemistoException("Credentials are empty. "
"Fill up the username/password fields in the integration configuration.")
auth = (credentials.get("identifier"), credentials.get("password"))
verify = not params.get("insecure")
proxy = params.get("proxy")
threat_type = params.get("threat_type")
tags = argToList(params.get('feedTags'))
tlp_color = params.get('tlp_color')
client = Client(url, auth=auth, verify=verify, proxy=proxy, threat_type=threat_type, tags=tags, tlp_color=tlp_color)
demisto.info(f"Command being called is {demisto.command()}")
try:
if demisto.command() == "test-module":
return_outputs(*test_module(client))
elif demisto.command() == "fetch-indicators":
begin_time, end_time = build_fetch_times(params.get("fetch_time", "3 days"))
indicators = fetch_indicators_command(client, begin_time, end_time)
# Send indicators to demisto
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif demisto.command() == "cofense-get-indicators":
# dummy command for testing
readable_outputs, raw_response = get_indicators_command(client, demisto.args())
return_outputs(readable_outputs, {}, raw_response=raw_response)
except Exception as err:
return_error(f"Error in {INTEGRATION_NAME} integration:\n{str(err)}\n\nTrace:{traceback.format_exc()}")
if __name__ in ["__main__", "builtin", "builtins"]:
main()
|
|
import pytest
import io
import json
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import exceptions
from waterbutler.providers.figshare import metadata
from waterbutler.providers.figshare import provider
from waterbutler.providers.figshare.settings import PRIVATE_IDENTIFIER, MAX_PAGE_SIZE
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': '[email protected]',
'callback_url': 'http://sup.com/api/v1/project/v8s9q/waterbutler/logs/',
'id': 'fakey',
}
@pytest.fixture
def credentials():
return {
'token': 'freddie',
}
@pytest.fixture
def project_settings():
return {
'container_type': 'project',
'container_id': '13423',
}
@pytest.fixture
def article_settings():
return {
'container_type': 'article',
'container_id': '4037952',
}
@pytest.fixture
def project_provider(auth, credentials, project_settings):
return provider.FigshareProvider(auth, credentials, project_settings)
@pytest.fixture
def article_provider(auth, credentials, article_settings):
return provider.FigshareProvider(auth, credentials, article_settings)
@pytest.fixture
def file_content():
return b'sleepy'
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
@pytest.fixture
def list_project_articles():
return [
{ "modified_date": "2016-10-18T12:56:27Z",
"doi": "",
"title": "file_article",
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4037952",
"created_date": "2016-10-18T12:55:44Z",
"id": 4037952,
"published_date": None
},
{
"modified_date": "2016-10-18T20:47:25Z",
"doi": "",
"title": "folder_article",
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4040019",
"created_date": "2016-10-18T20:47:25Z",
"id": 4040019,
"published_date": None
}
]
@pytest.fixture
def file_article_metadata():
return {
"group_resource_id": None,
"embargo_date": None,
"citation": "Baxter, Thomas (): file_article. figshare.\n \n Retrieved: 19 20, Oct 19, 2016 (GMT)",
"embargo_reason": "",
"references": [],
"id": 4037952,
"custom_fields": [],
"size": 0,
"metadata_reason": "",
"funding": "",
"figshare_url": "https://figshare.com/articles/_/4037952",
"embargo_type": None,
"title": "file_article",
"defined_type": 3,
"is_embargoed": False,
"version": 0,
"resource_doi": None,
"confidential_reason": "",
"files": [{
"status": "available",
"is_link_only": False,
"name": "file",
"viewer_type": "",
"preview_state": "preview_not_supported",
"download_url": "https://ndownloader.figshare.com/files/6530715",
"supplied_md5": "b3e656f8b0828a31f3ed396a1c868786",
"computed_md5": "b3e656f8b0828a31f3ed396a1c868786",
"upload_token": "878068bf-8cdb-40c9-bcf4-5d8065ac2f7d",
"upload_url": "",
"id": 6530715,
"size": 7
}],
"description": "",
"tags": [],
"created_date": "2016-10-18T12:55:44Z",
"is_active": True,
"authors": [{
"url_name": "_",
"is_active": True,
"id": 2665435,
"full_name": "Thomas Baxter",
"orcid_id": ""
}],
"is_public": False,
"categories": [],
"modified_date": "2016-10-18T12:56:27Z",
"is_confidential": False,
"doi": "",
"license": {
"url": "https://creativecommons.org/licenses/by/4.0/",
"name": "CC-BY",
"value": 1
},
"has_linked_file": False,
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4037952",
"resource_title": None,
"status": "draft",
"published_date": None,
"is_metadata_record": False
}
@pytest.fixture
def file_metadata():
    return {
"status": "available",
"is_link_only": False,
"name": "file",
"viewer_type": "",
"preview_state": "preview_not_supported",
"download_url": "https://ndownloader.figshare.com/files/6530715",
"supplied_md5": "b3e656f8b0828a31f3ed396a1c868786",
"computed_md5": "b3e656f8b0828a31f3ed396a1c868786",
"upload_token": "878068bf-8cdb-40c9-bcf4-5d8065ac2f7d",
"upload_url": "",
"id": 6530715,
"size": 7
}
@pytest.fixture
def folder_article_metadata():
return {
"group_resource_id": None,
"embargo_date": None,
"citation": "Baxter, Thomas (): folder_article. figshare.\n \n Retrieved: 19 27, Oct 19, 2016 (GMT)",
"embargo_reason": "",
"references": [],
"id": 4040019,
"custom_fields": [],
"size": 0,
"metadata_reason": "",
"funding": "",
"figshare_url": "https://figshare.com/articles/_/4040019",
"embargo_type": None,
"title": "folder_article",
"defined_type": 4,
"is_embargoed": False,
"version": 0,
"resource_doi": None,
"confidential_reason": "",
"files": [{
"status": "available",
"is_link_only": False,
"name": "folder_file.png",
"viewer_type": "image",
"preview_state": "preview_available",
"download_url": "https://ndownloader.figshare.com/files/6517539",
"supplied_md5": "",
"computed_md5": "03dee7cf60f17a8453ccd2f51cbbbd86",
"upload_token": "3f106f31-d62e-40e7-bac8-c6092392142d",
"upload_url": "",
"id": 6517539,
"size": 15584
}],
"description": "",
"tags": [],
"created_date": "2016-10-18T20:47:25Z",
"is_active": True,
"authors": [{
"url_name": "_",
"is_active": True,
"id": 2665435,
"full_name": "Thomas Baxter",
"orcid_id": ""
}],
"is_public": False,
"categories": [],
"modified_date": "2016-10-18T20:47:25Z",
"is_confidential": False,
"doi": "",
"license": {
"url": "https://creativecommons.org/licenses/by/4.0/",
"name": "CC-BY",
"value": 1
},
"has_linked_file": False,
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4040019",
"resource_title": None,
"status": "draft",
"published_date": None,
"is_metadata_record": False
}
@pytest.fixture
def folder_file_metadata():
    return {
"status": "available",
"is_link_only": False,
"name": "folder_file.png",
"viewer_type": "image",
"preview_state": "preview_available",
"download_url": "https://ndownloader.figshare.com/files/6517539",
"supplied_md5": "",
"computed_md5": "03dee7cf60f17a8453ccd2f51cbbbd86",
"upload_token": "3f106f31-d62e-40e7-bac8-c6092392142d",
"upload_url": "",
"id": 6517539,
"size": 15584
}
@pytest.fixture
def create_article_metadata():
return {
"location": "https://api.figshare.com/v2/account/projects/13423/articles/4055568"
}
@pytest.fixture
def create_file_metadata():
return {
"location": "https://api.figshare.com/v2/account/articles/4055568/files/6530715"}
@pytest.fixture
def get_file_metadata():
return {
"status": "created",
"is_link_only": False,
"name": "barricade.gif",
"viewer_type": "",
"preview_state": "preview_not_available",
"download_url": "https://ndownloader.figshare.com/files/6530715",
"supplied_md5": "",
"computed_md5": "",
"upload_token": "c9d1a465-f3f6-402c-8106-db3493942303",
"upload_url": "https://fup100310.figshare.com/upload/c9d1a465-f3f6-402c-8106-db3493942303",
"id": 6530715,
"size": 7}
@pytest.fixture
def get_upload_metadata():
return {
"token": "c9d1a465-f3f6-402c-8106-db3493942303",
"md5": "",
"size": 1071709,
"name": "6530715/barricade.gif",
"status": "PENDING",
"parts": [{
"partNo": 1,
"startOffset": 0,
"endOffset": 6,
"status": "PENDING",
"locked": False}]}
@pytest.fixture
def upload_article_metadata():
return {
"group_resource_id": None,
"embargo_date": None,
"citation": "Baxter, Thomas (): barricade.gif. figshare.\n \n Retrieved: 19 20, Oct 19, 2016 (GMT)",
"embargo_reason": "",
"references": [],
"id": 4055568,
"custom_fields": [],
"size": 0,
"metadata_reason": "",
"funding": "",
"figshare_url": "https://figshare.com/articles/_/4037952",
"embargo_type": None,
"title": "barricade.gif",
"defined_type": 3,
"is_embargoed": False,
"version": 0,
"resource_doi": None,
"confidential_reason": "",
"files": [{
"status": "available",
"is_link_only": False,
"name": "barricade.gif",
"viewer_type": "",
"preview_state": "preview_not_supported",
"download_url": "https://ndownloader.figshare.com/files/6530715",
"supplied_md5": "b3e656f8b0828a31f3ed396a1c868786",
"computed_md5": "b3e656f8b0828a31f3ed396a1c868786",
"upload_token": "878068bf-8cdb-40c9-bcf4-5d8065ac2f7d",
"upload_url": "",
"id": 6530715,
"size": 7
}],
"description": "",
"tags": [],
"created_date": "2016-10-18T12:55:44Z",
"is_active": True,
"authors": [{
"url_name": "_",
"is_active": True,
"id": 2665435,
"full_name": "Thomas Baxter",
"orcid_id": ""
}],
"is_public": False,
"categories": [],
"modified_date": "2016-10-18T12:56:27Z",
"is_confidential": False,
"doi": "",
"license": {
"url": "https://creativecommons.org/licenses/by/4.0/",
"name": "CC-BY",
"value": 1
},
"has_linked_file": False,
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4037952",
"resource_title": None,
"status": "draft",
"published_date": None,
"is_metadata_record": False
}
@pytest.fixture
def upload_folder_article_metadata():
return {
"group_resource_id": None,
"embargo_date": None,
"citation": "Baxter, Thomas (): barricade.gif. figshare.\n \n Retrieved: 19 20, Oct 19, 2016 (GMT)",
"embargo_reason": "",
"references": [],
"id": 4040019,
"custom_fields": [],
"size": 0,
"metadata_reason": "",
"funding": "",
"figshare_url": "https://figshare.com/articles/_/4040019",
"embargo_type": None,
"title": "barricade.gif",
"defined_type": 4,
"is_embargoed": False,
"version": 0,
"resource_doi": None,
"confidential_reason": "",
"files": [{
"status": "available",
"is_link_only": False,
"name": "barricade.gif",
"viewer_type": "",
"preview_state": "preview_not_supported",
"download_url": "https://ndownloader.figshare.com/files/6530715",
"supplied_md5": "b3e656f8b0828a31f3ed396a1c868786",
"computed_md5": "b3e656f8b0828a31f3ed396a1c868786",
"upload_token": "878068bf-8cdb-40c9-bcf4-5d8065ac2f7d",
"upload_url": "",
"id": 6530715,
"size": 7
}],
"description": "",
"tags": [],
"created_date": "2016-10-18T12:55:44Z",
"is_active": True,
"authors": [{
"url_name": "_",
"is_active": True,
"id": 2665435,
"full_name": "Thomas Baxter",
"orcid_id": ""
}],
"is_public": False,
"categories": [],
"modified_date": "2016-10-18T12:56:27Z",
"is_confidential": False,
"doi": "",
"license": {
"url": "https://creativecommons.org/licenses/by/4.0/",
"name": "CC-BY",
"value": 1
},
"has_linked_file": False,
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4040019",
"resource_title": None,
"status": "draft",
"published_date": None,
"is_metadata_record": False
}
class TestPolymorphism:
# These should not be passing but are
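    # (Likely because these are `async def` tests without `@pytest.mark.asyncio`,
    # so the coroutines are never awaited and the assertions never actually run.)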
async def test_project_provider(self, project_settings, project_provider):
assert isinstance(project_provider, provider.FigshareProjectProvider)
assert project_provider.project_id == project_settings['container_id']
async def test_article_provider(self, article_settings, article_provider):
assert isinstance(article_provider, provider.FigshareArticleProvider)
assert article_provider.article_id == article_settings['container_id']
class TestMetadata:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_contents(self, project_provider, list_project_articles,
file_article_metadata, folder_article_metadata):
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
        file_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
str(list_project_articles[0]['id']))
folder_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
str(list_project_articles[1]['id']))
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
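        # Two registrations cover the provider's article pagination: page 1
        # returns the fixture articles, page 2 returns [] so the listing stops.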
aiohttpretty.register_json_uri('GET', file_metadata_url, body=file_article_metadata)
aiohttpretty.register_json_uri('GET', folder_metadata_url, body=folder_article_metadata)
path = await project_provider.validate_path('/')
result = await project_provider.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=list_articles_url,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
assert aiohttpretty.has_call(method='GET', uri=file_metadata_url)
assert aiohttpretty.has_call(method='GET', uri=folder_metadata_url)
assert result == [
metadata.FigshareFileMetadata(file_article_metadata, file_article_metadata['files'][0]),
metadata.FigshareFolderMetadata(folder_article_metadata)
]
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_file_article_contents(self, project_provider, list_project_articles,
file_article_metadata, file_metadata):
root_parts = project_provider.root_path_parts
article_id = str(file_article_metadata['id'])
article_name = file_article_metadata['title']
file_id = str(file_metadata['id'])
file_name = file_metadata['name']
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_article_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
article_id)
file_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
article_id, 'files', file_id)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', file_article_metadata_url, body=file_article_metadata)
aiohttpretty.register_json_uri('GET', file_metadata_url, body=file_metadata)
path = await project_provider.validate_path('/{}/{}'.format(article_id, file_id))
result = await project_provider.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=list_articles_url,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
assert aiohttpretty.has_call(method='GET', uri=file_article_metadata_url)
assert aiohttpretty.has_call(method='GET', uri=file_metadata_url)
expected = metadata.FigshareFileMetadata(file_article_metadata, file_metadata)
assert result == expected
assert str(result.id) == file_id
assert result.name == file_name
assert result.path == '/{}/{}'.format(article_id, file_id)
assert result.materialized_path == '/{}/{}'.format(article_name, file_name)
assert str(result.article_id) == article_id
assert result.article_name == article_name
assert result.size == file_metadata['size']
assert result.is_public == (PRIVATE_IDENTIFIER not in file_article_metadata['url'])
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_article_contents(self, project_provider, list_project_articles,
folder_article_metadata, folder_file_metadata):
root_parts = project_provider.root_path_parts
article_id = str(folder_article_metadata['id'])
article_name = folder_article_metadata['title']
file_id = str(folder_file_metadata['id'])
file_name = folder_file_metadata['name']
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
folder_article_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
article_id)
file_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
article_id, 'files', file_id)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', folder_article_metadata_url,
body=folder_article_metadata)
aiohttpretty.register_json_uri('GET', file_metadata_url, body=folder_file_metadata)
path = await project_provider.validate_path('/{}/{}'.format(article_id, file_id))
result = await project_provider.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=list_articles_url,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
assert aiohttpretty.has_call(method='GET', uri=folder_article_metadata_url)
assert aiohttpretty.has_call(method='GET', uri=file_metadata_url)
expected = metadata.FigshareFileMetadata(folder_article_metadata, folder_file_metadata)
assert result == expected
assert str(result.id) == file_id
assert result.name == file_name
assert result.path == '/{}/{}'.format(article_id, file_id)
assert result.materialized_path == '/{}/{}'.format(article_name, file_name)
assert str(result.article_id) == article_id
assert result.article_name == article_name
assert result.size == folder_file_metadata['size']
assert result.is_public == (PRIVATE_IDENTIFIER not in folder_article_metadata['url'])
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_file_contents(self, article_provider, folder_article_metadata,
folder_file_metadata):
root_parts = article_provider.root_path_parts
article_id = str(folder_article_metadata['id'])
article_name = folder_article_metadata['title']
file_id = str(folder_file_metadata['id'])
file_name = folder_file_metadata['name']
folder_article_metadata_url = article_provider.build_url(False, *root_parts)
file_metadata_url = article_provider.build_url(False, *root_parts, 'files', file_id)
print("%%%%%%% HERH?: {}".format(file_metadata_url))
aiohttpretty.register_json_uri('GET', folder_article_metadata_url,
body=folder_article_metadata)
aiohttpretty.register_json_uri('GET', file_metadata_url, body=folder_file_metadata)
path = await article_provider.validate_path('/{}'.format(file_id))
result = await article_provider.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=folder_article_metadata_url)
assert aiohttpretty.has_call(method='GET', uri=file_metadata_url)
expected = metadata.FigshareFileMetadata(folder_article_metadata, folder_file_metadata)
assert result == expected
assert str(result.id) == file_id
assert result.name == file_name
assert result.path == '/{}/{}'.format(article_id, file_id)
assert result.materialized_path == '/{}/{}'.format(article_name, file_name)
assert result.article_name == article_name
assert result.size == folder_file_metadata['size']
assert result.is_public == (PRIVATE_IDENTIFIER not in folder_article_metadata['url'])
class TestCRUD:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_upload(self, project_provider, list_project_articles,
create_article_metadata, create_file_metadata,
get_file_metadata, get_upload_metadata, file_stream,
upload_article_metadata):
file_name = 'barricade.gif'
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
validate_article_url = project_provider.build_url(False, *root_parts, 'articles', file_name)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_uri('GET', validate_article_url, status=404)
path = await project_provider.validate_path('/' + file_name)
article_id = str(upload_article_metadata['id'])
create_article_url = project_provider.build_url(False, *root_parts, 'articles')
create_file_url = project_provider.build_url(False, 'articles', article_id, 'files')
file_url = project_provider.build_url(False, 'articles', article_id, 'files',
str(get_file_metadata['id']))
get_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
upload_url = get_file_metadata['upload_url']
aiohttpretty.register_json_uri('POST', create_article_url, body=create_article_metadata, status=201)
aiohttpretty.register_json_uri('POST', create_file_url, body=create_file_metadata, status=201)
aiohttpretty.register_json_uri('GET', file_url, body=get_file_metadata)
aiohttpretty.register_json_uri('GET', upload_url, body=get_upload_metadata)
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url, body=upload_article_metadata)
result, created = await project_provider.upload(file_stream, path)
expected = metadata.FigshareFileMetadata(
upload_article_metadata,
upload_article_metadata['files'][0],
)
assert aiohttpretty.has_call(
method='POST',
uri=create_article_url,
data=json.dumps({
'title': 'barricade.gif',
})
)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_upload(self, file_stream, project_provider, list_project_articles,
folder_article_metadata, get_file_metadata,
create_file_metadata, get_upload_metadata,
upload_folder_article_metadata):
file_name = 'barricade.gif'
article_id = str(list_project_articles[1]['id'])
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
validate_folder_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
validate_file_url = project_provider.build_url(False, *root_parts, 'articles', article_id,
'files', file_name)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', validate_folder_url, body=folder_article_metadata)
aiohttpretty.register_uri('GET', validate_file_url, status=404)
path = await project_provider.validate_path('/{}/{}'.format(article_id, file_name))
create_file_url = project_provider.build_url(False, 'articles', article_id, 'files')
file_url = project_provider.build_url(False, 'articles', article_id, 'files',
str(get_file_metadata['id']))
get_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
upload_url = get_file_metadata['upload_url']
aiohttpretty.register_json_uri('POST', create_file_url, body=create_file_metadata,
status=201)
aiohttpretty.register_json_uri('GET', file_url, body=get_file_metadata)
aiohttpretty.register_json_uri('GET', upload_url, body=get_upload_metadata)
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url, body=upload_folder_article_metadata)
result, created = await project_provider.upload(file_stream, path)
expected = metadata.FigshareFileMetadata(
upload_folder_article_metadata,
upload_folder_article_metadata['files'][0],
)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_upload(self, file_stream, article_provider, folder_article_metadata,
get_file_metadata, create_file_metadata, get_upload_metadata,
upload_folder_article_metadata):
file_name = 'barricade.gif'
file_id = str(get_file_metadata['id'])
root_parts = article_provider.root_path_parts
validate_file_url = article_provider.build_url(False, *root_parts, 'files', file_name)
aiohttpretty.register_uri('GET', validate_file_url, status=404)
path = await article_provider.validate_path('/' + file_name)
create_file_url = article_provider.build_url(False, *root_parts, 'files')
file_url = article_provider.build_url(False, *root_parts, 'files', file_id)
get_article_url = article_provider.build_url(False, *root_parts)
upload_url = get_file_metadata['upload_url']
aiohttpretty.register_json_uri('POST', create_file_url, body=create_file_metadata, status=201)
aiohttpretty.register_json_uri('GET', file_url, body=get_file_metadata)
aiohttpretty.register_json_uri('GET', get_file_metadata['upload_url'], body=get_upload_metadata)
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url, body=upload_folder_article_metadata)
result, created = await article_provider.upload(file_stream, path)
expected = metadata.FigshareFileMetadata(
upload_folder_article_metadata,
upload_folder_article_metadata['files'][0],
)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_article_download(self, project_provider, file_article_metadata,
list_project_articles, file_metadata):
article_id = str(list_project_articles[0]['id'])
file_id = str(file_article_metadata['files'][0]['id'])
body = b'castle on a cloud'
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_metadata_url = project_provider.build_url(False, *root_parts, 'articles', article_id,
'files', file_id)
article_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
article_id)
download_url = file_metadata['download_url']
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', file_metadata_url, body=file_metadata)
aiohttpretty.register_json_uri('GET', article_metadata_url, body=file_article_metadata)
aiohttpretty.register_uri('GET', download_url, params={'token': project_provider.token},
body=body, auto_length=True)
path = await project_provider.validate_path('/{}/{}'.format(article_id, file_id))
result = await project_provider.download(path)
content = await result.read()
assert content == body
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_download(self, article_provider, file_article_metadata, file_metadata):
body = b'castle on a cloud'
file_id = str(file_metadata['id'])
root_parts = article_provider.root_path_parts
file_metadata_url = article_provider.build_url(False, *root_parts, 'files', file_id)
article_metadata_url = article_provider.build_url(False, *root_parts)
download_url = file_metadata['download_url']
aiohttpretty.register_json_uri('GET', file_metadata_url, body=file_metadata)
aiohttpretty.register_json_uri('GET', article_metadata_url, body=file_article_metadata)
aiohttpretty.register_uri('GET', download_url, params={'token': article_provider.token},
body=body, auto_length=True)
path = await article_provider.validate_path('/{}'.format(file_id))
result = await article_provider.download(path)
content = await result.read()
assert content == body
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_file_delete(self, project_provider, list_project_articles,
file_article_metadata, file_metadata):
file_id = str(file_metadata['id'])
article_id = str(list_project_articles[0]['id'])
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_url = project_provider.build_url(False, *root_parts, 'articles', article_id, 'files',
file_id)
file_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', file_url, body=file_metadata)
aiohttpretty.register_json_uri('GET', file_article_url, body=file_article_metadata)
aiohttpretty.register_uri('DELETE', file_article_url, status=204)
path = await project_provider.validate_path('/{}/{}'.format(article_id, file_id))
result = await project_provider.delete(path)
assert result is None
assert aiohttpretty.has_call(method='DELETE', uri=file_article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_delete(self, project_provider, list_project_articles,
folder_article_metadata):
article_id = str(list_project_articles[1]['id'])
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
        folder_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', folder_article_url, body=folder_article_metadata)
aiohttpretty.register_uri('DELETE', folder_article_url, status=204)
path = await project_provider.validate_path('/{}'.format(article_id))
result = await project_provider.delete(path)
assert result is None
assert aiohttpretty.has_call(method='DELETE', uri=folder_article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_file_delete(self, article_provider, file_metadata):
file_id = str(file_metadata['id'])
file_url = article_provider.build_url(False, *article_provider.root_path_parts, 'files',
file_id)
aiohttpretty.register_json_uri('GET', file_url, body=file_metadata)
aiohttpretty.register_uri('DELETE', file_url, status=204)
path = await article_provider.validate_path('/{}'.format(file_id))
result = await article_provider.delete(path)
assert result is None
assert aiohttpretty.has_call(method='DELETE', uri=file_url)
class TestRevalidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_revalidate_path(self, project_provider, list_project_articles,
file_article_metadata, folder_article_metadata):
file_article_id = str(list_project_articles[0]['id'])
folder_article_id = str(list_project_articles[1]['id'])
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_article_url = project_provider.build_url(False, *root_parts, 'articles',
file_article_id)
folder_article_url = project_provider.build_url(False, *root_parts, 'articles',
folder_article_id)
print("%%%%%% list_articles_url: {}".format(list_articles_url))
print("%%%%%% file_article_url: {}".format(file_article_url))
print("%%%%%% folder_article_url: {}".format(folder_article_url))
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', file_article_url, body=file_article_metadata)
aiohttpretty.register_json_uri('GET', folder_article_url, body=folder_article_metadata)
path = await project_provider.validate_path('/')
result = await project_provider.revalidate_path(path, '{}'.format('file'), folder=False)
assert result.is_dir is False
assert result.name == 'file'
assert result.identifier == str(file_article_metadata['files'][0]['id'])
|
|
#!/usr/bin/env python
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
##############################################################################
# create_module_appearance.py helps VisTrails package developers customize
# the appearance of modules in their packages.
from PyQt4 import QtGui, QtCore, Qt
from module_appearance import Ui_MainWindow
import sys
default_pen = QtGui.QPen()
default_pen.setWidth(2)
default_pen.setColor(QtCore.Qt.black)
selected_pen = QtGui.QPen()
selected_pen.setWidth(3)
selected_pen.setColor(QtCore.Qt.yellow)
##############################################################################
class ModuleFringeJoint(QtGui.QGraphicsEllipseItem):
brush = QtGui.QBrush(QtGui.QColor(192, 192, 192))
def __init__(self, leftLine, rightLine):
pt = rightLine.line().p1()
sz = 5
QtGui.QGraphicsEllipseItem.__init__(self, -sz/2, -sz/2, sz, sz)
self.setPos(pt)
self.leftLine = leftLine
self.rightLine = rightLine
self.setAcceptHoverEvents(True)
self.setPen(default_pen)
self.setBrush(self.brush)
self.setZValue(1.0)
self.setFlag(QtGui.QGraphicsItem.ItemIsMovable, True)
self.setFlag(QtGui.QGraphicsItem.ItemIsFocusable, True)
def removeSelf(self):
scene = self.scene()
scene.removeItem(self)
scene.removeItem(self.rightLine)
self.leftLine.rightJoint = self.rightLine.rightJoint
self.leftLine.moveRightPoint(self.rightLine.line().p2())
if self.rightLine.rightJoint is not None:
self.rightLine.rightJoint.leftLine = self.leftLine
def mouseMoveEvent(self, event):
QtGui.QGraphicsItem.mouseMoveEvent(self, event)
self.leftLine.moveRightPoint(self.scenePos())
self.rightLine.moveLeftPoint(self.scenePos())
app.window.update_text_view()
def keyPressEvent(self, event):
if event.matches(QtGui.QKeySequence.Delete):
self.removeSelf()
def hoverEnterEvent(self, event):
self.setPen(selected_pen)
self.setFocus(QtCore.Qt.OtherFocusReason)
def hoverLeaveEvent(self, event):
self.setPen(default_pen)
self.clearFocus()
@staticmethod
def toScene(pt, side):
if side == 'left':
raise Exception("unimplemented")
elif side == 'right':
return QtCore.QPointF(pt[0] * 80.0 + 100.0, pt[1] * 80.0 - 40.0)
else:
raise Exception("side must be either 'right' or 'left'")
def toVisTrails(self, side):
px, py = self.scenePos().x(), self.scenePos().y()
if side == 'left':
return ((100.0+px)/80.0, (40.0-py)/80.0)
elif side == 'right':
return ((px-100.0)/80.0, (40.0-py)/80.0)
else:
raise Exception("side must be either 'right' or 'left'")
class ModuleFringeLine(QtGui.QGraphicsLineItem):
def __init__(self, leftJoint, rightJoint, *args, **kwargs):
QtGui.QGraphicsLineItem.__init__(self, *args, **kwargs)
self.setAcceptHoverEvents(True)
self.setPen(default_pen)
self.setAcceptedMouseButtons(QtCore.Qt.LeftButton)
self.leftJoint = leftJoint
self.rightJoint = rightJoint
self.setZValue(0.0)
def mouseDoubleClickEvent(self, event):
self.createNewLine(event.pos())
app.window.update_text_view()
def hoverEnterEvent(self, event):
self.setPen(selected_pen)
def hoverLeaveEvent(self, event):
self.setPen(default_pen)
def moveLeftPoint(self, pt):
self.setLine(QtCore.QLineF(pt, self.line().p2()))
def moveRightPoint(self, pt):
self.setLine(QtCore.QLineF(self.line().p1(), pt))
def createNewLine(self, pt):
old_line = self.line()
self.setLine(QtCore.QLineF(old_line.p1(), pt))
new_line = QtCore.QLineF(pt, old_line.p2())
old_joint = self.rightJoint
new_fringe_line = ModuleFringeLine(None, self.rightJoint, new_line)
new_joint = ModuleFringeJoint(self, new_fringe_line)
self.rightJoint = new_joint
new_fringe_line.leftJoint = new_joint
if old_joint is not None:
old_joint.leftLine = new_fringe_line
scene = self.scene()
scene.addItem(new_fringe_line)
scene.addItem(new_joint)
return new_fringe_line
##############################################################################
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.gv = self.ui.graphicsView
self.setup_graphics_view()
self.connect(self.ui.pushButton_Quit,
QtCore.SIGNAL("clicked()"),
QtGui.qApp.quit)
self.connect(self.ui.pushButton_symmetric,
QtCore.SIGNAL("clicked()"),
self.make_symmetric)
self.connect(self.ui.pushButton_mirrored,
QtCore.SIGNAL("clicked()"),
self.make_mirrored)
self.connect(self.ui.pushButton_Clear,
QtCore.SIGNAL("clicked()"),
self.clear_points)
def setup_graphics_view(self):
self.scene = QtGui.QGraphicsScene()
self.gv.setScene(self.scene)
self.gv.setBackgroundBrush(QtCore.Qt.gray)
pen = QtGui.QPen()
pen.setWidth(2)
pen.setColor(QtCore.Qt.black)
self.scene.addLine(-100,-40,100,-40,pen)
self.scene.addLine(-100, 40,100, 40,pen)
# l1 is the left line
self.l1 = ModuleFringeLine(None, None, -100, 40, -100, -40)
# l2 is the right line
self.l2 = ModuleFringeLine(None, None, 100, -40, 100, 40)
self.scene.addItem(self.l1)
self.scene.addItem(self.l2)
self.update_text_view()
@staticmethod
def get_points(line, side):
lst = []
while not (line.rightJoint is None):
lst.append(line.rightJoint.toVisTrails(side))
line = line.rightJoint.rightLine
return lst
def update_text_view(self):
s = ''
left_list = self.get_points(self.l1, 'left')
right_list = self.get_points(self.l2, 'right')
for (preamble, postamble,
side, pts) in zip(['reg.add_module(your_class,\n moduleLeftFringe=[(0.0, 0.0), ',
' moduleRightFringe=[(0.0, 0.0), '],
['(0.0, 1.0)],\n', '(0.0, 1.0)])\n'],
['left', 'right'],
[left_list, reversed(right_list)]):
s += preamble
for p in pts:
s += ('(%.4f, %.4f), ' % p)
s += postamble
self.ui.textEdit.clear()
self.ui.textEdit.append(s)
##########################################################################
# slots
def make_symmetric(self):
while not (self.l2.rightJoint is None):
self.l2.rightJoint.removeSelf()
l1 = self.l1
l2 = self.l2
while not (l1.rightJoint is None):
t = l1.rightJoint.toVisTrails('left')
t = (-t[0], t[1])
p = ModuleFringeJoint.toScene(t, 'right')
l2 = l2.createNewLine(p)
l1 = l1.rightJoint.rightLine
self.update_text_view()
def make_mirrored(self):
while not (self.l2.rightJoint is None):
self.l2.rightJoint.removeSelf()
l1 = self.l1
pl = []
while not (l1.rightJoint is None):
p = l1.rightJoint.scenePos()
pl.append(QtCore.QPointF(-p.x(), p.y()))
l1 = l1.rightJoint.rightLine
l2 = self.l2
for p in reversed(pl):
l2 = l2.createNewLine(p)
self.update_text_view()
def clear_points(self):
while not (self.l2.rightJoint is None):
self.l2.rightJoint.removeSelf()
while not (self.l1.rightJoint is None):
self.l1.rightJoint.removeSelf()
self.update_text_view()
##############################################################################
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
app.window = MainWindow()
app.window.show()
app.exec_()
|
|
import os
import string
import os.path as op
import sys
import shutil
from collections import namedtuple
try:
from seqcluster import prepare_data as prepare
from seqcluster import templates as template_seqcluster
from seqcluster.seqbuster import _create_counts, _read_miraligner, _tab_output
except ImportError:
pass
from bcbio.utils import file_exists, safe_makedir, move_safe, append_stem
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.pipeline.sample import process_alignment
from bcbio.srna import mirdeep
def run_prepare(*data):
"""
Run seqcluster prepare to merge all samples in one file
"""
out_dir = os.path.join(dd.get_work_dir(data[0][0]), "seqcluster", "prepare")
out_dir = os.path.abspath(safe_makedir(out_dir))
prepare_dir = os.path.join(out_dir, "prepare")
fn = []
for sample in data:
name = sample[0]["rgnames"]['sample']
fn.append("%s\t%s" % (sample[0]['collapse'], name))
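        # Each entry pairs a collapsed-FASTQ path with its sample name, e.g.
        # "/work/sample1_collapsed.fastq\tsample1" (hypothetical path).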
args = namedtuple('args', 'debug print_debug minc minl maxl out')
args = args(False, False, 2, 17, 40, out_dir)
ma_out = op.join(out_dir, "seqs.ma")
seq_out = op.join(out_dir, "seqs.fastq")
min_shared = max(int(len(fn) / 10.0), 1)
if not file_exists(ma_out):
seq_l, sample_l = prepare._read_fastq_files(fn, args)
with file_transaction(ma_out) as ma_tx:
with open(ma_tx, 'w') as ma_handle:
with open(seq_out, 'w') as seq_handle:
prepare._create_matrix_uniq_seq(sample_l, seq_l, ma_handle, seq_handle, min_shared)
return data
def run_align(*data):
"""
Prepare data to run alignment step, only once for each project
"""
work_dir = dd.get_work_dir(data[0][0])
out_dir = op.join(work_dir, "seqcluster", "prepare")
seq_out = op.join(out_dir, "seqs.fastq")
bam_dir = op.join(work_dir, "align")
new_bam_file = op.join(bam_dir, "seqs.bam")
if not file_exists(new_bam_file):
sample = process_alignment(data[0][0], [seq_out, None])
bam_file = dd.get_work_bam(sample[0][0])
shutil.move(bam_file, new_bam_file)
shutil.move(bam_file + ".bai", new_bam_file + ".bai")
shutil.rmtree(op.join(bam_dir, sample[0][0]["rgnames"]['sample']))
return data
def run_cluster(*data):
"""
Run seqcluster cluster to detect smallRNA clusters
"""
sample = data[0][0]
work_dir = dd.get_work_dir(sample)
out_dir = op.join(work_dir, "seqcluster", "cluster")
out_dir = op.abspath(safe_makedir(out_dir))
prepare_dir = op.join(work_dir, "seqcluster", "prepare")
bam_file = op.join(work_dir, "align", "seqs.bam")
cluster_dir = _cluster(bam_file, prepare_dir, out_dir, dd.get_ref_file(sample), dd.get_srna_gtf_file(sample))
sample["report"] = _report(sample, dd.get_ref_file(sample))
sample["seqcluster"] = out_dir
out_mirna = _make_isomir_counts(data, out_dir=op.join(work_dir, "mirbase"))
if out_mirna:
sample = dd.set_mirna_counts(sample, out_mirna[0])
sample = dd.set_isomir_counts(sample, out_mirna[1])
out_novel = _make_isomir_counts(data, "seqbuster_novel", op.join(work_dir, "mirdeep2"), "_novel")
novel_db = mirdeep.run(data)
if out_novel:
sample = dd.set_novel_mirna_counts(sample, out_novel[0])
sample = dd.set_novel_isomir_counts(sample, out_novel[1])
data[0][0] = sample
return data
def _cluster(bam_file, prepare_dir, out_dir, reference, annotation_file=None):
"""
Connect to seqcluster to run cluster with python directly
"""
seqcluster = op.join(os.path.dirname(sys.executable), "seqcluster")
ma_file = op.join(prepare_dir, "seqs.ma")
# cl = ["cluster", "-o", out_dir, "-m", ma_file, "-a", bam_file, "-r", reference]
if annotation_file:
annotation_file = "-g " + annotation_file
else:
annotation_file = ""
if not file_exists(op.join(out_dir, "counts.tsv")):
cmd = ("{seqcluster} cluster -o {out_dir} -m {ma_file} -a {bam_file} -r {reference} {annotation_file}")
do.run(cmd.format(**locals()), "Running seqcluster.")
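        # Rendered command (illustrative, hypothetical paths), e.g.:
        #   seqcluster cluster -o <out_dir> -m .../prepare/seqs.ma -a .../align/seqs.bam -r genome.fa -g srna.gtf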
return out_dir
def _report(data, reference):
"""
Run report of seqcluster to get browser options for results
"""
seqcluster = op.join(os.path.dirname(sys.executable), "seqcluster")
work_dir = dd.get_work_dir(data)
out_dir = safe_makedir(os.path.join(work_dir, "seqcluster", "report"))
out_file = op.join(out_dir, "seqcluster.db")
json = op.join(work_dir, "seqcluster", "cluster", "seqcluster.json")
cmd = ("{seqcluster} report -o {out_dir} -r {reference} -j {json}")
if not file_exists(out_file):
do.run(cmd.format(**locals()), "Run report on clusters")
return out_file
def report(data):
"""Create a Rmd report for small RNAseq analysis"""
work_dir = dd.get_work_dir(data[0][0])
out_dir = op.join(work_dir, "report")
safe_makedir(out_dir)
summary_file = op.join(out_dir, "summary.csv")
with file_transaction(summary_file) as out_tx:
with open(out_tx, 'w') as out_handle:
print >>out_handle, "sample_id,size_stats,miraligner,group"
for sample in data:
info = sample[0]
group = _guess_group(info)
files = info["seqbuster"] if "seqbuster" in info else "None"
print >>out_handle, ",".join([dd.get_sample_name(info),
info["size_stats"],
files, group])
_create_rmd(summary_file)
return summary_file
def _guess_group(info):
"""Add the first group to get report with some factor"""
value = "fake"
if "metadata" in info:
if info["metadata"]:
key, value = info['metadata'].popitem()
return value
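# Example: metadata {"condition": "tumor"} -> "tumor"; with no metadata the
# placeholder group "fake" is used so the report still has a factor column.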
def _create_rmd(summary_fn):
"""Create relatie path files for Rmd report"""
root_path, fn = op.split(os.path.abspath(summary_fn))
out_file = op.join(root_path, fn.replace(".csv", "_re.csv"))
with open(summary_fn) as in_handle:
with open(out_file, 'w') as out_handle:
for line in in_handle:
cols = line.strip().split(",")
fix_line = ",".join([op.relpath(c, root_path) if op.exists(c) else c for c in cols])
print >>out_handle, fix_line
report_file = _modify_report(root_path, out_file)
return out_file, report_file
def _modify_report(summary_path, summary_fn):
"""Read Rmd template and dump with project path."""
summary_path = op.abspath(summary_path)
template = op.normpath(op.join(op.dirname(op.realpath(template_seqcluster.__file__)), "report.rmd"))
content = open(template).read()
out_content = string.Template(content).safe_substitute({'path_abs': summary_path,
'path_summary': os.path.join(summary_path, summary_fn)})
out_file = op.join(op.dirname(summary_fn), "srna_report.rmd")
with open(out_file, 'w') as out_handle:
print >>out_handle, out_content
return out_file
def _make_isomir_counts(data, srna_type="seqbuster", out_dir=None, stem=""):
"""
Parse miraligner files to create count matrix.
"""
work_dir = dd.get_work_dir(data[0][0])
if not out_dir:
out_dir = op.join(work_dir, "mirbase")
out_novel_isomir = append_stem(op.join(out_dir, "counts.tsv"), stem)
out_novel_mirna = append_stem(op.join(out_dir, "counts_mirna.tsv"), stem)
logger.debug("Create %s count data at %s." % (srna_type, out_dir))
if file_exists(out_novel_mirna):
return [out_novel_mirna, out_novel_isomir]
out_dts = []
for sample in data:
if sample[0].get(srna_type):
miraligner_fn = sample[0][srna_type]
reads = _read_miraligner(miraligner_fn)
if reads:
out_file, dt, dt_pre = _tab_output(reads, miraligner_fn + ".back", dd.get_sample_name(sample[0]))
out_dts.append(dt)
else:
logger.debug("WARNING::%s has NOT miRNA annotated for %s. Check if fasta files is small or species value." % (dd.get_sample_name(sample[0]), srna_type))
if out_dts:
out_files = _create_counts(out_dts, out_dir)
out_files = [move_safe(out_files[0], out_novel_isomir), move_safe(out_files[1], out_novel_mirna)]
return out_files
else:
logger.debug("WARNING::any samples have miRNA annotated for %s. Check if fasta files is small or species value." % srna_type)
|
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# System imports
import os
import sys
import logging
from mock.mock import patch
# Local imports
from stacks.utils.RMFTestCase import *
import resource_management.libraries.functions.file_system
COMMON_SERVICES_ALERTS_DIR = "HDFS/2.1.0.2.0/package/alerts"
DATA_DIR_MOUNT_HIST_FILE_PATH = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
file_path = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(file_path)))))
file_path = os.path.join(file_path, "main", "resources", "common-services", COMMON_SERVICES_ALERTS_DIR)
RESULT_STATE_OK = "OK"
RESULT_STATE_WARNING = "WARNING"
RESULT_STATE_CRITICAL = "CRITICAL"
RESULT_STATE_UNKNOWN = "UNKNOWN"
class TestAlertDataNodeUnmountedDataDir(RMFTestCase):
def setUp(self):
"""
Import the class under test.
Because the class is present in a different folder, append its dir to the system path.
Also, shorten the import name and make it a global so the test functions can access it.
:return:
"""
self.logger = logging.getLogger()
sys.path.append(file_path)
global alert
import alert_datanode_unmounted_data_dir as alert
def test_missing_configs(self):
"""
Check that the status is UNKNOWN when configs are missing.
"""
configs = {}
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_UNKNOWN)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue('is a required parameter for the script' in messages[0])
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": ""
}
[status, messages] = alert.execute(configurations=configs)
self.assertNotEqual(status, RESULT_STATE_UNKNOWN)
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_mount_history_file_does_not_exist(self, is_dir_mock, exists_mock, get_mount_mock):
"""
Test that the status is WARNING when the data dirs are mounted on root, but the mount history file
does not exist.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data"
}
# Mock calls
exists_mock.return_value = False
is_dir_mock.return_value = True
get_mount_mock.return_value = "/"
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_WARNING)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue("{0} was not found".format(DATA_DIR_MOUNT_HIST_FILE_PATH) in messages[0])
@patch("resource_management.libraries.functions.mounted_dirs_helper.get_dir_to_mount_from_file")
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_all_dirs_on_root(self, is_dir_mock, exists_mock, get_mount_mock, get_data_dir_to_mount_from_file_mock):
"""
Test that the status is OK when all drives are mounted on the root partition
and this coincides with the expected values.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data"
}
# Mock calls
exists_mock.return_value = True
is_dir_mock.return_value = True
get_mount_mock.return_value = "/"
get_data_dir_to_mount_from_file_mock.return_value = {"/grid/0/data": "/",
"/grid/1/data": "/",
"/grid/2/data": "/"}
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_OK)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue("The following data dir(s) are valid" in messages[0])
@patch("resource_management.libraries.functions.mounted_dirs_helper.get_dir_to_mount_from_file")
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_match_expected(self, is_dir_mock, exists_mock, get_mount_mock, get_data_dir_to_mount_from_file_mock):
"""
Test that the status is OK when the mount points match the expected values.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data"
}
# Mock calls
exists_mock.return_value = True
is_dir_mock.return_value = True
get_mount_mock.side_effect = ["/device1", "/device2", "/"]
get_data_dir_to_mount_from_file_mock.return_value = {"/grid/0/data": "/device1",
"/grid/1/data": "/device2",
"/grid/2/data": "/"}
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_OK)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue("The following data dir(s) are valid" in messages[0])
@patch("resource_management.libraries.functions.mounted_dirs_helper.get_dir_to_mount_from_file")
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_critical_one_root_one_mounted(self, is_dir_mock, exists_mock, get_mount_mock, get_data_dir_to_mount_from_file_mock):
"""
Test that the status is CRITICAL when the history file is missing
and at least one data dir is on a mount and at least one data dir is on the root partition.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data"
}
# Mock calls
exists_mock.return_value = False
is_dir_mock.return_value = True
# The first 2 data dirs will report an error.
get_mount_mock.side_effect = ["/", "/", "/device1", "/device2"]
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_CRITICAL)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue("Detected at least one data dir on a mount point, but these are writing to the root partition:\n/grid/0/data\n/grid/1/data" in messages[0])
@patch("resource_management.libraries.functions.mounted_dirs_helper.get_dir_to_mount_from_file")
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_critical_unmounted(self, is_dir_mock, exists_mock, get_mount_mock, get_data_dir_to_mount_from_file_mock):
"""
Test that the status is CRITICAL when the history file exists and one of the dirs
became unmounted.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data"
}
# Mock calls
exists_mock.return_value = True
is_dir_mock.return_value = True
get_mount_mock.side_effect = ["/", "/", "/device3", "/device4"]
get_data_dir_to_mount_from_file_mock.return_value = {"/grid/0/data": "/", # remained on /
"/grid/1/data": "/device2", # became unmounted
"/grid/2/data": "/", # became mounted
"/grid/3/data": "/device4"} # remained mounted
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_CRITICAL)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue("Detected data dir(s) that became unmounted and are now writing to the root partition:\n/grid/1/data" in messages[0])
@patch("resource_management.libraries.functions.mounted_dirs_helper.get_dir_to_mount_from_file")
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_file_uri_and_meta_tags(self, is_dir_mock, exists_mock, get_mount_mock, get_data_dir_to_mount_from_file_mock):
"""
Test that the status is OK when the locations include file:// schemes and meta tags.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}":"[SSD]file:///grid/0/data"
}
# Mock calls
exists_mock.return_value = True
is_dir_mock.return_value = True
get_mount_mock.return_value = "/"
get_data_dir_to_mount_from_file_mock.return_value = {"/grid/0/data":"/"}
[status, messages] = alert.execute(configurations = configs)
self.assertEqual(status, RESULT_STATE_OK)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertEqual("The following data dir(s) are valid:\n/grid/0/data", messages[0])
|
|
import GPy
import numpy as np
import pytest
from scipy.optimize import check_grad
import emukit.multi_fidelity.models
from emukit.multi_fidelity.models.non_linear_multi_fidelity_model import make_non_linear_kernels
class TestNonLinearModel:
@pytest.fixture()
def x_init(self):
x_init = np.zeros((15, 3))
for i in range(3):
x_init[i * 5 : (i + 1) * 5, :2] = np.random.randn(5, 2)
x_init[i * 5 : (i + 1) * 5, 2] = i
return x_init
@pytest.fixture()
def y_init(self):
y_init = np.zeros((15, 1))
for i in range(3):
y_init[i * 5 : (i + 1) * 5, :] = np.random.randn(5, 1)
return y_init
@pytest.fixture()
def non_linear_model(self, x_init, y_init):
"""
Creates a NonLinearModel instance to use in tests
"""
np.random.seed(123)
base_kernel = GPy.kern.RBF
kernel = make_non_linear_kernels(base_kernel, len(x_init), x_init.shape[1] - 1)
model = emukit.multi_fidelity.models.NonLinearMultiFidelityModel(x_init, y_init, 3, kernel, n_samples=3)
return model
def test_invalid_kernel(self, x_init, y_init):
"""
        Check that a sensible error is thrown if we pass in a kernel instance rather than a class definition
"""
base_kernel = GPy.kern.RBF(1)
with pytest.raises(TypeError):
emukit.multi_fidelity.models.NonLinearMultiFidelityModel(x_init, y_init, base_kernel, n_samples=70)
def test_invalid_input(self, x_init, y_init):
"""
Test for sensible error message if we pass arrays rather than lists to constructor
"""
base_kernel = GPy.kern.RBF
X_init = np.random.rand(5, 3)
Y_init = np.random.rand(5, 3)
with pytest.raises(TypeError):
emukit.multi_fidelity.models.NonLinearMultiFidelityModel([X_init], [Y_init], base_kernel, n_samples=70)
with pytest.raises(TypeError):
emukit.multi_fidelity.models.NonLinearMultiFidelityModel([X_init], Y_init, base_kernel, n_samples=70)
def test_get_fmin(self, non_linear_model):
"""
Tests get_fmin returns the correct value
"""
min_value = non_linear_model.get_f_minimum()
assert min_value == non_linear_model.models[-1].Y.min()
def test_optimize(self, non_linear_model):
"""
Tests the optimization doesn't fail
"""
non_linear_model.optimize()
def test_update(self, non_linear_model):
"""
Tests updating the model works
"""
x = np.zeros((15, 3))
for i in range(3):
x[i * 5 : (i + 1) * 5, :2] = np.random.randn(5, 2)
x[i * 5 : (i + 1) * 5, 2] = i
y = np.zeros((15, 1))
for i in range(3):
y[i * 5 : (i + 1) * 5, :] = np.random.randn(5, 1)
non_linear_model.set_data(x, y)
assert non_linear_model.models[0].X.shape == (5, 2)
assert non_linear_model.models[1].X.shape == (5, 3)
assert non_linear_model.models[2].X.shape == (5, 3)
assert non_linear_model.models[0].Y.shape == (5, 1)
assert non_linear_model.models[1].Y.shape == (5, 1)
assert non_linear_model.models[2].Y.shape == (5, 1)
def test_X(self, non_linear_model):
assert isinstance(non_linear_model.X, np.ndarray)
assert non_linear_model.X.ndim == 2
assert non_linear_model.X.shape == (15, 3)
def test_Y(self, non_linear_model):
assert isinstance(non_linear_model.Y, np.ndarray)
assert non_linear_model.Y.ndim == 2
assert non_linear_model.Y.shape == (15, 1)
def test_non_linear_model_with_3_fidelities(self, non_linear_model):
"""
Test that prediction gradients with 3 fidelities don't fail and shapes are correct
"""
x_test = np.random.rand(2, 3)
x_test[:, -1] = 2
dmean_dx, dvar_dx = non_linear_model.get_prediction_gradients(x_test)
assert dmean_dx.shape == (2, 2)
assert dvar_dx.shape == (2, 2)
def test_non_linear_model_prediction(self, non_linear_model):
"""
Test the model prediction doesn't fail and shapes are correct
"""
X = np.random.rand(2, 3)
X[:, -1] = 2
mean, var = non_linear_model.predict(X)
assert mean.shape == (2, 1)
assert var.shape == (2, 1)
def test_non_linear_model_prediction_with_grads(self, non_linear_model):
"""
Test that the prediction gradients don't fail and shapes are correct
"""
x_test = np.random.rand(2, 3)
x_test[:, -1] = 2
dmean_dx, dvar_dx = non_linear_model.get_prediction_gradients(x_test)
assert dmean_dx.shape == (2, 2)
assert dvar_dx.shape == (2, 2)
@pytest.mark.parametrize(
"fidelity_idx,func_idx,grad_idx",
[
pytest.param(2, 0, 1, id="mean_gradient_highest_fidelity"),
pytest.param(2, 2, 3, id="var_gradient_highest_fidelity"),
pytest.param(1, 0, 1, id="mean_gradient_middle_fidelity"),
pytest.param(1, 2, 3, id="var_gradient_middle_fidelity"),
pytest.param(0, 0, 1, id="mean_gradient_lowest_fidelity"),
pytest.param(0, 2, 3, id="var_gradient_lowest_fidelity"),
],
)
def test_non_linear_sample_fidelities_gradient(self, non_linear_model, fidelity_idx, func_idx, grad_idx):
np.random.seed(1234)
x0 = np.random.rand(2)
func = lambda x: np.sum(
non_linear_model._predict_samples_with_gradients(x[None, :], fidelity_idx)[func_idx], axis=0
)
grad = lambda x: np.sum(
non_linear_model._predict_samples_with_gradients(x[None, :], fidelity_idx)[grad_idx], axis=0
)
assert check_grad(func, grad, x0) < 1e-6
def test_non_linear_model_mean_gradient(self, non_linear_model):
"""
Check the gradient of the mean prediction is correct
"""
np.random.seed(1234)
x0 = np.random.rand(2)
# wrap function so fidelity index doesn't change
def wrap_func(x):
x_full = np.concatenate([x[None, :], [[2]]], axis=1)
mean, variance = non_linear_model.predict(x_full)
return mean[0]
def wrap_gradients(x):
x_full = np.concatenate([x[None, :], [[2]]], axis=1)
return non_linear_model.get_prediction_gradients(x_full)[0]
assert np.all(check_grad(wrap_func, wrap_gradients, x0) < 1e-6)
def test_non_linear_model_variance_gradient(self, non_linear_model):
"""
Check the gradient of the predictive variance is correct
"""
np.random.seed(1234)
x0 = np.random.rand(2)
# wrap function so fidelity index doesn't change
def wrap_func(x):
x_full = np.concatenate([x[None, :], [[2]]], axis=1)
mean, variance = non_linear_model.predict(x_full)
return variance[0]
def wrap_gradients(x):
x_full = np.concatenate([x[None, :], [[2]]], axis=1)
return non_linear_model.get_prediction_gradients(x_full)[1]
assert np.all(check_grad(wrap_func, wrap_gradients, x0) < 1e-6)
def test_non_linear_kernel_ard():
"""
Test that the kernels that act on the input space have the correct number of lengthscales when ARD is true
"""
kernels = make_non_linear_kernels(GPy.kern.RBF, 2, 2, ARD=True)
assert len(kernels[0].lengthscale) == 2
assert len(kernels[1].bias_kernel_fidelity2.lengthscale) == 2
assert len(kernels[1].mul.scale_kernel_fidelity2.lengthscale) == 2
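# A small illustrative sketch (mirroring the fixtures above, not part of the test suite):
# NonLinearMultiFidelityModel expects a single augmented array whose last column holds the
# fidelity index, plus one kernel per fidelity from make_non_linear_kernels.
def _example_two_fidelity_model():
    np.random.seed(0)
    x = np.zeros((10, 3))
    y = np.zeros((10, 1))
    for i in range(2):
        x[i * 5 : (i + 1) * 5, :2] = np.random.randn(5, 2)
        x[i * 5 : (i + 1) * 5, 2] = i  # fidelity index column
        y[i * 5 : (i + 1) * 5, :] = np.random.randn(5, 1)
    kernels = make_non_linear_kernels(GPy.kern.RBF, 2, x.shape[1] - 1)
    model = emukit.multi_fidelity.models.NonLinearMultiFidelityModel(x, y, 2, kernels, n_samples=3)
    mean, var = model.predict(x[5:, :])  # predict at the highest fidelity
    return mean.shape, var.shape  # both (5, 1)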
|
|
import unittest
from covermi.gr import Gr, Entry, Chrom, Iterate, bisect_left
class TestEntry(unittest.TestCase):
def test_constructor(self):
self.assertEqual(Entry("1", 10, 20), Entry("chr1", 10, 20))
self.assertEqual(Entry("23", 10, 20), Entry("23", 10, 20))
self.assertEqual(Entry("23", 10, 20), Entry(23, 10, 20))
self.assertEqual(Entry(23, 10, 20), Entry("chrX", 10, 20))
self.assertEqual(Entry(23, 10, 20), Entry("X", 10, 20))
self.assertEqual(Entry(23, 10, 20), Entry("X", 10, 20, "?"))
self.assertEqual(Entry(23, 10, 20), Entry("X", 10, 20, "?", "+"))
self.assertEqual(Entry(23, 10, 20).name, ".")
self.assertEqual(Entry(23, 10, 20).strand, ".")
with self.assertRaises(KeyError):
Entry(0, 10, 20)
Entry(26, 10, 20)
Entry("?", 10, 20)
def test_repr(self):
self.assertEqual(eval(repr(Entry(23, 10, 20))), Entry(23, 10, 20,))
self.assertEqual(repr(Entry(23, 10, 20, "?", "+")), 'Entry("chrX", 10, 20, "?", "+")')
def test_location(self):
self.assertEqual(Entry(23, 10, 20).location, "chrX:10-20")
def test_eq_hash(self):
self.assertNotEqual(Entry(23, 10, 20), set([Entry(23, 10, 21)]))
self.assertNotEqual(Entry(23, 10, 20), set([Entry(23, 9, 2)]))
self.assertNotEqual(Entry(23, 10, 20), set([Entry(21, 10, 21)]))
self.assertIn(Entry(23, 10, 20, "?", "+"), set([Entry(23, 10, 20)]))
self.assertNotIn(Entry(23, 10, 20, "?", "+"), set([Entry(23, 11, 20, "?", "+")]))
class TestIterate(unittest.TestCase):
def test_bisect_left(self):
a = Entry(1, 5, 9)
b = Entry(1, 10, 14)
c = Entry(1, 20, 24)
d = Entry(1, 30, 34)
e = Entry(1, 40, 44)
array = Gr([e, d, c, b, a]).data[1]
self.assertEqual(bisect_left(array, 30, 0, len(array)), 3)
array = Gr([e, d, c, b, a, Entry(1, 100, 130)]).data[1]
self.assertEqual(bisect_left(array, 30, 0, len(array)), 0)
array = Gr([e, d, c, b, a, Entry(1, 100, 120)]).data[1]
self.assertEqual(bisect_left(array, 30, 0, len(array)), 1)
array = Gr([e, d, c, b, a, Entry(1, 100, 200)]).data[1]
self.assertEqual(bisect_left(array, 30, 0, len(array)), 0)
array = Gr([e, d, c, b, a, Entry(1, 100, 120)]).data[1]
self.assertEqual(bisect_left(array, 400, 0, len(array)), len(array))
def test_iterate(self):
a = Entry(1, 5, 9)
b = Entry(1, 10, 14)
c = Entry(1, 20, 24)
d = Entry(1, 30, 34)
e = Entry(1, 40, 44)
array = Gr([e, d, c, b, a]).data[1]
self.assertEqual(list(Iterate(array).yield_overlapping(1, 4)), [])
self.assertEqual(list(Iterate(array).yield_overlapping(1, 5)), [a])
self.assertEqual(list(Iterate(array).yield_overlapping(24, 30)), [c, d])
self.assertEqual(list(Iterate(array).yield_overlapping(24, 40)), [c, d, e])
self.assertEqual(list(Iterate(array).yield_overlapping(24, 1000)), [c, d, e])
array = Gr([e, d, c, b, a, Entry(1, 1000, 2000)]).data[1]
self.assertEqual(list(Iterate(array).yield_overlapping(29, 35)), [d])
x = Entry(1, 9, 30)
array = Gr([e, d, c, b, a, x]).data[1]
self.assertEqual(list(Iterate(array).yield_overlapping(30, 30)), [x, d])
class TestGr(unittest.TestCase):
def test_repr(self):
gr = Gr([Entry(23, 10, 20), Entry(1, 56, 88, "?", "+")])
self.assertEqual(eval(repr(gr)), gr)
self.assertEqual(repr(gr), 'Gr([Entry("1", 56, 88, "?", "+"), Entry(Chrom(23), 10, 20, ".", ".")])')
def test_initialisation(self):
a = Entry(1, 5, 9, "c")
b = Entry(1, 10, 20, "d")
c = Entry(1, 10, 21, "c")
d = Entry(1, 30, 40, "a")
e = Entry(2, 10, 20, "b")
gr = Gr([e, d, c, b, a])
self.assertEqual(gr, Gr([a, b, c, d, e]))
self.assertEqual(list(gr), [a, b, c, d, e])
def test_properties(self):
a = Entry(1, 5, 9, "c")
b = Entry(1, 10, 20, "d")
c = Entry(1, 10, 21, "c")
d = Entry(1, 30, 40, "a")
e = Entry(2, 10, 20, "b")
gr = Gr([e, d, c, b, a])
self.assertEqual(gr.number_of_components, 5)
self.assertEqual(gr.bases, 50)
self.assertEqual(gr.is_empty, False)
self.assertEqual(Gr().is_empty, True)
self.assertEqual(gr.names, ["a", "b", "c", "d"])
self.assertEqual(gr.names_as_string, "a, b, c, d") # Need to test exons
self.assertEqual(gr.locations_as_string, "chr1:5-9, chr1:10-20, chr1:10-21, chr1:30-40, chr2:10-20")
self.assertEqual(list(gr.sorted_by_name), [d, e, a, c, b])
def test_hash_eq(self): # test with variants
a = Entry(1, 5, 9, "c")
b = Entry(1, 10, 20, "d")
c = Entry(1, 10, 21, "c")
d = Entry(1, 30, 40, "a")
e = Entry(2, 10, 20, "b")
gr = Gr([e, d, c, b, a])
self.assertEqual(gr, Gr([a, b, c, d, e]))
self.assertNotEqual(gr, Gr([b, c, d, e]))
self.assertNotEqual(gr, Gr([a, a, b, c, d, e]))
self.assertIn(gr, set([Gr([a, b, c, d, e])]))
self.assertNotIn(gr, set([Gr([b, c, d, e])]))
self.assertNotIn(gr, set([Gr([a, a, b, c, d, e])]))
def merged(self):
a = Entry(1, 5, 9, "c")
b = Entry(1, 10, 20, "d")
c = Entry(1, 11, 15, "c")
d = Entry(1, 17, 40, "a")
e = Entry(1, 42, 60, "b")
f = Entry(2, 42, 60, "e")
self.assertEqual(Gr([a, a, a]).merged, Gr([a]))
self.assertEqual(Gr([a, b, c, d, e, f]).merged, Gr([Entry(1, 5, 40), e, f]))
self.assertEqual(Gr([b, e]).merged, Gr([b, e]))
self.assertEqual(Gr([b, c]).merged, Gr([b]))
def test_inverted(self):
a = Entry(1, 5, 9, "c")
b = Entry(1, 10, 20, "d")
c = Entry(1, 10, 21, "c")
d = Entry(1, 30, 40, "a")
e = Entry(2, 10, 20, "b")
gr = Gr([a])
self.assertEqual(gr.inverted.inverted, gr)
self.assertEqual(gr.inverted.number_of_components, 26)
self.assertEqual(len(set(entry.chrom for entry in gr.inverted)), 25)
gr = Gr([e, d, c, b, a])
self.assertEqual(gr.inverted.inverted, gr.merged)
self.assertEqual(gr.inverted.number_of_components, 28)
def test_overlapped_by(self):
a = Entry(1, 5, 9)
b = Entry(1, 10, 20)
c = Entry(1, 11, 15)
d = Entry(1, 17, 40)
e = Entry(1, 42, 60)
f = Entry(2, 42, 60)
gr = Gr([a, b, c, d, e, f])
gr2 = Gr([a, b, c, d, e, f]*2)
x = Entry(1, 12, 13)
self.assertEqual(gr.overlapped_by(Gr([Entry(1,2,3)])), Gr())
self.assertEqual(gr.overlapped_by(gr), gr)
self.assertEqual(gr.overlapped_by(gr2), gr)
self.assertEqual(gr2.overlapped_by(gr), gr2)
self.assertEqual(gr2.overlapped_by(gr2), gr2)
self.assertEqual(gr.overlapped_by(Gr([x])), Gr([x]*2))
self.assertEqual(gr.overlapped_by(Gr([Entry(1, 9, 10)])), Gr([Entry(1, 9, 9), Entry(1, 10, 10)]))
self.assertEqual(Gr([Entry(1, 2, 200)]).overlapped_by(gr), Gr([Entry(1, 5, 40), e]))
def test_touched_by(self):
a = Entry(1, 5, 9)
b = Entry(1, 10, 20)
c = Entry(1, 11, 15)
d = Entry(1, 17, 40)
e = Entry(1, 42, 60)
f = Entry(2, 42, 60)
gr = Gr([a, b, c, d, e, f])
self.assertEqual(gr.touched_by(Gr([Entry(1, 3, 4)])), Gr())
self.assertEqual(gr.touched_by(Gr([Entry(1, 5, 5)])), Gr([a]))
self.assertEqual(gr.touched_by(Gr([Entry(1, 1, 100)])), Gr([a, b, c, d, e]))
self.assertEqual(gr.touched_by(Gr([Entry(1, 16, 17)])), Gr([b, d]))
self.assertEqual(Gr([b]+[c]*1000+[d]).touched_by(Gr([Entry(1, 19, 19)])), Gr([b, d]))
self.assertEqual(gr.touched_by(Gr([Entry(1, 3, 4)]*2)), Gr())
self.assertEqual(gr.touched_by(Gr([Entry(1, 5, 5)]*2)), Gr([a]))
self.assertEqual(gr.touched_by(Gr([Entry(1, 1, 100)]*2)), Gr([a, b, c, d, e]))
self.assertEqual(gr.touched_by(Gr([Entry(1, 16, 17)]*2)), Gr([b, d]))
self.assertEqual(Gr([b]+[c]*1000+[d]).touched_by(Gr([Entry(1, 19, 19)]*2)), Gr([b, d]))
def test_not_touched_by(self):
a = Entry(1, 5, 9)
b = Entry(1, 10, 20)
c = Entry(1, 11, 15)
d = Entry(1, 17, 40)
e = Entry(1, 42, 60)
f = Entry(2, 42, 60)
gr = Gr([a, b, c, d, e, f])
self.assertEqual(gr.not_touched_by(Gr([Entry(1, 3, 4)])), gr)
self.assertEqual(gr.not_touched_by(Gr([Entry(1, 5, 5)])), Gr([b, c, d, e, f]))
self.assertEqual(gr.not_touched_by(Gr([Entry(1, 1, 100)])), Gr([f]))
self.assertEqual(gr.not_touched_by(Gr([Entry(1, 16, 17)])), Gr([a, c, e, f]))
self.assertEqual(Gr([b]+[c]*1000+[d]).not_touched_by(Gr([Entry(1, 19, 19)])), Gr([c]*1000))
self.assertEqual(gr.not_touched_by(Gr([Entry(1, 3, 4)]*2)), gr)
self.assertEqual(gr.not_touched_by(Gr([Entry(1, 5, 5)]*2)), Gr([b, c, d, e, f]))
self.assertEqual(gr.not_touched_by(Gr([Entry(1, 1, 100)]*2)), Gr([f]))
self.assertEqual(gr.not_touched_by(Gr([Entry(1, 16, 17)]*2)), Gr([a, c, e, f]))
self.assertEqual(Gr([b]+[c]*1000+[d]).not_touched_by(Gr([Entry(1, 19, 19)]*2)), Gr([c]*1000))
def test_subranges_covered_by(self):
a = Entry(1, 5, 9)
b = Entry(1, 10, 20)
c = Entry(1, 11, 15)
d = Entry(1, 17, 40)
e = Entry(1, 42, 60)
f = Entry(2, 42, 60)
x = Entry(1, 16, 30)
gr = Gr([a, x])
self.assertEqual(gr.subranges_covered_by(Gr([b])), Gr())
self.assertEqual(gr.subranges_covered_by(Gr([b, c, d])), Gr([x]))
self.assertEqual(gr.subranges_covered_by(Gr([b]*2)), Gr())
self.assertEqual(gr.subranges_covered_by(Gr([b, c, d]*2)), Gr([x]))
def test_combined_with(self):
a = Entry(1, 5, 9)
b = Entry(1, 10, 20)
c = Entry(1, 11, 15)
d = Entry(1, 17, 40)
e = Entry(1, 42, 60)
gr = Gr([a, b, c, d, e])
self.assertEqual(gr.combined_with(gr), Gr([a, b, c, d, e]*2))
self.assertEqual(Gr([a]).combined_with(Gr([b])), Gr([a, b]))
def test_subset(self): # need to test exons
a = Entry(1, 5, 9, "x")
b = Entry(1, 10, 20, "b")
c = Entry(1, 10, 21, "x")
d = Entry(1, 30, 40, "d")
e = Entry(2, 10, 20, "x")
gr = Gr([a, b, c, d, e])
self.assertEqual(gr.subset(["x"]), Gr([a, c, e]))
if __name__ == '__main__':
unittest.main()
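# A small illustrative sketch (mirroring the behaviour exercised above): Entry normalizes
# chromosome identifiers, so numeric, "chr"-prefixed and letter forms of the same chromosome
# compare equal, and Gr returns its entries sorted by position.
def _example_entry_normalization():
    assert Entry(23, 10, 20) == Entry("chrX", 10, 20) == Entry("X", 10, 20)
    gr = Gr([Entry(1, 30, 40), Entry(1, 5, 9)])
    return list(gr)  # position-sorted: chr1:5-9, then chr1:30-40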
|
|
""" Functions and Classes used to fit an estimate of an unabsorbed
continuum to a QSO spectrum.
"""
# p2.6+ compatibility
from __future__ import division, print_function, unicode_literals
try:
unicode
except NameError:
unicode = basestring = str
raw_input = input  # py3: the interactive prompts below use raw_input
import numpy as np
import matplotlib.pyplot as pl
import matplotlib.transforms as mtran
from .stats import Gaussian
from .utilities import between, stats, indexnear
from .convolve import convolve_psf
from .io import loadobj, saveobj
from .interp import AkimaSpline
from .sed import qso_template
import os
def spline_continuum(wa, fl, er, edges, minfrac=0.01, nsig=3.0,
resid_std=1.3, debug=False):
""" Fit a continuum to a chunk of a spectrum.
Very loosely based on the method in Aguirre et al. 2002.
Parameters
----------
wa : Wavelengths.
fl : Fluxes.
er : One sigma errors.
edges : Wavelengths giving the chunk edges.
minfrac = 0.01 : At least this fraction of pixels in a single chunk
contributes to the fit.
nsig = 3.0 : No. of sigma for rejection for clipping.
resid_std = 1.3 : Maximum residual st. dev. in a given chunk.
debug = False : If True, make helpful plots.
Returns
-------
Continuum array and spline points
"""
# Overview:
# (1) Calculate the median flux value for each wavelength chunk.
# (2) fit a 1st order spline (i.e. series of straight line
# segments) through the set of points given by the central
# wavelength for each chunk and the median flux value for each
# chunk.
# (3) Remove any flux values that fall more than nsig*er below
# the spline.
# Repeat 1-3 until the continuum converges on a solution (if it
# doesn't, throw your hands up in despair! It is essential to choose
# a suitable first guess with small enough chunks).
if len(edges) < 2:
raise ValueError('must be at least two bin edges!')
wa,fl,er = (np.asarray(a, np.float64) for a in (wa,fl,er))
if debug:
ax = pl.gca()
ax.cla()
ax.plot(wa,fl)
ax.plot(wa,er)
ax.axhline(0, color='0.7')
good = ~np.isnan(fl) & ~np.isnan(er) & ~np.isinf(fl)
ymax = 2 * np.percentile(fl[good], 90)
ax.set_ylim(-0.1*ymax, ymax)
ax.set_xlim(min(edges), max(edges))
ax.set_autoscale_on(0)
pl.draw()
npts = len(wa)
mask = np.ones(npts, bool)
oldco = np.zeros(npts, float)
co = np.zeros(npts, float)
# find indices of chunk edges and central wavelengths of chunks
indices = wa.searchsorted(edges)
indices = [(i0,i1) for i0,i1 in zip(indices[:-1],indices[1:])]
if debug: print(' indices', indices)
wavc = [0.5*(w1 + w2) for w1,w2 in zip(edges[:-1],edges[1:])]
# information per chunks
npts = len(indices)
mfl = np.zeros(npts, float) # median fluxes at chunk centres
goodfit = np.zeros(npts, bool) # is fit acceptable?
res_std = np.zeros(npts, float) # residuals standard dev
res_med = np.zeros(npts, float) # residuals median
if debug:
print('chunk centres', wavc)
cont, = ax.plot(wa,co,'k')
midpoints, = ax.plot(wavc, mfl,'rx',mew=1.5,ms=8)
# loop that iterative fits continuum
while True:
for i,(j1,j2) in enumerate(indices):
if goodfit[i]: continue
# calculate median flux
#print(i,j1,j2)
w,f,e,m = (item[j1:j2] for item in (wa,fl,er,mask))
ercond = (e > 0) & (~np.isnan(f))
cond = m & ercond
chfl = f[cond]
chflgood = f[ercond]
if len(chflgood) == 0: continue
#print(len(chfl), len(chflgood))
if float(len(chfl)) / len(chflgood) < minfrac:
f_cutoff = np.percentile(chflgood, minfrac)
cond = ercond & (f >= f_cutoff)
if len(f[cond]) == 0: continue
mfl[i] = np.median(f[cond])
# calculate the spline. add extra points on either end to give
# a nice slope at the end points.
extwavc = ([wavc[0] - (wavc[1] - wavc[0])] + list(wavc) +
[wavc[-1] + (wavc[-1] - wavc[-2])])
extmfl = ([mfl[0] - (mfl[1] - mfl[0])] + list(mfl) +
[mfl[-1] + (mfl[-1] - mfl[-2])])
co = np.interp(wa, extwavc, extmfl)
if debug:
cont.set_ydata(co)
midpoints.set_xdata(wavc)
midpoints.set_ydata(mfl)
pl.draw()
# calculate residuals for each chunk
for i,(j1,j2) in enumerate(indices):
if goodfit[i]: continue
ercond = er[j1:j2] > 0
cond = ercond & mask[j1:j2]
chfl = fl[j1:j2][cond]
chflgood = fl[j1:j2][ercond]
if len(chflgood) == 0: continue
if float(len(chfl)) / len(chflgood) < minfrac:
f_cutoff = np.percentile(chflgood, minfrac)
cond = ercond & (fl[j1:j2] > f_cutoff)
#print(len(co), len(fl), i1, j1, j2)
residuals = (fl[j1:j2][cond] - co[j1:j2][cond]
) / er[j1:j2][cond]
res_std[i] = residuals.std()
if len(residuals) == 0:
continue
res_med[i] = np.median(residuals)
# If residuals have std < 1.0 and mean ~1.0, we might have
# a reasonable fit.
if res_std[i] <= resid_std:
goodfit[i] = True
if debug:
print('median and st. dev. of residuals by region - aiming for 0,1')
for i,(f0,f1) in enumerate(zip(res_med, res_std)):
print('{0} {1:.2f} {2:.2f}'.format(i, f0, f1))
raw_input('Enter...')
# (3) Remove flux values that fall more than N*sigma below the
# spline fit.
cond = (co - fl) > nsig * er
if debug:
print(np.nanmax(np.abs(co - oldco)/co))
# Finish when the biggest change between the new and old
# medians is smaller than the number below.
if np.nanmax(np.abs(co - oldco)/co) < 4e-3:
break
oldco = co.copy()
mask[cond] = False
# finally fit a cubic spline through the median values to
# get a smooth continuum.
final = AkimaSpline(wavc, mfl)
return final(wa), list(zip(wavc,mfl))
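# A minimal usage sketch (illustrative only; the wavelength range and chunk edges are
# made-up values): fit a continuum to a noisy synthetic spectrum by passing wavelength,
# flux and one-sigma error arrays along with the chunk edges.
def _example_spline_continuum():
    wa = np.linspace(4000., 5000., 2000)
    fl = 1.0 + np.random.normal(0, 0.05, wa.size)
    er = np.full_like(wa, 0.05)
    edges = np.linspace(4000., 5000., 11)  # ten chunks across the spectrum
    co, knots = spline_continuum(wa, fl, er, edges)
    return co, knots  # continuum array and the (wavelength, flux) spline points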
def fitqsocont(wa, fl, er, redshift, oldco=None, knots=None,
nbin=1, divmult=1, forest_divmult=1, atmos=True, debug=False):
""" Find an estimate of a QSO continuum.
divmult=3 works well for R~40000, S/N~10, z=3 QSO spectrum.
nbin bins the data for plotting and continuum fitting (obsolete)
"""
# choose initial reference continuum points. Increase divmult for
# fewer initial continuum points (generally needed for poorer S/N
# spectra).
zp1 = 1 + redshift
#reflines = np.array([1025.72, 1215.6701, 1240.14, 1398.0,
# 1549.06, 1908, 2800 ])
# generate the edges of wavelength chunks to send to fitting routine
# these edges and divisions are generated by trial and error
# for S/N = 15ish and resolution = 2000ish
div = np.rec.fromrecords([(500. , 800. , 25),
(800. , 1190., 25),
(1190., 1213., 4),
(1213., 1230., 6),
(1230., 1263., 6),
(1263., 1290., 5),
(1290., 1340., 5),
(1340., 1370., 2),
(1370., 1410., 5),
(1410., 1515., 5),
(1515., 1600., 15),
(1600., 1800., 8),
(1800., 1900., 5),
(1900., 1940., 5),
(1940., 2240., 15),
(2240., 3000., 25),
(3000., 6000., 80),
(6000., 20000., 100),
], names=str('left,right,num'))
div.num[2:] = np.ceil(div.num[2:] * divmult)
div.num[:2] = np.ceil(div.num[:2] * forest_divmult)
div.left *= zp1
div.right *= zp1
if debug: print(div.tolist())
temp = [np.linspace(left, right, n+1)[:-1] for left,right,n in div]
edges = np.concatenate(temp)
if debug: stats(edges)
i0,i1,i2 = edges.searchsorted([wa[0], 1210*zp1, wa[-1]])
if debug: print(i0,i1,i2)
contpoints = []
if knots is not None:
contpoints.extend(knots)
else:
co,cp = spline_continuum(wa, fl, er, edges[i0:i2], debug=debug)
contpoints.extend(cp)
fig = pl.figure(figsize=(11, 7))
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.1, top=0.95)
wrapper = InteractiveCoFit(wa, fl, er, contpoints, co=oldco, nbin=nbin,
redshift=redshift, fig=fig, atmos=atmos)
while True:
if wrapper.finished: break
pl.waitforbuttonpress()
return wrapper.continuum, wrapper.contpoints
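# A usage sketch (assuming a spectrum already loaded into wa, fl, er arrays): the call
# blocks while the matplotlib window is open; press 'k' to keep the fitted continuum or
# 'q' to abandon it, as handled by InteractiveCoFit below.
def _example_fitqsocont(wa, fl, er):
    co, knots = fitqsocont(wa, fl, er, redshift=2.5, divmult=3)
    # The returned knots can be fed back in later to resume editing a previous fit:
    # co2, knots2 = fitqsocont(wa, fl, er, redshift=2.5, knots=knots)
    return co, knots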
class InteractiveCoFit(object):
help_message = """
'a' : add a new continuum point
'd' : delete the nearest point
'm' : move the nearest point
'b' : add a break in the continuum
'r' : remove a break in the continuum
's' : smooth the spectrum
'S' : save the continuum and points to a temporary file (tmp.sav)
'k' : keep continuum
'q' : quit without keeping continuum
"""
def __init__(self, wa, fl, er, contpoints, co=None,
nbin=8, redshift=None, atmos=None, fig=None):
""" Initialise figure, plots and variables.
Parameters
----------
wa : Wavelengths
fl : Fluxes
er : One sigma errors
nbin : int (8)
Number of pixels to bin arrays in wavelength. Default 8.
contpoints : list of x,y tuple pairs (None)
The points through which a cubic spline is passed,
defining the continuum.
redshift : float (None)
Redshift used to plot reference emission lines.
atmos : list of wavelength pairs (None)
Regions of atmospheric absorption to plot.
Notes
-----
Updates the following attributes:
self.spec : Dictionary of wa, fl, er.
self.contpoints : Points used to define the continuum.
self.nbin : The input nbin value.
self.markers : Dictionary of matplotlib plotting artists.
self.connections : Callback connections.
self.fig : The plotting figure instance.
"""
#setup
#print co
self.WMIN_LYA = 1040
self.WMAX_LYA = 1190
self.spec = dict(wa=wa, fl=fl, er=er, co=co)
self.nbin = nbin
self.breaks = [wa[0], wa[-1]] # wavelengths of breaks in the continuum
self.contpoints = list(contpoints)
if os.path.lexists('./_knots.sav'):
c = raw_input('temporary knots file exists, use these knots? (y) ')
if c.lower() != 'n':
self.contpoints = loadobj('./_knots.sav')
self.markers = dict()
self.art_fl = None
if fig is None:
self.fig = pl.figure()
else:
self.fig = fig
# disable any existing key press callbacks
cids = list(self.fig.canvas.callbacks.callbacks.get('key_press_event', {}))
for cid in cids:
self.fig.canvas.callbacks.disconnect(cid)
self.template = None
if redshift is not None:
self.template = qso_template(wa, redshift)
self.connections = []
self.continuum = None
self.finished = False
self.redshift = redshift
self.atmos = atmos
self.smoothby = None
self.plotinit()
self.update()
self.modifypoints()
pl.draw()
def plotinit(self):
""" Set up the figure and do initial plots.
Updates the following attributes:
self.markers
"""
wa,fl,er = [self.spec[k][0:-1:self.nbin] for k in 'wa fl er'.split()]
if self.spec['co'] is not None:
co = self.spec['co'][0:-1:self.nbin]
# axis for spectrum & continuum
a0 = self.fig.add_axes((0.05,0.1,0.9,0.6))
a0.set_autoscale_on(0)
# axis for residuals
a1 = self.fig.add_axes((0.05,0.75,0.9,0.2),sharex=a0)
a1.set_autoscale_on(0)
a1.axhline(0,color='k',alpha=0.7, zorder=99)
a1.axhline(1,color='k',alpha=0.7, zorder=99)
a1.axhline(-1,color='k',alpha=0.7, zorder=99)
a1.axhline(2,color='k',linestyle='dashed',zorder=99)
a1.axhline(-2,color='k',linestyle='dashed',zorder=99)
m0, = a1.plot([0],[0],'.r',marker='.', mec='none', lw=0, mew=0, ms=6, alpha=0.5)
a1.set_ylim(-4, 4)
a0.axhline(0, color='0.7')
if self.spec['co'] is not None:
a0.plot(wa,co, color='0.7', lw=1, ls='dashed')
self.art_fl, = a0.plot(wa, fl, 'b', lw=0.5, linestyle='steps-mid')
a0.plot(wa, er, lw=0.5, color='orange')
m1, = a0.plot([0], [0], 'r', alpha=0.7)
m2, = a0.plot([0], [0], 'o', mfc='None',mew=1, ms=8, mec='r', picker=5,
alpha=0.7)
a0.set_xlim(min(wa), max(wa))
good = (er > 0) & ~np.isnan(fl) & ~np.isinf(fl)
ymax = 2 * np.abs(np.percentile(fl[good], 95))
a0.set_ylim(-0.1*ymax, ymax)
a0.text(0.9,0.9, 'z=%.2f' % self.redshift, transform=a0.transAxes)
# for histogram
trans = mtran.blended_transform_factory(a1.transAxes, a1.transData)
hist, = a1.plot([], [], color='k', transform=trans)
x = np.linspace(-3,3)
a1.plot(Gaussian(x,0,1,0.05), x, color='k', transform=trans, lw=0.5)
if self.template is not None:
trans = mtran.blended_transform_factory(a0.transData, a0.transAxes)
a0.plot(self.spec['wa'], self.template/self.template.max(), '-c', lw=2,
alpha=0.5, transform=trans)
self.fig.canvas.draw()
self.markers.update(contpoints=m2, cont=m1, resid=m0, hist_left=hist)
def update(self):
""" Calculates the new continuum, residuals and updates the plots.
Updates the following attributes:
self.markers
self.continuum
"""
wa,fl,er = (self.spec[key] for key in 'wa fl er'.split())
co = np.empty(len(wa))
co.fill(np.nan)
for b0,b1 in zip(self.breaks[:-1], self.breaks[1:]):
cpts = [(x,y) for x,y in self.contpoints if b0 <= x <= b1]
if len(cpts) < 3:
continue
spline = AkimaSpline(*list(zip(*cpts)))
i,j = wa.searchsorted([b0,b1])
co[i:j] = spline(wa[i:j])
resid = (fl - co) / er
# histogram
bins = np.arange(0, 5+0.1, 0.2)
w0,w1 = self.fig.axes[1].get_xlim()
x,_ = np.histogram(resid[between(wa, w0, w1)],
bins=bins)
b = np.repeat(bins, 2)
X = np.concatenate([[0], np.repeat(x,2), [0]])
Xmax = X.max()
X = 0.05 * X / Xmax
self.markers['hist_left'].set_data(X, b)
self.markers['contpoints'].set_data(list(zip(*self.contpoints)))
nbin = self.nbin
self.markers['cont'].set_data(wa[::nbin], co[::nbin])
self.markers['resid'].set_data(wa[::nbin], resid[::nbin])
if self.smoothby is not None:
sfl = convolve_psf(fl, self.smoothby)
self.art_fl.set_data(wa, sfl)
else:
self.art_fl.set_data(wa, fl)
self.continuum = co
saveobj('_knots.sav', self.contpoints, overwrite=True)
self.fig.canvas.draw()
def on_keypress(self, event):
""" Interactive fiddling via the keyboard
Updates:
self.contpoints
"""
if event.key == 'q':
for item in self.connections:
self.fig.canvas.mpl_disconnect(item)
self.contpoints = None
self.continuum = None
self.finished = True
return
if event.key == 'k':
for item in self.connections:
self.fig.canvas.mpl_disconnect(item)
self.finished = True
return
if event.inaxes != self.fig.axes[0]: return
if event.key == 'a':
# add a point to contpoints
x,y = event.xdata,event.ydata
if x not in list(zip(*self.contpoints))[0]:
self.contpoints.append((x,y))
self.update()
elif event.key == 'd':
# remove a point from contpoints
contx,conty = zip(*self.contpoints)
sep = np.hypot(event.xdata - contx, event.ydata - conty)
self.contpoints.remove(self.contpoints[sep.argmin()])
self.update()
elif event.key == 'm':
# Move a point
contx,conty = zip(*self.contpoints)
sep = np.hypot(event.xdata - contx, event.ydata - conty)
#import pdb
#pdb.set_trace()
self.contpoints[sep.argmin()] = (event.xdata,event.ydata)
self.update()
elif event.key == 'b':
# Add a break to the continuum.
self.breaks.append(event.xdata)
self.breaks.sort()
self.update()
elif event.key == 'r':
# remove a break
i = indexnear(self.breaks, event.xdata)
if i not in (0, len(self.breaks)-1):
self.breaks.remove(self.breaks[i])
self.update()
elif event.key == 'S':
# Save fit to a temporary file
print('fitcont: Writing output to temporary file tmp.sav')
saveobj('tmp.sav', (self.continuum, self.contpoints), overwrite=1)
elif event.key == 's':
c = raw_input('New FWHM in pixels of Gaussian to convolve with? '
'(blank for no smoothing) ')
if c == '':
# restore spectrum
self.smoothby = None
self.update()
else:
try:
fwhm = float(c)
except ValueError:
print('FWHM must be a floating point number >= 1')
return
if fwhm < 1:
self.smoothby = None
else:
self.smoothby = fwhm
self.update()
elif event.key == '?':
print(self.help_message)
def on_button_release(self, event):
self.update()
def modifypoints(self):
""" Add/remove continuum points."""
print(self.help_message)
id1 = self.fig.canvas.mpl_connect('key_press_event',self.on_keypress)
id2 = self.fig.canvas.mpl_connect('button_release_event',self.on_button_release)
self.connections.extend([id1, id2])
|
|
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import re
import datetime
import time
import pyperclip
# EDIT ME!
currentMonthTotalDays = 31
year = 2018
currentMonthIndex = datetime.date.today().month
currentDayOfMonthIndex = datetime.date.today().day
currentDayOfYearIndex = time.localtime().tm_yday
# TODO: testing...
# currentMonthTotalDays = 31
# currentMonthIndex = 12
# currentDayOfMonthIndex = 31
# currentDayOfYearIndex = 366
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
currentDayOfMonthName = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelfth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentieth', 21: 'twenty-first', 22: 'twenty-second', 23: 'twenty-third', 24: 'twenty-fourth', 25: 'twenty-fifth', 26: 'twenty-sixth', 27: 'twenty-seventh', 28: 'twenty-eighth', 29: 'twenty-ninth', 30: 'thirtieth', 31: 'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[datetime.date.today().weekday()]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100.0 * numberStillIn / initialNumber, 0))
def templateForParticipants():
answer = ""
for participant in participants.participantsWhoAreStillIn():
answer += "/u/" + participant.name
if not participant.hasCheckedIn:
answer += " ~"
answer += "\n\n"
return answer
def templateForParticipantsOnLastDayOfMonth():
answer = ""
answer += "These participants have checked in at least once in CURRENT_MONTH_NAME:\n"
answer += "\n"
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name + "\n"
answer += "\n"
answer += "These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list**:\n"
answer += "\n"
for participant in participants.participantsWhoAreStillInAndHaveNotCheckedIn():
answer += "/u/" + participant.name + " ~\n"
answer += "\n"
return answer
def templateForJan1():
# first day of the challenge, and late signup grace period
print "using templateForJan1"
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is the very first day of the year-long Stay Clean YEAR challenge. ~~We will no longer be accepting new signups.~~ Good news! We will be be accepting late signups for the next 14 days. If you forgot to sign up for the YEAR challenge, and you've been clean for all of January, just leave a \"sign me up\" comment below, and I'll add you. Best of luck to everyone here!\n"
answer += "\n"
answer += "Here's how this thing works:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "Here are our **INITIAL_NUMBER** original participants:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateForJan2to13():
# late signup grace period
print "using templateForJan2to13"
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. This is the CURRENT_DAY_OF_MONTH_NAME day of our 14 day late-signup grace period. If you forgot to sign up for the YEAR challenge, and you've been clean for all of January, just leave a \"sign me up\" comment below, and I'll add you.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
answer += "\n"
answer += "Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateForJan14():
# last day of late signup grace period
print "using templateForJan14"
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. This is the **last day** of our 14 day late-signup grace period. If you forgot to sign up for the YEAR challenge, and you've been clean for all of January, just leave a \"sign me up\" comment below, and I'll add you. After today, further signup requests will be silently ignored.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
answer += "\n"
answer += "Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateForJan15():
# first day AFTER the late signup grace period
print "using templateForJan15"
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Our 14 day late-signup grace period is now over. If you forgot to sign up, it's too late to sign up for Stay Clean YEAR, but feel free to leave comments here anyway, and join us over on the monthly challenge thread.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
answer += "\n"
answer += "Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateForJan16to25():
# remaining January days after the late signup grace period
print "using templateForJan16to25"
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
answer += "\n"
answer += "Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateForJan26to30():
print "using templateForJan26to30"
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THE COUNTDOWN: Attention everyone!** You have " + str(currentMonthTotalDays - currentDayOfMonthIndex) + " days to make a checkin comment (if you haven't already done so in CURRENT_MONTH_NAME) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS!!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
answer += "\n"
answer += "Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateForJan31():
print "using templateForJan31"
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already done so in CURRENT_MONTH_NAME) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\n\n"
answer += templateForParticipantsOnLastDayOfMonth()
print "============================================================="
return answer
def templateForUltimateMinus5toPenultimateDayOfMonth():
print "using templateForUltimateMinus5toPenultimateDayOfMonth"
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THE COUNTDOWN: Attention everyone!** You have " + str(currentMonthTotalDays - currentDayOfMonthIndex) + " days to make a checkin comment (if you haven't already done so in CURRENT_MONTH_NAME) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS!!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
answer += "\n"
answer += "Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateForUltimateDayOfMonth():
print "using templateForUltimateDayOfMonth"
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already done so in CURRENT_MONTH_NAME) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
answer += "\n"
answer += "Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipantsOnLastDayOfMonth()
print "============================================================="
return answer
def templateForUltimateMinus5toPenultimateDayOfYear():
print "using templateForUltimateMinus5toPenultimateDayOfYear"
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THE COUNTDOWN: Attention everyone!** You have " + str(currentMonthTotalDays - currentDayOfMonthIndex) + " days to make a checkin comment (if you haven't already done so in CURRENT_MONTH_NAME) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS!!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
answer += "\n"
answer += "Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateForUltimateDayOfYear():
print "using templateForUltimateDayOfYear"
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the very last day of the Stay Clean YEAR challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors.\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
answer += "\n"
answer += templateForParticipantsOnLastDayOfMonth()
return answer
def templateForNormalDay():
print "using templateForNormalDay"
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "If you think you should still be on this list but aren't, you probably got removed for not checking in at least once per month. However, if you let me know you're still with it I'll re-add you.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
answer += "\n"
answer += "Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateToUse():
# if currentDayOfMonthIndex == 1:
# return templateFor1()
# elif currentDayOfMonthIndex == 2:
# return templateFor2()
# elif currentDayOfMonthIndex == 3:
# return templateFor3()
# elif currentDayOfMonthIndex == 4:
# return templateFor4()
# elif 5 <= currentDayOfMonthIndex <= 9:
# return templateFor5to9()
# elif 10 <= currentDayOfMonthIndex <= 14:
# return templateFor10to14()
# if currentDayOfMonthIndex == 15:
# return templateFor15()
# elif (currentDayOfMonthIndex >= 16) and (currentDayOfMonthIndex <= currentMonthPenultimateDayIndex):
# return templateFor16toPenultimate()
# else:
# return templateForUltimate()
if currentDayOfYearIndex == 1:
return templateForJan1()
elif 2 <= currentDayOfYearIndex <= 13:
return templateForJan2to13()
elif currentDayOfYearIndex == 14:
return templateForJan14()
elif currentDayOfYearIndex == 15:
return templateForJan15()
elif 16 <= currentDayOfYearIndex <= 25:
return templateForJan16to25()
elif 26 <= currentDayOfYearIndex <= 30:
return templateForJan26to30()
elif currentDayOfYearIndex == 31:
return templateForJan31()
elif currentMonthName == "December" and (26 <= currentDayOfMonthIndex <= 30):
return templateForUltimateMinus5toPenultimateDayOfYear()
elif currentMonthName == "December" and currentDayOfMonthIndex == 31:
return templateForUltimateDayOfYear()
# elif (currentDayOfMonthIndex >= 16) and (currentDayOfMonthIndex <= currentMonthPenultimateDayIndex):
elif (currentMonthPenultimateDayIndex - 4) <= currentDayOfMonthIndex <= currentMonthPenultimateDayIndex:
return templateForUltimateMinus5toPenultimateDayOfMonth()
elif currentDayOfMonthIndex == currentMonthTotalDays:
return templateForUltimateDayOfMonth()
else:
return templateForNormalDay()
def stringToPrint():
answer = templateToUse()
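    # The placeholders are plain uppercase tokens, so re.sub behaves as a
    # literal search-and-replace here. Order matters for overlapping names:
    # CUMULATIVE_YEARS_BY_THOSE_STILL_IN is substituted before the bare YEAR
    # token so the longer placeholder is not corrupted by the shorter one.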
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays), answer)
answer = re.sub('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex), answer)
answer = re.sub('CURRENT_DAY_OF_YEAR_INDEX', str(currentDayOfYearIndex), answer)
answer = re.sub('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName, answer)
answer = re.sub('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName, answer)
answer = re.sub('CUMULATIVE_DAYS_BY_THOSE_STILL_IN', str(currentDayOfYearIndex * numberStillIn), answer)
answer = re.sub('CUMULATIVE_YEARS_BY_THOSE_STILL_IN', str(currentDayOfYearIndex * numberStillIn / 365), answer)
answer = re.sub('YEAR', str(year), answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
|
|
import logging
from twisted.words.protocols import irc
from twisted.internet import protocol, reactor
from socbot.pluginapi import API
from socbot.userdb import UserDB
# Credits to ibid for some helpful code:
# - Ping ponger
class Connection(irc.IRCClient):
nickname = "SocBot"
_ping_deferred = None
_reconnect_deferred = None
def __init__(self):
self.factory = None
self.log = None
self.shutdown = False
self.api = None
self.channels = []
#===== Timeout control =====
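    # Idle-connection watchdog: after ping_interval seconds without traffic,
    # _idle_ping() sends "PING idle-socbot" and schedules _timeout_reconnect().
    # A matching PONG (handled in irc_PONG) cancels the reconnect call and
    # re-arms the idle timer; if no PONG arrives within pong_timeout seconds,
    # the transport is dropped and the reconnecting factory dials in again.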
def _idle_ping(self):
self.log.debug("sending idle ping")
if self._ping_deferred and self._ping_deferred.active():
self._ping_deferred.cancel()
self._ping_deferred = None
self._reconnect_deferred = reactor.callLater(
self.factory.pong_timeout, self._timeout_reconnect)
self.sendLine('PING idle-socbot')
def _timeout_reconnect(self):
self.log.info("idle timeout; reconnecting")
self.transport.loseConnection()
def irc_PONG(self, prefix_unused, params):
if params[-1] == 'idle-socbot' and self._reconnect_deferred:
self.log.debug("received idle pong")
if self._reconnect_deferred and self._reconnect_deferred.active():
self._reconnect_deferred.cancel()
self._reconnect_deferred = None
self._ping_deferred = reactor.callLater(
self.factory.ping_interval, self._idle_ping)
def connectionMade(self):
self.log.info("connected to server")
self.api.onCommand("IRC_CONNECTED", "", [])
self.factory.resetDelay()
self.factory.onConnected(self)
irc.IRCClient.connectionMade(self)
self._ping_deferred = reactor.callLater(self.factory.ping_interval, self._idle_ping)
def connectionLost(self, reason):
self.log.info("lost connection: {0}".format(reason))
self.api.onCommand("IRC_DISCONNECTED", "", [])
if self._ping_deferred and self._ping_deferred.active():
self._ping_deferred.cancel()
if self._reconnect_deferred and self._reconnect_deferred.active():
self._reconnect_deferred.cancel()
irc.IRCClient.connectionLost(self, reason)
#===== Message Control =====
def dataReceived(self, data):
irc.IRCClient.dataReceived(self, data)
if self._ping_deferred and self._ping_deferred.active():
self._ping_deferred.reset(self.factory.ping_interval)
def sendLine(self, line):
self.log.debug("sending line `{0}`".format(line))
irc.IRCClient.sendLine(self, str(line))
if self._ping_deferred and self._ping_deferred.active():
self._ping_deferred.reset(self.factory.ping_interval)
def privmsg(self, user, channel, msg):
channel = channel.lower()
if self.api.onPrivmsg(user, channel, msg):
irc.IRCClient.privmsg(self, user, channel, msg)
def handleCommand(self, command, prefix, params):
if self.api.onCommand(command, prefix, params):
irc.IRCClient.handleCommand(self, command, prefix, params)
def msg(self, target, message, length=400):
if not message or not target:
return
irc.IRCClient.msg(self, target, str(message), length)
def notice(self, user, message, length=None):
if not message or not user:
return
fmt = 'NOTICE %s :' % (user,)
if length is None:
length = self._safeMaximumLineLength(fmt)
# Account for the line terminator.
minimumLength = len(fmt) + 2
if length <= minimumLength:
raise ValueError("Maximum length must exceed %d for message "
"to %s" % (minimumLength, user))
for line in irc.split(message, length - minimumLength):
self.sendLine(fmt + line)
#===== Lifetime Control =====
    def quit(self, message=""):
self.shutdown = True
irc.IRCClient.quit(self, message)
self.factory.shutdown()
def restart(self, message="Restarting..."):
self.factory.sharedstate['exitcode'] = 3
self.factory.shutdownAll(message)
def _doJoins(self):
if self.factory.config["channels"]:
for channel, chanconfig in self.factory.config["channels"].iteritems():
channel = channel.lower()
if not chanconfig["autojoin"]:
continue
if chanconfig["password"]:
self.join(channel, chanconfig["password"])
else:
self.join(channel)
#===== Incoming Events =====
def irc_ERR_NOMOTD(self, prefix, params):
self.log.info("no MOTD")
self._doJoins()
def receivedMOTD(self, motd):
self.log.info("received MOTD")
self._doJoins()
def joined(self, channel):
self.log.info("joined " + channel)
channel = channel.lower()
if not channel in self.channels:
self.channels.append(channel)
def left(self, channel):
self.log.info("left " + channel)
channel = channel.lower()
if channel in self.channels:
self.channels.remove(channel)
def kickedFrom(self, channel, kicker, message):
self.log.info("kicked from %s by %s (%s)" % (channel, kicker, message))
channels = self.factory.config["channels"]
if channels:
if channel.lower() in channels:
chandata = channels[channel.lower()]
if chandata['kickedrejoin']:
if chandata["password"]:
self.join(channel, chandata["password"])
else:
self.join(channel)
class BotFactory(protocol.ReconnectingClientFactory):
protocol = Connection
log = logging.getLogger("socbot")
ping_interval = 60.0
pong_timeout = 60.0
def __init__(self, config, sharedstate, main):
self.name = config['name']
self.sharedstate = sharedstate
self.instance = None
self.core = main
self.config = config
self.shuttingdown = False
self.connection = None
self.users = UserDB('conf/users.db')
def clientConnectionLost(self, connector, unused_reason):
self.log.info("connection lost")
self.connection = None
if not self.shuttingdown:
protocol.ReconnectingClientFactory.clientConnectionLost(
self, connector, unused_reason)
self.core.connectionLost(self)
def buildProtocol(self, addr):
self.log.debug("creating new connection")
p = protocol.ReconnectingClientFactory.buildProtocol(self, addr)
p.nickname = self.config['nickname']
p.log = logging.getLogger("socbot.connection")
p.api = API(p, self.users, self.sharedstate['pluginmanager'])
p.api.log = logging.getLogger("socbot.connection")
return p
def onConnected(self, bot):
if self.connection:
self.log.warning("a previous connection exists, removing it")
self.connection.quit()
self.connection = None
self.connection = bot
def shutdownAll(self, msg="Shutdown requested."):
self.core.shutdown(msg)
def shutdown(self):
self.shuttingdown = True
|
|
#!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2016 Ivor Wanders
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import cherrypy
import os
import logging
import json
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.manager import WebSocketManager
import ws4py
import interface
import message
class FocusStackRoot:
"""
This is the webserver root of CherryPy.
:param stack_interface: The interface object that communicates to the
serial port.
"""
def __init__(self, stack_interface, preset_dir):
self.stack = stack_interface
self.preset_dir = preset_dir
@cherrypy.expose
def ws(self):
        # handle this path with the websocket handler.
handler = cherrypy.request.ws_handler
ws._cp_config = {'tools.staticdir.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_serial_ports(self):
return interface.get_potential_ports()
@cherrypy.expose
@cherrypy.tools.json_out()
def get_preset_list(self):
presets = []
for f in os.listdir(self.preset_dir):
if (f.endswith(".json")):
presets.append(f[:-5])
return presets
@cherrypy.expose
@cherrypy.tools.json_out()
def del_preset(self, name):
preset_path = os.path.join(self.preset_dir, name + ".json")
if (not os.path.abspath(preset_path).startswith(self.preset_dir)):
            # the resolved path escapes the preset directory; refuse the request.
raise cherrypy.HTTPError(403, "Outside of directory...")
os.unlink(preset_path)
return self.get_preset_list()
@cherrypy.expose
@cherrypy.tools.json_out()
def get_preset_content(self, name):
preset_path = os.path.join(self.preset_dir, name + ".json")
if (not os.path.abspath(preset_path).startswith(self.preset_dir)):
            # the resolved path escapes the preset directory; refuse the request.
raise cherrypy.HTTPError(403, "Outside of directory...")
try:
with open(preset_path, 'r') as f:
data = json.load(f)
failed = False
except ValueError as e:
print("Failed to load preset.")
print(e)
failed = True
if failed:
raise cherrypy.HTTPError(500, "Failed to load json preset.")
return data
@cherrypy.expose
@cherrypy.tools.json_out()
def set_preset_content(self, name, data):
preset_path = os.path.join(self.preset_dir, name + ".json")
if (not os.path.abspath(preset_path).startswith(self.preset_dir)):
            # the resolved path escapes the preset directory; refuse the request.
raise cherrypy.HTTPError(403, "Outside of directory...")
try:
with open(preset_path, 'w') as f:
                json.dump(json.loads(data), f, indent=4, sort_keys=True)
failed = False
except ValueError as e:
print("Failed to save preset.")
print(e)
failed = True
if failed:
raise cherrypy.HTTPError(500, "Failed to save json preset.")
return self.get_preset_list()
class WebsocketHandler(ws4py.websocket.WebSocket):
"""
The object created for each websocket. It mainly passes on the
messages it receives via the websocket to the stack interface.
:param stack_interface: The interface object that communicates to the
serial port.
:type stack_interface: `interface.StackInterface` instance.
:param websocket_manager: The websocket manager which keeps track of
the websockets.
:type websocket_manager: `ws4py.manager.WebSocketManager`.
"""
def __init__(self, stack_interface, websocket_manager, *args, **kwargs):
super(WebsocketHandler, self).__init__(*args, **kwargs)
self.stacker = stack_interface
self.manager = websocket_manager
def received_message(self, msg):
"""
Handles messages received via the websocket, so they are coming
from the browser.
"""
decoded_json = json.loads(str(msg))
msgtype, msgdata = decoded_json
if (msgtype == "serial"):
# got a serial command, we have to pass this on to the stack object
# check if we have a serial connection.
if (not self.stacker.is_serial_connected()):
                self.send(json.dumps(["no_serial"]))
return
if (isinstance(msgdata["msg_type"], str)):
msgdata["msg_type"] = message.msg_type_id[msgdata["msg_type"]]
# convert the JSON into a message and put it.
msg = message.Msg()
try:
msg.from_dict(msgdata)
data = msg
self.stacker.put_message(data)
except TypeError as e:
cherrypy.log("Failed sending message: {}".format(e))
if (msgtype == "connect_serial"):
            # we have to connect the stack interface to the right serial port.
# if this succeeds we have to broadcast the new serial port to all
# the connected websockets.
res = self.stacker.connect(msgdata["device"])
if (res):
response = ["connect_success",
self.stacker.get_serial_parameters()]
self.manager.broadcast(json.dumps(response))
else:
response = ["connect_fail", msgdata]
self.send(json.dumps(response))
if (msgtype == "get_serial_status"):
if (not self.stacker.is_serial_connected()):
self.send(json.dumps(["no_serial"]))
else:
response = ["connect_success",
self.stacker.get_serial_parameters()]
self.send(json.dumps(response))
def closed(self, code, reason=None):
print("Websocket was closed.")
self.shutdown()
def shutdown(self):
print("Socket shutdown")
@classmethod
def with_parameters(cls, stack_instance, manager):
"""
Factory method to wrap this class for use with the default
websocket arguments. This allows passing the `stack_interface` and
`manager` arguments to every initialization of the websocket
handler.
"""
def factory(*args, **kwargs):
z = cls(stack_instance, manager, *args, **kwargs)
return z
return factory
if __name__ == "__main__":
# Running as main; should start a server.
parser = argparse.ArgumentParser(description="Focus Stacking"
" webinterface.")
parser.add_argument('--port', '-p',
help="The port used to listen.",
type=int,
default=8080)
parser.add_argument('--host', '-l',
help="The interface on which to listen.",
type=str,
default="127.0.0.1")
parser.add_argument('--presetdir', '-d',
help="Folder which holds preset files.",
type=str,
default="./presets/")
# parse the arguments.
args = parser.parse_args()
# set up the interface logger.
interface_logger = logging.getLogger("interface")
interface_logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(name)s - %(asctime)s - %(levelname)s'
' - %(message)s')
ch.setFormatter(formatter)
interface_logger.addHandler(ch)
# create the preset folder if it does not exist:
preset_dir = os.path.abspath(args.presetdir)
if (not os.path.isdir(preset_dir)):
print("Preset folder {} did not exist, creating.".format(preset_dir))
os.makedirs(preset_dir)
# Add the websocket requirements.
cherrypy.tools.websocket = WebSocketTool()
a = WebSocketPlugin(cherrypy.engine)
a.manager = WebSocketManager()
a.subscribe()
stack_interface = interface.StackInterface()
stack_interface.daemon = True
stack_interface.start()
server_tree = FocusStackRoot(stack_interface, preset_dir=preset_dir)
# create a broadcast function which relays messages received over the
# serial port to the websockets via the websocketmanager.
def broadcaster():
m = stack_interface.get_message()
if m:
payload = dict(m)
payload["msg_type"] = message.msg_type_name[payload["msg_type"]]
msg = ["serial", payload]
a.manager.broadcast(json.dumps(msg))
    # poll the broadcaster roughly every 10 ms via a CherryPy Monitor plugin.
cherrypy.process.plugins.Monitor(cherrypy.engine,
broadcaster,
frequency=0.01).subscribe()
cherrypy.config.update({"server.socket_host": args.host,
"server.socket_port": args.port})
static_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"static")
cherrypy.log("Static folder dir: {}".format(static_folder))
config = {"/": {"tools.staticdir.on": True,
"tools.staticdir.dir": static_folder,
"tools.staticdir.index": "index.html"},
"/ws": {"tools.websocket.on": True,
"tools.websocket.handler_cls":
WebsocketHandler.with_parameters(stack_interface,
a.manager)}
}
cherrypy.quickstart(server_tree, '/', config=config)
|
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import contextlib
import json
import io
import os
import random
import shutil
import signal
import socket
import sys
import tarfile
import tempfile
import threading
import time
import unittest
import warnings
import pytest
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
import docker
from docker.errors import APIError, NotFound
from docker.utils import kwargs_from_env
from .base import requires_api_version
from .test import Cleanup
# FIXME: missing tests for
# export; history; insert; port; push; tag; get; load; stats
warnings.simplefilter('error')
compare_version = docker.utils.compare_version
EXEC_DRIVER = []
BUSYBOX = 'busybox:buildroot-2014.02'
def exec_driver_is_native():
global EXEC_DRIVER
if not EXEC_DRIVER:
c = docker_client()
EXEC_DRIVER = c.info()['ExecutionDriver']
c.close()
return EXEC_DRIVER.startswith('native')
def docker_client(**kwargs):
return docker.Client(**docker_client_kwargs(**kwargs))
def docker_client_kwargs(**kwargs):
client_kwargs = kwargs_from_env(assert_hostname=False)
client_kwargs.update(kwargs)
return client_kwargs
def setup_module():
c = docker_client()
try:
c.inspect_image(BUSYBOX)
except NotFound:
c.pull(BUSYBOX)
c.inspect_image(BUSYBOX)
c.close()
class BaseTestCase(unittest.TestCase):
tmp_imgs = []
tmp_containers = []
tmp_folders = []
tmp_volumes = []
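    # These lists collect the ids/names of everything a test creates; setUp()
    # rebinds them per instance and tearDown() walks them to remove images,
    # containers, networks, folders and volumes so tests stay isolated.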
def setUp(self):
if six.PY2:
self.assertRegex = self.assertRegexpMatches
self.assertCountEqual = self.assertItemsEqual
self.client = docker_client(timeout=60)
self.tmp_imgs = []
self.tmp_containers = []
self.tmp_folders = []
self.tmp_volumes = []
self.tmp_networks = []
def tearDown(self):
for img in self.tmp_imgs:
try:
self.client.remove_image(img)
except docker.errors.APIError:
pass
for container in self.tmp_containers:
try:
self.client.stop(container, timeout=1)
self.client.remove_container(container)
except docker.errors.APIError:
pass
for network in self.tmp_networks:
try:
self.client.remove_network(network)
except docker.errors.APIError:
pass
for folder in self.tmp_folders:
shutil.rmtree(folder)
for volume in self.tmp_volumes:
try:
self.client.remove_volume(volume)
except docker.errors.APIError:
pass
self.client.close()
def run_container(self, *args, **kwargs):
container = self.client.create_container(*args, **kwargs)
self.tmp_containers.append(container)
self.client.start(container)
exitcode = self.client.wait(container)
if exitcode != 0:
output = self.client.logs(container)
raise Exception(
"Container exited with code {}:\n{}"
.format(exitcode, output))
return container
#########################
# INFORMATION TESTS #
#########################
class TestVersion(BaseTestCase):
def runTest(self):
res = self.client.version()
self.assertIn('GoVersion', res)
self.assertIn('Version', res)
self.assertEqual(len(res['Version'].split('.')), 3)
class TestInfo(BaseTestCase):
def runTest(self):
res = self.client.info()
self.assertIn('Containers', res)
self.assertIn('Images', res)
self.assertIn('Debug', res)
class TestSearch(BaseTestCase):
def runTest(self):
self.client = docker_client(timeout=10)
res = self.client.search('busybox')
self.assertTrue(len(res) >= 1)
base_img = [x for x in res if x['name'] == 'busybox']
self.assertEqual(len(base_img), 1)
self.assertIn('description', base_img[0])
###################
# LISTING TESTS #
###################
class TestImages(BaseTestCase):
def runTest(self):
res1 = self.client.images(all=True)
self.assertIn('Id', res1[0])
res10 = res1[0]
self.assertIn('Created', res10)
self.assertIn('RepoTags', res10)
distinct = []
for img in res1:
if img['Id'] not in distinct:
distinct.append(img['Id'])
self.assertEqual(len(distinct), self.client.info()['Images'])
class TestImageIds(BaseTestCase):
def runTest(self):
res1 = self.client.images(quiet=True)
self.assertEqual(type(res1[0]), six.text_type)
class TestListContainers(BaseTestCase):
def runTest(self):
res0 = self.client.containers(all=True)
size = len(res0)
res1 = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res1)
self.client.start(res1['Id'])
self.tmp_containers.append(res1['Id'])
res2 = self.client.containers(all=True)
self.assertEqual(size + 1, len(res2))
retrieved = [x for x in res2 if x['Id'].startswith(res1['Id'])]
self.assertEqual(len(retrieved), 1)
retrieved = retrieved[0]
self.assertIn('Command', retrieved)
self.assertEqual(retrieved['Command'], six.text_type('true'))
self.assertIn('Image', retrieved)
self.assertRegex(retrieved['Image'], r'busybox:.*')
self.assertIn('Status', retrieved)
#####################
# CONTAINER TESTS #
#####################
class TestCreateContainer(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
class TestCreateContainerWithBinds(BaseTestCase):
def setUp(self):
super(TestCreateContainerWithBinds, self).setUp()
self.mount_dest = '/mnt'
# Get a random pathname - we don't need it to exist locally
self.mount_origin = tempfile.mkdtemp()
shutil.rmtree(self.mount_origin)
self.filename = 'shared.txt'
self.run_with_volume(
False,
BUSYBOX,
['touch', os.path.join(self.mount_dest, self.filename)],
)
def run_with_volume(self, ro, *args, **kwargs):
return self.run_container(
*args,
volumes={self.mount_dest: {}},
host_config=self.client.create_host_config(
binds={
self.mount_origin: {
'bind': self.mount_dest,
'ro': ro,
},
},
network_mode='none'
),
**kwargs
)
def test_rw(self):
container = self.run_with_volume(
False,
BUSYBOX,
['ls', self.mount_dest],
)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn(self.filename, logs)
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, True)
def test_ro(self):
container = self.run_with_volume(
True,
BUSYBOX,
['ls', self.mount_dest],
)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn(self.filename, logs)
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, False)
def check_container_data(self, inspect_data, rw):
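        # Docker APIs older than 1.20 report bind mounts via the top-level
        # 'Volumes'/'VolumesRW' keys; 1.20 and newer expose a 'Mounts' list
        # with Source/Destination/RW per entry, so both shapes are checked.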
if docker.utils.compare_version('1.20', self.client._version) < 0:
self.assertIn('Volumes', inspect_data)
self.assertIn(self.mount_dest, inspect_data['Volumes'])
self.assertEqual(
self.mount_origin, inspect_data['Volumes'][self.mount_dest]
)
self.assertIn(self.mount_dest, inspect_data['VolumesRW'])
self.assertFalse(inspect_data['VolumesRW'][self.mount_dest])
else:
self.assertIn('Mounts', inspect_data)
filtered = list(filter(
lambda x: x['Destination'] == self.mount_dest,
inspect_data['Mounts']
))
self.assertEqual(len(filtered), 1)
mount_data = filtered[0]
self.assertEqual(mount_data['Source'], self.mount_origin)
self.assertEqual(mount_data['RW'], rw)
@requires_api_version('1.20')
class CreateContainerWithGroupAddTest(BaseTestCase):
def test_group_id_ints(self):
container = self.client.create_container(
BUSYBOX, 'id -G',
host_config=self.client.create_host_config(group_add=[1000, 1001])
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.wait(container)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
self.assertIn('1000', groups)
self.assertIn('1001', groups)
def test_group_id_strings(self):
container = self.client.create_container(
BUSYBOX, 'id -G', host_config=self.client.create_host_config(
group_add=['1000', '1001']
)
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.wait(container)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
self.assertIn('1000', groups)
self.assertIn('1001', groups)
class CreateContainerWithLogConfigTest(BaseTestCase):
def test_valid_log_driver_and_log_opt(self):
log_config = docker.utils.LogConfig(
type='json-file',
config={'max-file': '100'}
)
container = self.client.create_container(
BUSYBOX, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], log_config.type)
self.assertEqual(container_log_config['Config'], log_config.config)
def test_invalid_log_driver_raises_exception(self):
log_config = docker.utils.LogConfig(
type='asdf-nope',
config={}
)
container = self.client.create_container(
BUSYBOX, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
expected_msg = "logger: no log driver named 'asdf-nope' is registered"
with pytest.raises(APIError) as excinfo:
# raises an internal server error 500
self.client.start(container)
assert expected_msg in str(excinfo.value)
@pytest.mark.skipif(True,
reason="https://github.com/docker/docker/issues/15633")
def test_valid_no_log_driver_specified(self):
log_config = docker.utils.LogConfig(
type="",
config={'max-file': '100'}
)
container = self.client.create_container(
BUSYBOX, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], "json-file")
self.assertEqual(container_log_config['Config'], log_config.config)
def test_valid_no_config_specified(self):
log_config = docker.utils.LogConfig(
type="json-file",
config=None
)
container = self.client.create_container(
BUSYBOX, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], "json-file")
self.assertEqual(container_log_config['Config'], {})
class TestCreateContainerReadOnlyFs(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
ctnr = self.client.create_container(
BUSYBOX, ['mkdir', '/shrine'],
host_config=self.client.create_host_config(
read_only=True, network_mode='none'
)
)
self.assertIn('Id', ctnr)
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
res = self.client.wait(ctnr)
self.assertNotEqual(res, 0)
class TestCreateContainerWithName(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, 'true', name='foobar')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Name', inspect)
self.assertEqual('/foobar', inspect['Name'])
class TestRenameContainer(BaseTestCase):
def runTest(self):
version = self.client.version()['Version']
name = 'hong_meiling'
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.rename(res, name)
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Name', inspect)
if version == '1.5.0':
self.assertEqual(name, inspect['Name'])
else:
self.assertEqual('/{0}'.format(name), inspect['Name'])
class TestStartContainer(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
class TestStartContainerWithDictInsteadOfId(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res)
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
class TestCreateContainerPrivileged(BaseTestCase):
def runTest(self):
res = self.client.create_container(
BUSYBOX, 'true', host_config=self.client.create_host_config(
privileged=True, network_mode='none'
)
)
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
# Since Nov 2013, the Privileged flag is no longer part of the
# container's config exposed via the API (safety concerns?).
#
if 'Privileged' in inspect['Config']:
self.assertEqual(inspect['Config']['Privileged'], True)
class TestWait(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
inspect = self.client.inspect_container(id)
self.assertIn('Running', inspect['State'])
self.assertEqual(inspect['State']['Running'], False)
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], exitcode)
class TestWaitWithDictInsteadOfId(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(res)
exitcode = self.client.wait(res)
self.assertEqual(exitcode, 0)
inspect = self.client.inspect_container(res)
self.assertIn('Running', inspect['State'])
self.assertEqual(inspect['State']['Running'], False)
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], exitcode)
class TestLogs(BaseTestCase):
def runTest(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
BUSYBOX, 'echo {0}'.format(snippet)
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(id)
self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
class TestLogsWithTailOption(BaseTestCase):
def runTest(self):
snippet = '''Line1
Line2'''
container = self.client.create_container(
BUSYBOX, 'echo "{0}"'.format(snippet)
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(id, tail=1)
self.assertEqual(logs, ('Line2\n').encode(encoding='ascii'))
# class TestLogsStreaming(BaseTestCase):
# def runTest(self):
# snippet = 'Flowering Nights (Sakuya Iyazoi)'
# container = self.client.create_container(
# BUSYBOX, 'echo {0}'.format(snippet)
# )
# id = container['Id']
# self.client.start(id)
# self.tmp_containers.append(id)
# logs = bytes() if six.PY3 else str()
# for chunk in self.client.logs(id, stream=True):
# logs += chunk
# exitcode = self.client.wait(id)
# self.assertEqual(exitcode, 0)
# self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
class TestLogsWithDictInsteadOfId(BaseTestCase):
def runTest(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
BUSYBOX, 'echo {0}'.format(snippet)
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(container)
self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
class TestDiff(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
diff = self.client.diff(id)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
self.assertEqual(len(test_diff), 1)
self.assertIn('Kind', test_diff[0])
self.assertEqual(test_diff[0]['Kind'], 1)
class TestDiffWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
diff = self.client.diff(container)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
self.assertEqual(len(test_diff), 1)
self.assertIn('Kind', test_diff[0])
self.assertEqual(test_diff[0]['Kind'], 1)
class TestStop(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.stop(id, timeout=2)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestStopWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
self.assertIn('Id', container)
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
self.client.stop(container, timeout=2)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestKill(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestKillWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(container)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestKillWithSignal(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '60'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal=signal.SIGKILL)
exitcode = self.client.wait(id)
self.assertNotEqual(exitcode, 0)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False, state)
class TestPort(BaseTestCase):
def runTest(self):
port_bindings = {
'1111': ('127.0.0.1', '4567'),
'2222': ('127.0.0.1', '4568')
}
container = self.client.create_container(
BUSYBOX, ['sleep', '60'], ports=list(port_bindings.keys()),
host_config=self.client.create_host_config(
port_bindings=port_bindings, network_mode='bridge'
)
)
id = container['Id']
self.client.start(container)
        # Call the port function on each binding and compare expected vs actual
for port in port_bindings:
actual_bindings = self.client.port(container, port)
port_binding = actual_bindings.pop()
ip, host_port = port_binding['HostIp'], port_binding['HostPort']
self.assertEqual(ip, port_bindings[port][0])
self.assertEqual(host_port, port_bindings[port][1])
self.client.kill(id)
class TestMacAddress(BaseTestCase):
def runTest(self):
mac_address_expected = "02:42:ac:11:00:0a"
container = self.client.create_container(
BUSYBOX, ['sleep', '60'], mac_address=mac_address_expected)
id = container['Id']
self.client.start(container)
res = self.client.inspect_container(container['Id'])
self.assertEqual(mac_address_expected,
res['NetworkSettings']['MacAddress'])
self.client.kill(id)
class TestContainerTop(BaseTestCase):
def runTest(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60'])
id = container['Id']
self.client.start(container)
res = self.client.top(container['Id'])
print(res)
self.assertEqual(
res['Titles'],
['UID', 'PID', 'PPID', 'C', 'STIME', 'TTY', 'TIME', 'CMD']
)
self.assertEqual(len(res['Processes']), 1)
self.assertEqual(res['Processes'][0][7], 'sleep 60')
self.client.kill(id)
class TestContainerTopWithPsArgs(BaseTestCase):
def runTest(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60'])
id = container['Id']
self.client.start(container)
res = self.client.top(container['Id'], 'waux')
self.assertEqual(
res['Titles'],
['USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS',
'TTY', 'STAT', 'START', 'TIME', 'COMMAND'],
)
self.assertEqual(len(res['Processes']), 1)
self.assertEqual(res['Processes'][0][10], 'sleep 60')
self.client.kill(id)
class TestRestart(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
self.assertIn('State', info)
self.assertIn('StartedAt', info['State'])
start_time1 = info['State']['StartedAt']
self.client.restart(id, timeout=2)
info2 = self.client.inspect_container(id)
self.assertIn('State', info2)
self.assertIn('StartedAt', info2['State'])
start_time2 = info2['State']['StartedAt']
self.assertNotEqual(start_time1, start_time2)
self.assertIn('Running', info2['State'])
self.assertEqual(info2['State']['Running'], True)
self.client.kill(id)
class TestRestartWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
self.assertIn('Id', container)
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
self.assertIn('State', info)
self.assertIn('StartedAt', info['State'])
start_time1 = info['State']['StartedAt']
self.client.restart(container, timeout=2)
info2 = self.client.inspect_container(id)
self.assertIn('State', info2)
self.assertIn('StartedAt', info2['State'])
start_time2 = info2['State']['StartedAt']
self.assertNotEqual(start_time1, start_time2)
self.assertIn('Running', info2['State'])
self.assertEqual(info2['State']['Running'], True)
self.client.kill(id)
class TestRemoveContainer(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
self.client.remove_container(id)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
self.assertEqual(len(res), 0)
class TestRemoveContainerWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
self.client.remove_container(container)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
self.assertEqual(len(res), 0)
class TestCreateContainerWithVolumesFrom(BaseTestCase):
def runTest(self):
vol_names = ['foobar_vol0', 'foobar_vol1']
res0 = self.client.create_container(
BUSYBOX, 'true', name=vol_names[0]
)
container1_id = res0['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
res1 = self.client.create_container(
BUSYBOX, 'true', name=vol_names[1]
)
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
with self.assertRaises(docker.errors.DockerException):
self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True,
volumes_from=vol_names
)
res2 = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True,
host_config=self.client.create_host_config(
volumes_from=vol_names, network_mode='none'
)
)
container3_id = res2['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
info = self.client.inspect_container(res2['Id'])
self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names)
class TestCreateContainerWithLinks(BaseTestCase):
def runTest(self):
res0 = self.client.create_container(
BUSYBOX, 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
container1_id = res0['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
res1 = self.client.create_container(
BUSYBOX, 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
# we don't want the first /
link_path1 = self.client.inspect_container(container1_id)['Name'][1:]
link_alias1 = 'mylink1'
link_env_prefix1 = link_alias1.upper()
link_path2 = self.client.inspect_container(container2_id)['Name'][1:]
link_alias2 = 'mylink2'
link_env_prefix2 = link_alias2.upper()
res2 = self.client.create_container(
BUSYBOX, 'env', host_config=self.client.create_host_config(
links={link_path1: link_alias1, link_path2: link_alias2},
network_mode='none'
)
)
container3_id = res2['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
self.assertEqual(self.client.wait(container3_id), 0)
logs = self.client.logs(container3_id)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn('{0}_NAME='.format(link_env_prefix1), logs)
self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix1), logs)
self.assertIn('{0}_NAME='.format(link_env_prefix2), logs)
self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix2), logs)
class TestRestartingContainer(BaseTestCase):
def runTest(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '2'],
host_config=self.client.create_host_config(
restart_policy={"Name": "always", "MaximumRetryCount": 0},
network_mode='none'
)
)
id = container['Id']
self.client.start(id)
self.client.wait(id)
with self.assertRaises(docker.errors.APIError) as exc:
self.client.remove_container(id)
err = exc.exception.response.text
self.assertIn(
'You cannot remove a running container', err
)
self.client.remove_container(id, force=True)
class TestExecuteCommand(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, ['echo', 'hello'])
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'hello\n')
class TestExecuteCommandString(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'echo hello world')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'hello world\n')
class TestExecuteCommandStringAsUser(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami', user='default')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'default\n')
class TestExecuteCommandStringAsRoot(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'root\n')
class TestExecuteCommandStreaming(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exec_id = self.client.exec_create(id, ['echo', 'hello\nworld'])
self.assertIn('Id', exec_id)
res = b''
for chunk in self.client.exec_start(exec_id, stream=True):
res += chunk
self.assertEqual(res, b'hello\nworld\n')
class TestExecInspect(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist'])
self.assertIn('Id', exec_id)
self.client.exec_start(exec_id)
exec_info = self.client.exec_inspect(exec_id)
self.assertIn('ExitCode', exec_info)
self.assertNotEqual(exec_info['ExitCode'], 0)
class TestRunContainerStreaming(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, '/bin/sh',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
sock = self.client.attach_socket(container, ws=False)
self.assertTrue(sock.fileno() > -1)
class TestPauseUnpauseContainer(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.tmp_containers.append(id)
self.client.start(container)
self.client.pause(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], True)
self.assertIn('Paused', state)
self.assertEqual(state['Paused'], True)
self.client.unpause(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], True)
self.assertIn('Paused', state)
self.assertEqual(state['Paused'], False)
class TestCreateContainerWithHostPidMode(BaseTestCase):
def runTest(self):
ctnr = self.client.create_container(
BUSYBOX, 'true', host_config=self.client.create_host_config(
pid_mode='host', network_mode='none'
)
)
self.assertIn('Id', ctnr)
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
self.assertIn('HostConfig', inspect)
host_config = inspect['HostConfig']
self.assertIn('PidMode', host_config)
self.assertEqual(host_config['PidMode'], 'host')
#################
# LINKS TESTS #
#################
class TestRemoveLink(BaseTestCase):
def runTest(self):
# Create containers
container1 = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True
)
container1_id = container1['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
# Create Link
# we don't want the first /
link_path = self.client.inspect_container(container1_id)['Name'][1:]
link_alias = 'mylink'
container2 = self.client.create_container(
BUSYBOX, 'cat', host_config=self.client.create_host_config(
links={link_path: link_alias}, network_mode='none'
)
)
container2_id = container2['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
# Remove link
linked_name = self.client.inspect_container(container2_id)['Name'][1:]
link_name = '%s/%s' % (linked_name, link_alias)
self.client.remove_container(link_name, link=True)
# Link is gone
containers = self.client.containers(all=True)
retrieved = [x for x in containers if link_name in x['Names']]
self.assertEqual(len(retrieved), 0)
# Containers are still there
retrieved = [
x for x in containers if x['Id'].startswith(container1_id) or
x['Id'].startswith(container2_id)
]
self.assertEqual(len(retrieved), 2)
##################
# IMAGES TESTS #
##################
class TestPull(BaseTestCase):
def runTest(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
res = self.client.pull('hello-world')
self.tmp_imgs.append('hello-world')
self.assertEqual(type(res), six.text_type)
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
class TestPullStream(BaseTestCase):
def runTest(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
stream = self.client.pull('hello-world', stream=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
json.loads(chunk) # ensure chunk is a single, valid JSON blob
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
class TestCommit(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
self.assertIn('Container', img)
self.assertTrue(img['Container'].startswith(id))
self.assertIn('ContainerConfig', img)
self.assertIn('Image', img['ContainerConfig'])
self.assertEqual(BUSYBOX, img['ContainerConfig']['Image'])
busybox_id = self.client.inspect_image(BUSYBOX)['Id']
self.assertIn('Parent', img)
self.assertEqual(img['Parent'], busybox_id)
class TestRemoveImage(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
self.client.remove_image(img_id, force=True)
images = self.client.images(all=True)
res = [x for x in images if x['Id'].startswith(img_id)]
self.assertEqual(len(res), 0)
##################
# IMPORT TESTS #
##################
class ImportTestCase(BaseTestCase):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
def write_dummy_tar_content(self, n_bytes, tar_fd):
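        # extend_file() seeks to byte n_bytes - 1 and writes a single 'A'
        # (0x41), producing a file of exactly n_bytes, which is then added
        # to the tar stream under the name 'testdata'.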
def extend_file(f, n_bytes):
f.seek(n_bytes - 1)
f.write(bytearray([65]))
f.seek(0)
tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
with tempfile.NamedTemporaryFile() as f:
extend_file(f, n_bytes)
tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
tar.addfile(tarinfo, fileobj=f)
tar.close()
@contextlib.contextmanager
def dummy_tar_stream(self, n_bytes):
'''Yields a stream that is valid tar data of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file
@contextlib.contextmanager
def dummy_tar_file(self, n_bytes):
'''Yields the name of a valid tar file of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file.name
class TestImportFromBytes(ImportTestCase):
'''Tests importing an image from in-memory byte data.'''
def runTest(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
# The generic import_image() function cannot import in-memory bytes
# data that happens to be represented as a string type, because
# import_image() will try to use it as a filename and usually then
# trigger an exception. So we test the import_image_from_data()
# function instead.
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
class TestImportFromFile(ImportTestCase):
'''Tests importing an image from a tar file on disk.'''
def runTest(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
# statuses = self.client.import_image(
# src=tar_filename, repository='test/import-from-file')
statuses = self.client.import_image_from_file(
tar_filename, repository='test/import-from-file')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
class TestImportFromStream(ImportTestCase):
'''Tests importing an image from a stream containing tar data.'''
def runTest(self):
with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream:
statuses = self.client.import_image(
src=tar_stream, repository='test/import-from-stream')
# statuses = self.client.import_image_from_stream(
# tar_stream, repository='test/import-from-stream')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
class TestImportFromURL(ImportTestCase):
'''Tests downloading an image over HTTP.'''
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
self.end_headers()
shutil.copyfileobj(stream, self.wfile)
server = socketserver.TCPServer(('', 0), Handler)
thread = threading.Thread(target=server.serve_forever)
thread.setDaemon(True)
thread.start()
yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])
server.shutdown()
@pytest.mark.skipif(True, reason="Doesn't work inside a container - FIXME")
def runTest(self):
# The crappy test HTTP server doesn't handle large files well, so use
# a small file.
TAR_SIZE = 10240
with self.dummy_tar_stream(n_bytes=TAR_SIZE) as tar_data:
with self.temporary_http_file_server(tar_data) as url:
statuses = self.client.import_image(
src=url, repository='test/import-from-url')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
#################
# VOLUMES TESTS #
#################
@requires_api_version('1.21')
class TestVolumes(BaseTestCase):
def test_create_volume(self):
name = 'perfectcherryblossom'
self.tmp_volumes.append(name)
result = self.client.create_volume(name)
self.assertIn('Name', result)
self.assertEqual(result['Name'], name)
self.assertIn('Driver', result)
self.assertEqual(result['Driver'], 'local')
def test_create_volume_invalid_driver(self):
driver_name = 'invalid.driver'
with pytest.raises(docker.errors.NotFound):
self.client.create_volume('perfectcherryblossom', driver_name)
def test_list_volumes(self):
name = 'imperishablenight'
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.volumes()
self.assertIn('Volumes', result)
volumes = result['Volumes']
self.assertIn(volume_info, volumes)
def test_inspect_volume(self):
name = 'embodimentofscarletdevil'
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.inspect_volume(name)
self.assertEqual(volume_info, result)
def test_inspect_nonexistent_volume(self):
name = 'embodimentofscarletdevil'
with pytest.raises(docker.errors.NotFound):
self.client.inspect_volume(name)
def test_remove_volume(self):
name = 'shootthebullet'
self.tmp_volumes.append(name)
self.client.create_volume(name)
result = self.client.remove_volume(name)
self.assertTrue(result)
def test_remove_nonexistent_volume(self):
name = 'shootthebullet'
with pytest.raises(docker.errors.NotFound):
self.client.remove_volume(name)
#################
# BUILDER TESTS #
#################
class TestBuildStream(BaseTestCase):
def runTest(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
stream = self.client.build(fileobj=script, stream=True)
logs = ''
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
json.loads(chunk) # ensure chunk is a single, valid JSON blob
logs += chunk
self.assertNotEqual(logs, '')
class TestBuildFromStringIO(BaseTestCase):
def runTest(self):
if six.PY3:
return
script = io.StringIO(six.text_type('\n').join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]))
stream = self.client.build(fileobj=script, stream=True)
logs = ''
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
logs += chunk
self.assertNotEqual(logs, '')
@requires_api_version('1.8')
class TestBuildWithDockerignore(Cleanup, BaseTestCase):
def runTest(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
'MAINTAINER docker-py',
'ADD . /test',
]))
with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
f.write("\n".join([
'ignored',
'Dockerfile',
'.dockerignore',
'', # empty line
]))
with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
f.write("this file should not be ignored")
subdir = os.path.join(base_dir, 'ignored', 'subdir')
os.makedirs(subdir)
with open(os.path.join(subdir, 'file'), 'w') as f:
f.write("this file should be ignored")
tag = 'docker-py-test-build-with-dockerignore'
stream = self.client.build(
path=base_dir,
tag=tag,
)
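        # build() returns a generator of status chunks; consume the stream so
        # the build runs to completion before a container is created from the
        # resulting tag.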
for chunk in stream:
pass
c = self.client.create_container(tag, ['ls', '-1A', '/test'])
self.client.start(c)
self.client.wait(c)
logs = self.client.logs(c)
if six.PY3:
logs = logs.decode('utf-8')
self.assertEqual(
list(filter(None, logs.split('\n'))),
['not-ignored'],
)
#######################
# NETWORK TESTS #
#######################
@requires_api_version('1.21')
class TestNetworks(BaseTestCase):
def create_network(self, *args, **kwargs):
        net_name = 'dockerpy{}'.format(random.randrange(sys.maxsize))[:14]
net_id = self.client.create_network(net_name, *args, **kwargs)['id']
self.tmp_networks.append(net_id)
return (net_name, net_id)
def test_list_networks(self):
networks = self.client.networks()
initial_size = len(networks)
net_name, net_id = self.create_network()
networks = self.client.networks()
self.assertEqual(len(networks), initial_size + 1)
self.assertTrue(net_id in [n['id'] for n in networks])
networks_by_name = self.client.networks(names=[net_name])
self.assertEqual([n['id'] for n in networks_by_name], [net_id])
networks_by_partial_id = self.client.networks(ids=[net_id[:8]])
self.assertEqual([n['id'] for n in networks_by_partial_id], [net_id])
def test_inspect_network(self):
net_name, net_id = self.create_network()
net = self.client.inspect_network(net_id)
self.assertEqual(net, {
u'name': net_name,
u'id': net_id,
u'driver': 'bridge',
u'containers': {},
})
def test_create_network_with_host_driver_fails(self):
        net_name = 'dockerpy{}'.format(random.randrange(sys.maxsize))[:14]
with pytest.raises(APIError):
self.client.create_network(net_name, driver='host')
def test_remove_network(self):
initial_size = len(self.client.networks())
net_name, net_id = self.create_network()
self.assertEqual(len(self.client.networks()), initial_size + 1)
self.client.remove_network(net_id)
self.assertEqual(len(self.client.networks()), initial_size)
def test_connect_and_disconnect_container(self):
net_name, net_id = self.create_network()
container = self.client.create_container('busybox', 'top')
self.tmp_containers.append(container)
self.client.start(container)
network_data = self.client.inspect_network(net_id)
self.assertFalse(network_data.get('containers'))
self.client.connect_container_to_network(container, net_id)
network_data = self.client.inspect_network(net_id)
self.assertEqual(
list(network_data['containers'].keys()),
[container['Id']])
self.client.disconnect_container_from_network(container, net_id)
network_data = self.client.inspect_network(net_id)
self.assertFalse(network_data.get('containers'))
def test_connect_on_container_create(self):
net_name, net_id = self.create_network()
container = self.client.create_container(
image='busybox',
command='top',
host_config=self.client.create_host_config(network_mode=net_name),
)
self.tmp_containers.append(container)
self.client.start(container)
network_data = self.client.inspect_network(net_id)
self.assertEqual(
list(network_data['containers'].keys()),
[container['Id']])
self.client.disconnect_container_from_network(container, net_id)
network_data = self.client.inspect_network(net_id)
self.assertFalse(network_data.get('containers'))
#######################
# PY SPECIFIC TESTS #
#######################
class TestRunShlex(BaseTestCase):
def runTest(self):
commands = [
'true',
'echo "The Young Descendant of Tepes & Septette for the '
'Dead Princess"',
'echo -n "The Young Descendant of Tepes & Septette for the '
'Dead Princess"',
'/bin/sh -c "echo Hello World"',
'/bin/sh -c \'echo "Hello World"\'',
'echo "\"Night of Nights\""',
'true && echo "Night of Nights"'
]
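        # Each command is passed as a single string, exercising the client's
        # shlex-style splitting of string commands; every container is
        # expected to exit with status 0.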
for cmd in commands:
container = self.client.create_container(BUSYBOX, cmd)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0, msg=cmd)
class TestLoadConfig(BaseTestCase):
def runTest(self):
folder = tempfile.mkdtemp()
self.tmp_folders.append(folder)
cfg_path = os.path.join(folder, '.dockercfg')
f = open(cfg_path, 'w')
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = [email protected]')
f.close()
cfg = docker.auth.load_config(cfg_path)
self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None)
cfg = cfg[docker.auth.INDEX_NAME]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('Auth'), None)
class TestLoadJSONConfig(BaseTestCase):
def runTest(self):
folder = tempfile.mkdtemp()
self.tmp_folders.append(folder)
cfg_path = os.path.join(folder, '.dockercfg')
f = open(os.path.join(folder, '.dockercfg'), 'w')
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
email_ = '[email protected]'
f.write('{{"{0}": {{"auth": "{1}", "email": "{2}"}}}}\n'.format(
docker.auth.INDEX_URL, auth_, email_))
f.close()
cfg = docker.auth.load_config(cfg_path)
self.assertNotEqual(cfg[docker.auth.INDEX_URL], None)
cfg = cfg[docker.auth.INDEX_URL]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('Auth'), None)
class TestAutoDetectVersion(unittest.TestCase):
def test_client_init(self):
client = docker_client(version='auto')
client_version = client._version
api_version = client.version(api_version=False)['ApiVersion']
self.assertEqual(client_version, api_version)
api_version_2 = client.version()['ApiVersion']
self.assertEqual(client_version, api_version_2)
client.close()
def test_auto_client(self):
client = docker.AutoVersionClient(**docker_client_kwargs())
client_version = client._version
api_version = client.version(api_version=False)['ApiVersion']
self.assertEqual(client_version, api_version)
api_version_2 = client.version()['ApiVersion']
self.assertEqual(client_version, api_version_2)
client.close()
with self.assertRaises(docker.errors.DockerException):
docker.AutoVersionClient(**docker_client_kwargs(version='1.11'))
class TestConnectionTimeout(unittest.TestCase):
def setUp(self):
self.timeout = 0.5
self.client = docker.client.Client(base_url='http://192.168.10.2:4243',
timeout=self.timeout)
def runTest(self):
start = time.time()
res = None
# This call isn't supposed to complete, and it should fail fast.
try:
res = self.client.inspect_container('id')
        except Exception:
pass
end = time.time()
self.assertTrue(res is None)
self.assertTrue(end - start < 2 * self.timeout)
class UnixconnTestCase(unittest.TestCase):
"""
Test UNIX socket connection adapter.
"""
def test_resource_warnings(self):
"""
Test no warnings are produced when using the client.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
client = docker_client()
client.images()
client.close()
del client
        assert len(w) == 0, \
            "Expected no warnings, but got: {0}".format(w[0].message)
####################
# REGRESSION TESTS #
####################
class TestRegressions(BaseTestCase):
def test_443(self):
dfile = io.BytesIO()
with self.assertRaises(docker.errors.APIError) as exc:
for line in self.client.build(fileobj=dfile, tag="a/b/c"):
pass
self.assertEqual(exc.exception.response.status_code, 500)
dfile.close()
def test_542(self):
self.client.start(
self.client.create_container(BUSYBOX, ['true'])
)
result = self.client.containers(all=True, trunc=True)
self.assertEqual(len(result[0]['Id']), 12)
def test_647(self):
with self.assertRaises(docker.errors.APIError):
self.client.inspect_image('gensokyo.jp//kirisame')
def test_649(self):
self.client.timeout = None
ctnr = self.client.create_container(BUSYBOX, ['sleep', '2'])
self.client.start(ctnr)
self.client.stop(ctnr)
def test_715(self):
ctnr = self.client.create_container(BUSYBOX, ['id', '-u'], user=1000)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
assert logs == '1000\n'
def test_792_explicit_port_protocol(self):
tcp_port, udp_port = random.sample(range(9999, 32000), 2)
ctnr = self.client.create_container(
BUSYBOX, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
host_config=self.client.create_host_config(
port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port}
)
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
self.assertEqual(
self.client.port(ctnr, 2000)[0]['HostPort'],
six.text_type(tcp_port)
)
self.assertEqual(
self.client.port(ctnr, '2000/tcp')[0]['HostPort'],
six.text_type(tcp_port)
)
self.assertEqual(
self.client.port(ctnr, '2000/udp')[0]['HostPort'],
six.text_type(udp_port)
)
|
|
import sys
import logging
import uuid
import datetime
import collections
import copy
try:
import ujson as json
except ImportError:
import json
from collections import defaultdict
from simple_n_grams.simple_n_grams import SimpleNGrams
logger = logging.getLogger("analysis")
def analyze_tweets(tweet_generator,results):
"""
Entry point for Tweet input. A sequence of Tweet dicts and a results object are
required.
"""
for tweet in tweet_generator:
analyze_tweet(tweet,results)
def compare_results(results_analyzed, results_baseline):
results_output = {}
produce_relative_text(results_output,
results_analyzed,
results_baseline
)
return results_output
def deserialize_tweets(line_generator):
"""
    Generator that deserializes a stream of JSON lines, skipping lines that
    fail to parse.
"""
for line in line_generator:
try:
yield json.loads(line)
except ValueError:
continue
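# Illustrative usage of the generators above (not part of the original
# pipeline; the stdin source and the setup_analysis() arguments are
# assumptions):
#
#   results = setup_analysis(do_conversation=True)
#   analyze_tweets(deserialize_tweets(sys.stdin), results)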
def produce_relative_text(results_output,results_analyzed,results_baseline):
"""
    Populate results_output with the analyzed data normalized by the baseline
    data.
"""
# results to renormalize
keys = ['hashtags','urls']
for key in keys:
try:
analyzed = results_analyzed[key]
except KeyError:
continue
baseline = results_baseline[key]
# get normalization factors
B = sum(baseline.values())
A = sum(analyzed.values())
compared = defaultdict(int)
for a_item,a_value in analyzed.items():
if a_item not in baseline:
factor = 1
else:
                # use true division so integer counts do not truncate on Python 2
                a_frac = float(a_value) / A
                b_frac = float(baseline[a_item]) / B
factor = (a_frac - b_frac)/a_frac
compared[a_item] = analyzed[a_item] * factor
results_output[key] = compared
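# Worked example of the normalization above (illustrative numbers): if a
# hashtag accounts for a_frac = 0.10 of the analyzed counts but only
# b_frac = 0.02 of the baseline counts, then factor = (0.10 - 0.02) / 0.10
# = 0.8, so 80% of its analyzed count is kept; items missing from the
# baseline keep their full analyzed count (factor = 1).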
def setup_analysis(do_conversation = False, do_audience = False, identifier = None, input_results = None):
"""
    Create placeholders for quantities of interest in the results structure
    and return that structure.
If an identifier is specified, place the measurement accumulators at a
particular key.
"""
def weight_and_screennames():
return {"weight": 0, "screennames": set([])}
results = {
"tweet_count": 0,
"non-tweet_lines": 0,
"tweets_per_user": defaultdict(int),
#"user_id_to_screenname":
}
if do_conversation:
results['do_conversation'] = True
results["body_term_count"] = SimpleNGrams(
char_lower_cutoff=3
,n_grams=2
,tokenizer="twitter"
)
results["hashtags"] = defaultdict(int)
results["urls"] = defaultdict(int)
results["number_of_links"] = 0
results["utc_timeline"] = defaultdict(int)
results["local_timeline"] = defaultdict(int)
results["at_mentions"] = defaultdict(weight_and_screennames)
results["in_reply_to"] = defaultdict(int)
results["RT_of_user"] = defaultdict(weight_and_screennames)
results["quote_of_user"] = defaultdict(weight_and_screennames)
else:
        results['do_conversation'] = False
if do_audience:
results['do_audience'] = True
results["bio_term_count"] = SimpleNGrams(
char_lower_cutoff=3
,n_grams=1
,tokenizer="twitter"
)
results["profile_locations_regions"] = defaultdict(int)
else:
results['do_audience'] = False
    # In the future, custom fields could be added by passing kwarg=func pairs,
    # where kwarg is the field name and func is the aggregator/extractor.
return results
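# Sketch of the structure returned by the default call (based on the code
# above; only the always-present accumulators are shown):
#
#   setup_analysis() -> {
#       "tweet_count": 0,
#       "non-tweet_lines": 0,
#       "tweets_per_user": defaultdict(int),
#       "do_conversation": False,
#       "do_audience": False,
#   }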
def analyze_tweet(tweet, results):
"""
Add relevant data from a tweet to 'results'
"""
######################################
# fields that are relevant for user-level and tweet-level analysis
# count the number of valid Tweets here
# if it doesn't have at least a body and an actor, it's not a tweet
try:
body = tweet["body"]
userid = tweet["actor"]["id"][:15]
results["tweet_count"] += 1
except (ValueError, KeyError):
if "non-tweet_lines" in results:
results["non-tweet_lines"] += 1
return
# count the number of tweets from each user
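    # Gnip Activity Streams actor ids look like "id:twitter.com:12345678";
    # slicing with [15:] strips the 15-character "id:twitter.com:" prefix and
    # keeps only the numeric id.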
if "tweets_per_user" in results:
results["tweets_per_user"][tweet["actor"]["id"][15:]] += 1
#######################################
# fields that are relevant for the tweet-level analysis
# ------------------> term counts
# Tweet body term count
if "body_term_count" in results:
results["body_term_count"].add(tweet["body"])
    # count the occurrences of different hashtags
if "hashtags" in results:
if "hashtags" in tweet["twitter_entities"]:
for h in tweet["twitter_entities"]["hashtags"]:
results["hashtags"][h["text"].lower()] += 1
try:
        # count the occurrences of different top-level domains
if ("urls" in results) and ("urls" in tweet["gnip"]):
for url in tweet["gnip"]["urls"]:
try:
results["urls"][url["expanded_url"].split("/")[2]] += 1
except (KeyError,IndexError,AttributeError):
pass
# and the number of links total
if ("number_of_links" in results) and ("urls" in tweet["gnip"]):
results["number_of_links"] += len(tweet["gnip"]["urls"])
except KeyError:
pass
# -----------> timelines
# make a timeline of UTC day of Tweets posted
if "utc_timeline" in results:
date = tweet["postedTime"][0:10]
results["utc_timeline"][date] += 1
# make a timeline in normalized local time (poster's time) of all of the Tweets
if "local_timeline" in results:
utcOffset = tweet["actor"]["utcOffset"]
if utcOffset is not None:
posted = tweet["postedTime"]
hour_and_minute = (datetime.datetime.strptime(posted[0:16], "%Y-%m-%dT%H:%M") +
datetime.timedelta(seconds = int(utcOffset))).time().strftime("%H:%M")
results["local_timeline"][hour_and_minute] += 1
# ------------> mention results
# which users are @mentioned in the Tweet
if "at_mentions" in results:
for u in tweet["twitter_entities"]["user_mentions"]:
# update the mentions with weight + 1 and
# list all of the screennames (in case a name changes)
if u["id_str"] is not None:
results["at_mentions"][u["id_str"]]["weight"] += 1
results["at_mentions"][u["id_str"]]["screennames"].update([u["screen_name"].lower()])
# count the number of times each user gets replies
if ("in_reply_to" in results) and ("inReplyTo" in tweet):
results["in_reply_to"][tweet["inReplyTo"]["link"].split("/")[3].lower()] += 1
# --------------> RTs and quote Tweet
# count share actions (RTs and quote-Tweets)
    # don't count self-quotes or self-RTs, which Twitter now allows
if (("quote_of_user" in results) or ("RT_of_user" in results)) and (tweet["verb"] == "share"):
# if it's a quote tweet
if ("quote_of_user" in results) and ("twitter_quoted_status" in tweet["object"]):
quoted_id = tweet["object"]["twitter_quoted_status"]["actor"]["id"][15:]
quoted_name = tweet["object"]["twitter_quoted_status"]["actor"]["preferredUsername"]
            # compare numeric ids (both with the "id:twitter.com:" prefix stripped)
            if quoted_id != tweet["actor"]["id"][15:]:
results["quote_of_user"][quoted_id]["weight"] += 1
results["quote_of_user"][quoted_id]["screennames"].update([quoted_name])
# if it's a RT
elif ("RT_of_user" in results):
rt_of_name = tweet["object"]["actor"]["preferredUsername"].lower()
rt_of_id = tweet["object"]["actor"]["id"][15:]
            if rt_of_id != tweet["actor"]["id"][15:]:
results["RT_of_user"][rt_of_id]["weight"] += 1
results["RT_of_user"][rt_of_id]["screennames"].update([rt_of_name])
############################################
# actor-property qualities
# ------------> bio terms
if "bio_term_count" in results:
if tweet["actor"]["id"][:15] not in results["tweets_per_user"]:
try:
if tweet["actor"]["summary"] is not None:
results["bio_term_count"].add(tweet["actor"]["summary"])
except KeyError:
pass
# ---------> profile locations
if "profile_locations_regions" in results:
# if possible, get the user's address
try:
address = tweet["gnip"]["profileLocations"][0]["address"]
country_key = address.get("country", "no country available")
region_key = address.get("region", "no region available")
except KeyError:
country_key = "no country available"
region_key = "no region available"
results["profile_locations_regions"][country_key + " , " + region_key] += 1
|
|
# -*- coding: utf-8 -*-
"""Tests for the project and solution file writer classes."""
import io
import os
import unittest
from vstools import resources
from vstools import writers
from tests import test_lib
class FileWriterTest(test_lib.BaseTestCase):
"""File writer tests."""
# pylint: disable=protected-access
def testInitialize(self):
"""Tests the __init__ function."""
file_writer = writers.FileWriter()
self.assertIsNotNone(file_writer)
def testOpenClose(self):
"""Tests the Open and Close functions."""
file_writer = writers.FileWriter()
with test_lib.TempDirectory() as temp_directory:
filename = os.path.join(temp_directory, 'testfile')
file_writer.Open(filename)
file_writer.Close()
def testWriteBinaryData(self):
"""Tests the WriteBinaryData function."""
file_writer = writers.FileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteBinaryData(b'Binary data')
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = b'Binary data'
self.assertEqual(output_data, expected_output_data)
def testWriteLine(self):
"""Tests the WriteLine function."""
file_writer = writers.FileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteLine('Line of text')
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = b'Line of text\r\n'
self.assertEqual(output_data, expected_output_data)
def testWriteLines(self):
"""Tests the WriteLines function."""
file_writer = writers.FileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteLines([
'First line of text',
'Second line of text'])
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'First line of text\r\nSecond line of text\r\n')
self.assertEqual(output_data, expected_output_data)
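# The writer tests below share a common pattern: the writer's _file attribute
# is replaced with an io.BytesIO() buffer so that the generated project or
# solution data can be read back and compared byte-for-byte without touching
# the filesystem.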
class VS2008ProjectFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2008 project file writer test."""
# pylint: disable=protected-access
def testWriteConfiguration(self):
"""Tests the _WriteConfiguration function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteConfiguration(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertTrue(output_data.startswith(b'\t\t<Configuration\r\n'))
self.assertTrue(output_data.endswith(b'\t\t</Configuration>\r\n'))
def testWriteConfigurationLinkerTool(self):
"""Tests the _WriteConfigurationLinkerTool function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteConfigurationLinkerTool(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\t\t\t<Tool\r\n'
b'\t\t\t\tName="VCLinkerTool"\r\n'
        b'\t\t\t\tAdditionalLibraryDirectories="&quot;$(OutDir)&quot;"\r\n'
b'\t\t\t/>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteConfigurationOption(self):
"""Tests the _WriteConfigurationOption function."""
project_configuration = resources.VSProjectConfiguration()
project_configuration.compile_as = '1'
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteConfigurationOption(
project_configuration, 'CompileAs', 'compile_as', False, 4)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = b'\t\t\t\tCompileAs="1"\r\n'
self.assertEqual(output_data, expected_output_data)
def testWriteConfigurationTool(self):
"""Tests the _WriteConfigurationTool function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteConfigurationTool(
project_configuration, 'VCCLCompilerTool',
file_writer._TOOL_COMPILER_CONFIGURATION_OPTIONS)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\t\t\t<Tool\r\n'
b'\t\t\t\tName="VCCLCompilerTool"\r\n'
b'\t\t\t\tAdditionalIncludeDirectories=""\r\n'
b'\t\t\t\tPreprocessorDefinitions=""\r\n'
b'\t\t\t\tRuntimeLibrary=""\r\n'
b'\t\t\t\tWarningLevel=""\r\n'
b'\t\t\t\tCompileAs=""\r\n'
b'\t\t\t/>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteConfigurationToolFooter(self):
"""Tests the _WriteConfigurationToolFooter function."""
project_configuration = resources.VSProjectConfiguration()
project_configuration.compile_as = '1'
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteConfigurationToolFooter()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = b'\t\t\t/>\r\n'
self.assertEqual(output_data, expected_output_data)
def testWriteConfigurationToolHeader(self):
"""Tests the _WriteConfigurationToolHeader function."""
project_configuration = resources.VSProjectConfiguration()
project_configuration.compile_as = '1'
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteConfigurationToolHeader('VCLinkerTool')
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\t\t\t<Tool\r\n'
b'\t\t\t\tName="VCLinkerTool"\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteHeaderFiles(self):
"""Tests the _WriteHeaderFiles function."""
header_files = ['test.h']
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteHeaderFiles(header_files)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\t\t<Filter\r\n'
b'\t\t\tName="Header Files"\r\n'
b'\t\t\tFilter="h;hpp;hxx;hm;inl;inc;xsd"\r\n'
b'\t\t\tUniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"\r\n'
b'\t\t\t>\r\n'
b'\t\t\t<File\r\n'
b'\t\t\t\tRelativePath="test.h"\r\n'
b'\t\t\t\t>\r\n'
b'\t\t\t</File>\r\n'
b'\t\t</Filter>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteResourceFiles(self):
"""Tests the _WriteResourceFiles function."""
resource_files = ['test.rc']
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteResourceFiles(resource_files)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\t\t<Filter\r\n'
b'\t\t\tName="Resource Files"\r\n'
b'\t\t\tFilter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;'
b'resx;tiff;tif;png;wav"\r\n'
b'\t\t\tUniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}"\r\n'
b'\t\t\t>\r\n'
b'\t\t\t<File\r\n'
b'\t\t\t\tRelativePath="test.rc"\r\n'
b'\t\t\t\t>\r\n'
b'\t\t\t</File>\r\n'
b'\t\t</Filter>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteSourceFiles(self):
"""Tests the _WriteSourceFiles function."""
source_files = ['test.c']
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteSourceFiles(source_files)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\t\t<Filter\r\n'
b'\t\t\tName="Source Files"\r\n'
b'\t\t\tFilter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"\r\n'
b'\t\t\tUniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"\r\n'
b'\t\t\t>\r\n'
b'\t\t\t<File\r\n'
b'\t\t\t\tRelativePath="test.c"\r\n'
b'\t\t\t\t>\r\n'
b'\t\t\t</File>\r\n'
b'\t\t</Filter>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteHeader(self):
"""Tests the WriteHeader function."""
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteHeader()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = b'<?xml version="1.0" encoding="Windows-1252"?>\r\n'
self.assertEqual(output_data, expected_output_data)
def testWriteConfigurations(self):
"""Tests the WriteConfigurations function."""
project_configurations = resources.VSConfigurations()
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteConfigurations(project_configurations)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertTrue(output_data.startswith(b'\t<Configurations>\r\n'))
self.assertTrue(output_data.endswith(
b'\t</Configurations>\r\n'
b'\t<References>\r\n'
b'\t</References>\r\n'))
def testWriteDependencies(self):
"""Tests the WriteDependencies function."""
dependencies = []
solution_projects_by_guid = {}
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteDependencies(dependencies, solution_projects_by_guid)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertEqual(output_data, b'')
def testWriteFiles(self):
"""Tests the WriteFiles function."""
header_files = ['test.h']
resource_files = ['test.rc']
source_files = ['test.c']
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteFiles(source_files, header_files, resource_files)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertTrue(output_data.startswith(b'\t<Files>\r\n'))
self.assertTrue(output_data.endswith(
b'\t</Files>\r\n'
b'\t<Globals>\r\n'
b'\t</Globals>\r\n'))
def testWriteFooter(self):
"""Tests the WriteFooter function."""
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteFooter()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = b'</VisualStudioProject>\r\n'
self.assertEqual(output_data, expected_output_data)
def testWriteProjectConfigurations(self):
"""Tests the WriteProjectConfigurations function."""
project_configurations = resources.VSConfigurations()
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteProjectConfigurations(project_configurations)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertEqual(output_data, b'')
def testWriteProjectInformation(self):
"""Tests the WriteProjectInformation function."""
project_information = resources.VSProjectInformation()
file_writer = writers.VS2008ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteProjectInformation(project_information)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'<VisualStudioProject\r\n'
b'\tProjectType="Visual C++"\r\n'
b'\tVersion="9,00"\r\n'
b'\tName=""\r\n'
b'\tProjectGUID="{}"\r\n'
b'\tRootNamespace=""\r\n'
b'\tTargetFrameworkVersion="131072"\r\n'
b'\t>\r\n'
b'\t<Platforms>\r\n'
b'\t\t<Platform\r\n'
b'\t\t\tName="Win32"\r\n'
b'\t\t/>\r\n'
b'\t</Platforms>\r\n'
b'\t<ToolFiles>\r\n'
b'\t</ToolFiles>\r\n')
self.assertEqual(output_data, expected_output_data)
class VS2010ProjectFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2010 project file writer test."""
# pylint: disable=protected-access
def testWriteClCompileSection(self):
"""Tests the _WriteClCompileSection function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteClCompileSection(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <ClCompile>\r\n'
b' <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)'
b'</AdditionalIncludeDirectories>\r\n'
b' <PreprocessorDefinitions>%(PreprocessorDefinitions)'
b'</PreprocessorDefinitions>\r\n'
b' <RuntimeLibrary></RuntimeLibrary>\r\n'
b' <WarningLevel></WarningLevel>\r\n'
b' </ClCompile>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteConfigurationPropertyGroup(self):
"""Tests the _WriteConfigurationPropertyGroup function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteConfigurationPropertyGroup(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'==\'|\'"'
b' Label="Configuration">\r\n'
b' <ConfigurationType></ConfigurationType>\r\n'
b' </PropertyGroup>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteConfigurationPropertyGroupFooter(self):
"""Tests the _WriteConfigurationPropertyGroupFooter function."""
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteConfigurationPropertyGroupFooter()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = b' </PropertyGroup>\r\n'
self.assertEqual(output_data, expected_output_data)
def testWriteConfigurationPropertyGroupHeader(self):
"""Tests the _WriteConfigurationPropertyGroupHeader function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteConfigurationPropertyGroupHeader(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'==\'|\'" '
b'Label="Configuration">\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteHeaderFiles(self):
"""Tests the _WriteHeaderFiles function."""
header_files = ['test.h']
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteHeaderFiles(header_files)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <ItemGroup>\r\n'
b' <ClInclude Include="test.h" />\r\n'
b' </ItemGroup>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteItemDefinitionGroup(self):
"""Tests the _WriteItemDefinitionGroup function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteItemDefinitionGroup(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <ItemDefinitionGroup'
b' Condition="\'$(Configuration)|$(Platform)\'==\'|\'">\r\n'
b' <ClCompile>\r\n'
b' <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)'
b'</AdditionalIncludeDirectories>\r\n'
b' <PreprocessorDefinitions>%(PreprocessorDefinitions)'
b'</PreprocessorDefinitions>\r\n'
b' <RuntimeLibrary></RuntimeLibrary>\r\n'
b' <WarningLevel></WarningLevel>\r\n'
b' </ClCompile>\r\n'
b' </ItemDefinitionGroup>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteItemDefinitionGroupFooter(self):
"""Tests the _WriteItemDefinitionGroupFooter function."""
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteItemDefinitionGroupFooter()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = b' </ItemDefinitionGroup>\r\n'
self.assertEqual(output_data, expected_output_data)
def testWriteItemDefinitionGroupHeader(self):
"""Tests the _WriteItemDefinitionGroupHeader function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteItemDefinitionGroupHeader(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <ItemDefinitionGroup'
b' Condition="\'$(Configuration)|$(Platform)\'==\'|\'">\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteLibrarianSection(self):
"""Tests the _WriteLibrarianSection function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteLibrarianSection(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <Lib>\r\n'
b' <OutputFile></OutputFile>\r\n'
b' <ModuleDefinitionFile>\r\n'
b' </ModuleDefinitionFile>\r\n'
b' </Lib>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteLinkerSection(self):
"""Tests the _WriteLinkerSection function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteLinkerSection(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <Link>\r\n'
b' </Link>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteOutIntDirConditions(self):
"""Tests the _WriteOutIntDirConditions function."""
configuration_name = 'Release'
project_configurations = resources.VSConfigurations()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteOutIntDirConditions(
configuration_name, project_configurations)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertEqual(output_data, b'')
def testWriteOutIntDirPropertyGroups(self):
"""Tests the _WriteOutIntDirPropertyGroups function."""
project_configurations = resources.VSConfigurations()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteOutIntDirPropertyGroups(project_configurations)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <PropertyGroup>\r\n'
b' <_ProjectFileVersion>10.0.40219.1</_ProjectFileVersion>\r\n'
b' </PropertyGroup>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteResourceFiles(self):
"""Tests the _WriteResourceFiles function."""
resource_files = ['test.rc']
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteResourceFiles(resource_files)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <ItemGroup>\r\n'
b' <ResourceCompile Include="test.rc" />\r\n'
b' </ItemGroup>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteSourceFiles(self):
"""Tests the _WriteSourceFiles function."""
source_files = ['test.c']
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteSourceFiles(source_files)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <ItemGroup>\r\n'
b' <ClCompile Include="test.c" />\r\n'
b' </ItemGroup>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteConfigurations(self):
"""Tests the WriteConfigurations function."""
project_configurations = resources.VSConfigurations()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteConfigurations(project_configurations)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertTrue(output_data.startswith(
b' <Import Project="$(VCTargetsPath)\\'
b'Microsoft.Cpp.Default.props" />\r\n'))
self.assertTrue(output_data.endswith(b' </PropertyGroup>\r\n'))
def testWriteDependencies(self):
"""Tests the WriteDependencies function."""
dependencies = []
solution_projects_by_guid = {}
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteDependencies(dependencies, solution_projects_by_guid)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertEqual(output_data, b'')
def testWriteFiles(self):
"""Tests the WriteFiles function."""
header_files = ['test.h']
resource_files = ['test.rc']
source_files = ['test.c']
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteFiles(source_files, header_files, resource_files)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertTrue(output_data.startswith(b' <ItemGroup>\r\n'))
self.assertTrue(output_data.endswith(b' </ItemGroup>\r\n'))
def testWriteFooter(self):
"""Tests the WriteFooter function."""
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteFooter()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertTrue(output_data.endswith(b'</Project>'))
def testWriteHeader(self):
"""Tests the WriteHeader function."""
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteHeader()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\xef\xbb\xbf<?xml version="1.0" encoding="utf-8"?>\r\n'
b'<Project DefaultTargets="Build" ToolsVersion="4.0" '
b'xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteProjectConfigurations(self):
"""Tests the WriteProjectConfigurations function."""
project_configurations = resources.VSConfigurations()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteProjectConfigurations(project_configurations)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <ItemGroup Label="ProjectConfigurations">\r\n'
b' </ItemGroup>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteProjectInformation(self):
"""Tests the WriteProjectInformation function."""
project_information = resources.VSProjectInformation()
file_writer = writers.VS2010ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteProjectInformation(project_information)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <PropertyGroup Label="Globals">\r\n'
b' <ProjectGuid>{}</ProjectGuid>\r\n'
b' <RootNamespace></RootNamespace>\r\n'
b' </PropertyGroup>\r\n')
self.assertEqual(output_data, expected_output_data)
class VS2012ProjectFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2012 project file writer test."""
# pylint: disable=protected-access
def testWriteClCompileSection(self):
"""Tests the _WriteClCompileSection function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2012ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteClCompileSection(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <ClCompile>\r\n'
b' <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)'
b'</AdditionalIncludeDirectories>\r\n'
b' <PreprocessorDefinitions>%(PreprocessorDefinitions)'
b'</PreprocessorDefinitions>\r\n'
b' <RuntimeLibrary></RuntimeLibrary>\r\n'
b' <WarningLevel></WarningLevel>\r\n'
b' </ClCompile>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteConfigurationPropertyGroup(self):
"""Tests the _WriteConfigurationPropertyGroup function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2012ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteConfigurationPropertyGroup(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'==\'|\'"'
b' Label="Configuration">\r\n'
b' <ConfigurationType></ConfigurationType>\r\n'
b' <PlatformToolset>v110</PlatformToolset>\r\n'
b' </PropertyGroup>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteItemDefinitionGroup(self):
"""Tests the _WriteItemDefinitionGroup function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2012ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteItemDefinitionGroup(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <ItemDefinitionGroup '
b'Condition="\'$(Configuration)|$(Platform)\'==\'|\'">\r\n'
b' <ClCompile>\r\n'
b' <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)'
b'</AdditionalIncludeDirectories>\r\n'
b' <PreprocessorDefinitions>%(PreprocessorDefinitions)'
b'</PreprocessorDefinitions>\r\n'
b' <RuntimeLibrary></RuntimeLibrary>\r\n'
b' <WarningLevel></WarningLevel>\r\n'
b' </ClCompile>\r\n'
b' </ItemDefinitionGroup>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteLibrarianSection(self):
"""Tests the _WriteLibrarianSection function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2012ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteLibrarianSection(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <Lib>\r\n'
b' <OutputFile></OutputFile>\r\n'
b' <ModuleDefinitionFile />\r\n'
b' </Lib>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteLinkerSection(self):
"""Tests the _WriteLinkerSection function."""
project_configuration = resources.VSProjectConfiguration()
file_writer = writers.VS2012ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteLinkerSection(project_configuration)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <Link>\r\n'
b' </Link>\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteOutIntDirConditions(self):
"""Tests the _WriteOutIntDirConditions function."""
configuration_name = 'Release'
project_configurations = resources.VSConfigurations()
file_writer = writers.VS2012ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteOutIntDirConditions(
configuration_name, project_configurations)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertEqual(output_data, b'')
def testWriteOutIntDirPropertyGroups(self):
"""Tests the _WriteOutIntDirPropertyGroups function."""
project_configurations = resources.VSConfigurations()
file_writer = writers.VS2012ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteOutIntDirPropertyGroups(project_configurations)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b' <PropertyGroup>\r\n'
b' <_ProjectFileVersion>11.0.61030.0</_ProjectFileVersion>\r\n'
b' </PropertyGroup>\r\n')
self.assertEqual(output_data, expected_output_data)
class VS2013ProjectFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2013 project file writer test."""
# pylint: disable=protected-access
def testInitialize(self):
"""Tests the __init__ function."""
file_writer = writers.VS2013ProjectFileWriter()
self.assertIsNotNone(file_writer)
class VS2015ProjectFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2015 project file writer test."""
# pylint: disable=protected-access
def testWriteOutIntDirConditions(self):
"""Tests the _WriteOutIntDirConditions function."""
configuration_name = 'Release'
project_configurations = resources.VSConfigurations()
file_writer = writers.VS2015ProjectFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteOutIntDirConditions(
configuration_name, project_configurations)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertEqual(output_data, b'')
class VSSolutionFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio solution file writer test."""
# pylint: disable=protected-access
# TODO: add tests for _WriteProjectConfigurationPlatforms.
# TODO: add tests for _WriteSolutionConfigurationPlatforms.
def testWriteSolutionProperties(self):
"""Tests the _WriteSolutionProperties function."""
file_writer = writers.VSSolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteSolutionProperties()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\tGlobalSection(SolutionProperties) = preSolution\r\n'
b'\t\tHideSolutionNode = FALSE\r\n'
b'\tEndGlobalSection\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteProjects(self):
"""Tests the WriteProjects function."""
solution_project = resources.VSSolutionProject('name', 'file', 'guid')
file_writer = writers.VSSolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteProjects([solution_project])
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
self.assertEqual(output_data, b'')
class VS2008SolutionFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2008 solution file writer test."""
# pylint: disable=protected-access
def testWriteConfigurations(self):
"""Tests the WriteConfigurations function."""
solution_configurations = resources.VSConfigurations()
solution_project = resources.VSSolutionProject('name', 'filename', 'guid')
file_writer = writers.VS2008SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteConfigurations(solution_configurations, [solution_project])
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'Global\r\n'
b'\tGlobalSection(SolutionProperties) = preSolution\r\n'
b'\t\tHideSolutionNode = FALSE\r\n'
b'\tEndGlobalSection\r\n'
b'EndGlobal\r\n')
self.assertEqual(output_data, expected_output_data)
class VS2010SolutionFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2010 solution file writer test."""
# pylint: disable=protected-access
def testWriteConfigurations(self):
"""Tests the WriteConfigurations function."""
solution_configurations = resources.VSConfigurations()
solution_project = resources.VSSolutionProject('name', 'filename', 'guid')
file_writer = writers.VS2010SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteConfigurations(solution_configurations, [solution_project])
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'Global\r\n'
b'\tGlobalSection(SolutionProperties) = preSolution\r\n'
b'\t\tHideSolutionNode = FALSE\r\n'
b'\tEndGlobalSection\r\n'
b'EndGlobal\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteHeader(self):
"""Tests the WriteHeader function."""
file_writer = writers.VS2010SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteHeader()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\xef\xbb\xbf\r\n'
b'Microsoft Visual Studio Solution File, Format Version 11.00\r\n'
b'# Visual C++ Express 2010\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteProject(self):
"""Tests the WriteProject function."""
solution_project = resources.VSSolutionProject('name', 'filename', 'guid')
file_writer = writers.VS2010SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteProject(solution_project)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "name",'
b' "filename.vcxproj", "{GUID}"\r\n'
b'EndProject\r\n')
self.assertEqual(output_data, expected_output_data)
class VS2012SolutionFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2012 solution file writer test."""
# pylint: disable=protected-access
def testWriteHeader(self):
"""Tests the WriteHeader function."""
file_writer = writers.VS2012SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteHeader()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\xef\xbb\xbf\r\n'
b'Microsoft Visual Studio Solution File, Format Version 12.00\r\n'
b'# Visual Studio Express 2012 for Windows Desktop\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteProject(self):
"""Tests the WriteProject function."""
solution_project = resources.VSSolutionProject('name', 'filename', 'guid')
file_writer = writers.VS2012SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteProject(solution_project)
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "name", '
b'"filename.vcxproj", "{GUID}"\r\nEndProject\r\n')
self.assertEqual(output_data, expected_output_data)
class VS2013SolutionFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2013 solution file writer test."""
# pylint: disable=protected-access
def testWriteHeader(self):
"""Tests the WriteHeader function."""
file_writer = writers.VS2013SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteHeader()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\xef\xbb\xbf\r\n'
b'Microsoft Visual Studio Solution File, Format Version 12.00\r\n'
b'# Visual Studio Express 2013 for Windows Desktop\r\n'
b'VisualStudioVersion = 12.0.21005.1\r\n'
b'MinimumVisualStudioVersion = 10.0.40219.1\r\n')
self.assertEqual(output_data, expected_output_data)
class VS2015SolutionFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2015 solution file writer test."""
# pylint: disable=protected-access
def testWriteHeader(self):
"""Tests the WriteHeader function."""
file_writer = writers.VS2015SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteHeader()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\xef\xbb\xbf\r\n'
b'Microsoft Visual Studio Solution File, Format Version 12.00\r\n'
b'# Visual Studio 14\r\n'
b'VisualStudioVersion = 14.0.25420.1\r\n'
b'MinimumVisualStudioVersion = 10.0.40219.1\r\n')
self.assertEqual(output_data, expected_output_data)
class VS2017SolutionFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2017 solution file writer test."""
# pylint: disable=protected-access
def testWriteExtensibilityGlobals(self):
"""Tests the _WriteExtensibilityGlobals function."""
file_writer = writers.VS2017SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer._WriteExtensibilityGlobals()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\tGlobalSection(ExtensibilityGlobals) = postSolution\r\n'
b'\t\tSolutionGuid = {E41FC29C-7FE6-4F98-85AD-1ED968E86446}\r\n'
b'\tEndGlobalSection\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteConfigurations(self):
"""Tests the WriteConfigurations function."""
solution_configurations = resources.VSConfigurations()
solution_project = resources.VSSolutionProject('name', 'filename', 'guid')
file_writer = writers.VS2017SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteConfigurations(solution_configurations, [solution_project])
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
# TODO: add ExtensibilityGlobals
expected_output_data = (
b'Global\r\n'
b'\tGlobalSection(SolutionProperties) = preSolution\r\n'
b'\t\tHideSolutionNode = FALSE\r\n'
b'\tEndGlobalSection\r\n'
b'EndGlobal\r\n')
self.assertEqual(output_data, expected_output_data)
def testWriteHeader(self):
"""Tests the WriteHeader function."""
file_writer = writers.VS2017SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteHeader()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\xef\xbb\xbf\r\n'
b'Microsoft Visual Studio Solution File, Format Version 12.00\r\n'
b'# Visual Studio 15\r\n'
b'VisualStudioVersion = 15.0.26730.10\r\n'
b'MinimumVisualStudioVersion = 10.0.40219.1\r\n')
self.assertEqual(output_data, expected_output_data)
class VS2019SolutionFileWriterTest(test_lib.BaseTestCase):
"""Visual Studio 2019 solution file writer test."""
# pylint: disable=protected-access
def testWriteHeader(self):
"""Tests the WriteHeader function."""
file_writer = writers.VS2019SolutionFileWriter()
file_writer._file = io.BytesIO()
file_writer.WriteHeader()
file_writer._file.seek(0, os.SEEK_SET)
output_data = file_writer._file.read()
expected_output_data = (
b'\xef\xbb\xbf\r\n'
b'Microsoft Visual Studio Solution File, Format Version 12.00\r\n'
b'# Visual Studio 15\r\n'
b'VisualStudioVersion = 15.0.26730.10\r\n'
b'MinimumVisualStudioVersion = 10.0.40219.1\r\n')
self.assertEqual(output_data, expected_output_data)
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
#
# Desc: This file is part of the ecromedos Document Preparation System
# Author: Tobias Koch <[email protected]>
# License: MIT
# URL: http://www.ecromedos.net
#
import sys, locale, functools
import lxml.etree as etree
from net.ecromedos.error import ECMDSPluginError
def getInstance(config):
"""Returns a plugin instance."""
return Plugin(config)
#end function
class Plugin():
def __init__(self, config):
self.index = {}
self.counter = 0
try:
self.__draft = config['xsl_params']['global.draft']
except KeyError:
self.__draft = "'no'"
#end function
def process(self, node, format):
"""Either saves a glossary entry or sorts and builds the glossary,
depending on which node triggered the plugin."""
# skip if in draft mode
if self.__draft == "'yes'":
return node
if node.tag == "idxterm":
node = self.__saveNode(node)
elif node.tag == "make-index":
node = self.__makeIndex(node)
#end if
return node
#end function
def flush(self):
self.index = {}
self.counter = 0
#end function
# PRIVATE
def __saveNode(self, node):
"""Substitutes a 'defterm' node with a label and stores a reference
to the node for later when the index is to be built."""
sortkey = node.attrib.get("sortkey", None)
group = node.attrib.get("group", "default")
item = None
subitem = None
subsubitem = None
# read idxterm items
for child in node.iterchildren():
if child.tag == "item":
item = child.text.strip()
elif child.tag == "subitem":
subitem = child.text.strip()
elif child.tag == "subsubitem":
subsubitem = child.text.strip()
#end if
#end for
# create label
label_id = "idx:item%06d" % self.counter
# replace node itself with label node
label_node = etree.Element("label", id=label_id)
label_node.tail = node.tail
node.getparent().replace(node, label_node)
# at least 'item' must exist
        if item is not None:
index = self.index.setdefault(group, [group, {}, [], None])
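            # Every entry in the nested index is a 4-element list:
            # [term, {sub-entries}, [label ids], sortkey]. The loop below
            # descends item -> subitem -> subsubitem; the first missing level
            # (or the final None sentinel) marks where the label id and
            # sortkey are recorded.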
for entry in [item, subitem, subsubitem, None]:
if entry is None:
index[2].append(label_id)
index[3] = sortkey
break
#end if
index = index[1].setdefault(entry, [entry, {}, [], None])
#end for
#end if
self.counter += 1
return label_node
#end function
def __makeIndex(self, node):
"""Read configuration. Sort items. Build index. Build XML."""
if not self.index:
return node
# build configuration
config = self.__configuration(node)
# set locale
self.__setLocale(config['locale'], config['locale_encoding'],
config['locale_variant'])
# build DOM structures
index = self.__buildIndex(node, config)
# reset locale
self.__resetLocale()
return index
#end function
def __configuration(self, node):
"""Read node attributes and build a dictionary holding
configuration information for the collator"""
# presets
properties = {
"columns": "2",
"group": "default",
"separator": ", ",
"locale": "C",
"locale_encoding": None,
"locale_variant": None,
"alphabet": "A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z"
}
# read element attributes
properties.update(dict(node.items()))
# split locale into locale/encoding/variant
if '@' in properties['locale']:
properties['locale'], properties['locale_variant'] = \
properties['locale'].split('@', 1)
if '.' in properties['locale']:
properties['locale'], properties['locale_encoding'] = \
properties['locale'].split('.', 1)
#end ifs
# parse the alphabet
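        # A bracketed entry such as "[Symbols]" (illustrative value) names the
        # leading index section that collects terms sorting before the first
        # letter; every other entry becomes a regular section letter.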
alphabet = []
for ch in [x.strip() for x in properties['alphabet'].split(",")]:
if ch[0] == '[' and ch[-1] == ']':
properties['symbols'] = ch[1:-1].strip()
else:
alphabet.append(ch)
#end if
#end for
properties['alphabet'] = alphabet
return properties
#end function
def __setLocale(self, collate="C", encoding=None, variant=None):
"""Sets the locale to the specified locale, encoding and locale
variant."""
success = False
for e in [encoding, "UTF-8"]:
if success:
break
for v in [variant, ""]:
localestring = '.'.join([x for x in [collate, e] if x])
localestring = '@'.join([x for x in [localestring, v] if x])
try:
locale.setlocale(locale.LC_COLLATE, localestring)
success = True
break
except locale.Error:
pass
#end for
#end for
        if not success:
            msg = "Warning: cannot set locale '%s'.\n" % collate
            sys.stderr.write(msg)
#end function
def __resetLocale(self):
"""Resets LC_COLLATE to its default."""
locale.resetlocale(locale.LC_COLLATE)
#end function
def __sortIndex(self, index, level="item", config=None):
"""Sort index terms."""
# stop recursion
if not index:
return index
# recursive sortkey evaluation
itemlist = []
for v in index.values():
# set sortkey
if not v[-1]:
v[-1] = v[0]
# recursion
v[1] = self.__sortIndex(v[1], "sub"+level, config)
itemlist.append(v)
#end for
# insert alphabet
if level == "item":
for ch in config['alphabet']:
newnode = etree.Element("idxsection", name=ch)
itemlist.append(["idxsection", newnode, ch])
#end for
#end if
# comparison function
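        # Ordering rules: when a section placeholder (payload is an Element)
        # is compared to a term, the sortkeys are compared case-insensitively;
        # otherwise plain collation is used. On a tie the section placeholder
        # sorts first, so terms end up behind the letter that opens their
        # section.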
def compare(a,b):
x1 = a[-1]
x2 = b[-1]
y1 = a[1]
y2 = b[1]
if isinstance(y1, etree._Element) != isinstance(y2, etree._Element):
result = locale.strcoll(x1.lower(), x2.lower())
else:
result = locale.strcoll(a[-1], b[-1])
#end if
if result != 0:
return result
elif isinstance(y1, etree._Element) and \
isinstance(y2, etree._Element):
return 0
elif isinstance(y1, etree._Element):
return -1
elif isinstance(y2, etree._Element):
return +1
else:
return 0
#end inline
itemlist.sort(key=functools.cmp_to_key(compare))
return itemlist
#end function
def __buildIndexHelper(self, section, index, level, separator):
"""Build index recursively from nested lists structure."""
# stop recursion
if not index:
return index
for item in index:
term = item[0]
item_node = etree.Element(level)
item_node.text = term
i = 0
references = item[2]
num_ref = len(references)
# build referrer node
while i < num_ref:
ref = references[i]
# add single space
if i == 0:
item_node.text += " "
# add reference to list
etree.SubElement(item_node, "idxref", idref=ref)
                # add the configured separator between references
if i < num_ref - 1:
if not item_node[-1].tail:
item_node[-1].tail = separator
else:
item_node[-1].tail += separator
#end if
#end if
i += 1
#end while
section.append(item_node)
# recursion
self.__buildIndexHelper(section, item[1], "sub"+level, separator)
#end for
#end function
def __buildIndex(self, node, config):
"""Build XML DOM structure."""
# detect group name
group = node.attrib.get("group", "default")
# load group
try:
index = self.index[group][1]
        except KeyError:
return node
#end try
# sort index
localestring, encoding = locale.getlocale(locale.LC_COLLATE)
index = self.__sortIndex(index, level="item", config=config)
# build base node
for prop_name in ["columns", "title", "tocentry"]:
try:
node.attrib[prop_name] = config[prop_name]
except KeyError: pass
#end for
# start building index...
section = etree.Element("idxsection")
try:
section.attrib["name"] = config['symbols']
except KeyError: pass
separator = config["separator"]
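        # The sorted list interleaves section placeholders (items whose second
        # element is an idxsection Element) with real index terms: a
        # placeholder closes the current section and opens the next one, while
        # terms and their references are appended to the current section.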
for item in index:
if isinstance(item[1], etree._Element):
node.append(section)
section = item[1]
else:
term = item[0]
item_node = etree.Element("item")
item_node.text = term
i = 0
references = item[2]
num_ref = len(references)
# build referrer node
while i < num_ref:
ref = references[i]
# add single space
if i == 0:
item_node.text += " "
# add reference to list
etree.SubElement(item_node, "idxref", idref=ref)
                    # add the configured separator between references
if i < num_ref - 1:
if not item_node[-1].tail:
item_node[-1].tail = separator
else:
item_node[-1].tail += separator
#end if
#end if
i += 1
#end while
section.append(item_node)
# recursion
self.__buildIndexHelper(section, item[1], "subitem", separator)
#end if
#end for
node.append(section)
node.tag = "index"
return node
#end function
#end class
|
|
#!/usr/bin/env python3
import itertools
from collections import defaultdict
import networkx as nx
import numpy as np
from pgmpy.base import UndirectedGraph
from pgmpy.factors import factor_product, Factor
from pgmpy.independencies import Independencies
from pgmpy.extern.six.moves import map, range, zip
class MarkovModel(UndirectedGraph):
"""
    Base class for Markov model.
    A MarkovModel stores nodes and edges with potentials.
    MarkovModel holds undirected edges.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object.
Examples
--------
Create an empty Markov Model with no nodes and no edges.
>>> from pgmpy.models import MarkovModel
>>> G = MarkovModel()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node('a')
Add the nodes from any container (a list, set or tuple or the nodes
from another graph).
>>> G.add_nodes_from(['a', 'b'])
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge('a', 'b')
a list of edges,
>>> G.add_edges_from([('a', 'b'), ('b', 'c')])
If some edges connect nodes not yet in the model, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Shortcuts:**
    Many common graph features allow python syntax to speed reporting.
>>> 'a' in G # check if node in graph
True
>>> len(G) # number of nodes in graph
3
Public Methods
--------------
add_node('node1')
add_nodes_from(['node1', 'node2', ...])
add_edge('node1', 'node2')
add_edges_from([('node1', 'node2'),('node3', 'node4')])
"""
def __init__(self, ebunch=None):
super(MarkovModel, self).__init__()
if ebunch:
self.add_edges_from(ebunch)
self.factors = []
def add_edge(self, u, v, **kwargs):
"""
Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph
Parameters
----------
u,v : nodes
Nodes can be any hashable Python object.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> G = MarkovModel()
>>> G.add_nodes_from(['Alice', 'Bob', 'Charles'])
>>> G.add_edge('Alice', 'Bob')
"""
# check that there is no self loop.
if u != v:
super(MarkovModel, self).add_edge(u, v, **kwargs)
else:
raise ValueError('Self loops are not allowed')
def add_factors(self, *factors):
"""
Associate a factor to the graph.
See factors class for the order of potential values
Parameters
----------
*factor: pgmpy.factors.factors object
A factor object on any subset of the variables of the model which
is to be associated with the model.
Returns
-------
None
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import Factor
>>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles'),
... ('Charles', 'Debbie'), ('Debbie', 'Alice')])
>>> factor = Factor(['Alice', 'Bob'], cardinality=[3, 2],
        ...                 values=np.random.rand(6))
>>> student.add_factors(factor)
"""
for factor in factors:
if set(factor.variables) - set(factor.variables).intersection(
set(self.nodes())):
raise ValueError("Factors defined on variable not in the model",
factor)
self.factors.append(factor)
def get_factors(self):
"""
Returns the factors that have been added till now to the graph
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import Factor
>>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles')])
>>> factor = Factor(['Alice', 'Bob'], cardinality=[2, 2],
        ...                 values=np.random.rand(4))
>>> student.add_factors(factor)
>>> student.get_factors()
"""
return self.factors
def remove_factors(self, *factors):
"""
Removes the given factors from the added factors.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import Factor
>>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles')])
>>> factor = Factor(['Alice', 'Bob'], cardinality=[2, 2],
... values=np.random.rand(4))
>>> student.add_factors(factor)
>>> student.remove_factors(factor)
"""
for factor in factors:
self.factors.remove(factor)
def get_cardinality(self, check_cardinality=False):
"""
        Returns a dictionary with the variables as keys and their respective
        cardinality as values.
Parameters
----------
check_cardinality: boolean, optional
            If check_cardinality=True, it checks whether cardinality
            information is available for all the variables and raises an
            error if it is not.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import Factor
>>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles')])
>>> factor = Factor(['Alice', 'Bob'], cardinality=[2, 2],
... values=np.random.rand(4))
>>> student.add_factors(factor)
>>> student.get_cardinality()
defaultdict(<class 'int'>, {'Bob': 2, 'Alice': 2})
"""
cardinalities = defaultdict(int)
for factor in self.factors:
for variable, cardinality in zip(factor.scope(), factor.cardinality):
cardinalities[variable] = cardinality
if check_cardinality and len(self.nodes()) != len(cardinalities):
raise ValueError('Factors for all the variables not defined')
return cardinalities
def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors -
        * Checks if the cardinalities of all the variables are consistent across all the factors.
        * Checks if the scope of every factor forms a clique in the model (i.e. the factor is consistent with the model structure).
Returns
-------
check: boolean
True if all the checks are passed
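        Examples
        --------
        A minimal sketch (hypothetical two-node model, following the Factor
        usage shown in the other docstrings of this class):
        >>> from pgmpy.models import MarkovModel
        >>> from pgmpy.factors import Factor
        >>> student = MarkovModel([('Alice', 'Bob')])
        >>> factor = Factor(['Alice', 'Bob'], cardinality=[2, 2],
        ...                 values=np.random.rand(4))
        >>> student.add_factors(factor)
        >>> student.check_model()
        True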
"""
cardinalities = self.get_cardinality()
for factor in self.factors:
for variable, cardinality in zip(factor.scope(), factor.cardinality):
if cardinalities[variable] != cardinality:
raise ValueError(
'Cardinality of variable {var} not matching among factors'.format(var=variable))
for var1, var2 in itertools.combinations(factor.variables, 2):
if var2 not in self.neighbors(var1):
raise ValueError("Factor inconsistent with the model.")
return True
def to_factor_graph(self):
"""
Converts the markov model into factor graph.
A factor graph contains two types of nodes. One type corresponds to
random variables whereas the second type corresponds to factors over
these variables. The graph only contains edges between variables and
factor nodes. Each factor node is associated with one factor whose
scope is the set of variables that are its neighbors.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import Factor
>>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles')])
>>> factor1 = Factor(['Alice', 'Bob'], [3, 2], np.random.rand(6))
>>> factor2 = Factor(['Bob', 'Charles'], [2, 2], np.random.rand(4))
>>> student.add_factors(factor1, factor2)
>>> factor_graph = student.to_factor_graph()
"""
from pgmpy.models import FactorGraph
factor_graph = FactorGraph()
if not self.factors:
raise ValueError('Factors not associated with the random variables.')
factor_graph.add_nodes_from(self.nodes())
for factor in self.factors:
scope = factor.scope()
factor_node = 'phi_' + '_'.join(scope)
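            # The factor node is named after its scope (e.g. 'phi_Alice_Bob')
            # and is connected to every variable appearing in that scope.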
factor_graph.add_edges_from(itertools.product(scope, [factor_node]))
factor_graph.add_factors(factor)
return factor_graph
def triangulate(self, heuristic='H6', order=None, inplace=False):
"""
Triangulate the graph.
        If an order of deletion is given, the heuristic algorithm will not be used.
Parameters
----------
heuristic: H1 | H2 | H3 | H4 | H5 | H6
The heuristic algorithm to use to decide the deletion order of
the variables to compute the triangulated graph.
            Let X be the set of variables and X(i) denote the i-th variable.
* S(i) - The size of the clique created by deleting the variable.
* E(i) - Cardinality of variable X(i).
* M(i) - Maximum size of cliques given by X(i) and its adjacent nodes.
* C(i) - Sum of size of cliques given by X(i) and its adjacent nodes.
            The heuristic algorithm decides the deletion order in the following way:
* H1 - Delete the variable with minimal S(i).
* H2 - Delete the variable with minimal S(i)/E(i).
* H3 - Delete the variable with minimal S(i) - M(i).
* H4 - Delete the variable with minimal S(i) - C(i).
* H5 - Delete the variable with minimal S(i)/M(i).
* H6 - Delete the variable with minimal S(i)/C(i).
order: list, tuple (array-like)
            The order of deletion of the variables to compute the triangulated
            graph. If order is given, the heuristic algorithm will not be used.
inplace: True | False
            If inplace is True, the edges are added to the object on which
            the method is called; otherwise a new triangulated object is
            returned.
Reference
---------
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.56.3607
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import Factor
>>> G = MarkovModel()
>>> G.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
>>> G.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
... ('x4', 'x7'), ('x5', 'x7')])
>>> phi = [Factor(edge, [2, 2], np.random.rand(4)) for edge in G.edges()]
>>> G.add_factors(*phi)
>>> G_chordal = G.triangulate()
"""
self.check_model()
if self.is_triangulated():
if inplace:
return
else:
return self
graph_copy = nx.Graph(self.edges())
edge_set = set()
def _find_common_cliques(cliques_list):
"""
Finds the common cliques among the given set of cliques for
corresponding node.
"""
common = set([tuple(x) for x in cliques_list[0]])
for i in range(1, len(cliques_list)):
common = common & set([tuple(x) for x in cliques_list[i]])
return list(common)
def _find_size_of_clique(clique, cardinalities):
"""
Computes the size of a clique.
Size of a clique is defined as product of cardinalities of all the
nodes present in the clique.
"""
return list(map(lambda x: np.prod([cardinalities[node] for node in x]),
clique))
def _get_cliques_dict(node):
"""
Returns a dictionary in the form of {node: cliques_formed} of the
node along with its neighboring nodes.
clique_dict_removed would be containing the cliques created
after deletion of the node
clique_dict_node would be containing the cliques created before
deletion of the node
"""
graph_working_copy = nx.Graph(graph_copy.edges())
neighbors = graph_working_copy.neighbors(node)
graph_working_copy.add_edges_from(itertools.combinations(neighbors, 2))
clique_dict = nx.cliques_containing_node(graph_working_copy,
nodes=([node] + neighbors))
graph_working_copy.remove_node(node)
clique_dict_removed = nx.cliques_containing_node(graph_working_copy,
nodes=neighbors)
return clique_dict, clique_dict_removed
if not order:
order = []
cardinalities = self.get_cardinality()
for index in range(self.number_of_nodes()):
# S represents the size of clique created by deleting the
# node from the graph
S = {}
# M represents the size of maximum size of cliques given by
# the node and its adjacent node
M = {}
# C represents the sum of size of the cliques created by the
# node and its adjacent node
C = {}
for node in set(graph_copy.nodes()) - set(order):
clique_dict, clique_dict_removed = _get_cliques_dict(node)
S[node] = _find_size_of_clique(
_find_common_cliques(list(clique_dict_removed.values())),
cardinalities
)[0]
common_clique_size = _find_size_of_clique(
_find_common_cliques(list(clique_dict.values())),
cardinalities
)
M[node] = np.max(common_clique_size)
C[node] = np.sum(common_clique_size)
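                # Pick the next node to eliminate according to the chosen
                # heuristic; any unrecognised heuristic string falls through
                # to H6 (minimal S(i)/C(i)).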
if heuristic == 'H1':
node_to_delete = min(S, key=S.get)
elif heuristic == 'H2':
S_by_E = {key: S[key] / cardinalities[key] for key in S}
node_to_delete = min(S_by_E, key=S_by_E.get)
elif heuristic == 'H3':
S_minus_M = {key: S[key] - M[key] for key in S}
node_to_delete = min(S_minus_M, key=S_minus_M.get)
elif heuristic == 'H4':
S_minus_C = {key: S[key] - C[key] for key in S}
node_to_delete = min(S_minus_C, key=S_minus_C.get)
elif heuristic == 'H5':
S_by_M = {key: S[key] / M[key] for key in S}
node_to_delete = min(S_by_M, key=S_by_M.get)
else:
S_by_C = {key: S[key] / C[key] for key in S}
node_to_delete = min(S_by_C, key=S_by_C.get)
order.append(node_to_delete)
graph_copy = nx.Graph(self.edges())
for node in order:
for edge in itertools.combinations(graph_copy.neighbors(node), 2):
graph_copy.add_edge(edge[0], edge[1])
edge_set.add(edge)
graph_copy.remove_node(node)
if inplace:
for edge in edge_set:
self.add_edge(edge[0], edge[1])
return self
else:
graph_copy = MarkovModel(self.edges())
for edge in edge_set:
graph_copy.add_edge(edge[0], edge[1])
return graph_copy
def to_junction_tree(self):
"""
Creates a junction tree (or clique tree) for a given markov model.
For a given markov model (H) a junction tree (G) is a graph
1. where each node in G corresponds to a maximal clique in H
        2. each sepset in G separates the variables strictly on one side of
           the edge from those on the other.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import Factor
>>> mm = MarkovModel()
>>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
>>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
... ('x4', 'x7'), ('x5', 'x7')])
>>> phi = [Factor(edge, [2, 2], np.random.rand(4)) for edge in mm.edges()]
>>> mm.add_factors(*phi)
>>> junction_tree = mm.to_junction_tree()
"""
from pgmpy.models import JunctionTree
# Check whether the model is valid or not
self.check_model()
# Triangulate the graph to make it chordal
triangulated_graph = self.triangulate()
# Find maximal cliques in the chordal graph
cliques = list(map(tuple, nx.find_cliques(triangulated_graph)))
# If there is only 1 clique, then the junction tree formed is just a
# clique tree with that single clique as the node
if len(cliques) == 1:
clique_trees = JunctionTree()
clique_trees.add_node(cliques[0])
# Else if the number of cliques is more than 1 then create a complete
# graph with all the cliques as nodes and weight of the edges being
# the length of sepset between two cliques
elif len(cliques) >= 2:
complete_graph = UndirectedGraph()
edges = list(itertools.combinations(cliques, 2))
weights = list(map(lambda x: len(set(x[0]).intersection(set(x[1]))),
edges))
for edge, weight in zip(edges, weights):
complete_graph.add_edge(*edge, weight=-weight)
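            # Using the negated sepset sizes as weights turns networkx's
            # minimum spanning tree into a maximum-weight spanning tree over
            # the sepset cardinalities.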
# Create clique trees by minimum (or maximum) spanning tree method
clique_trees = JunctionTree(nx.minimum_spanning_tree(complete_graph).edges())
# Check whether the factors are defined for all the random variables or not
all_vars = itertools.chain(*[factor.scope() for factor in self.factors])
if set(all_vars) != set(self.nodes()):
            raise ValueError('Factor for all the random variables not specified')
# Dictionary stating whether the factor is used to create clique
# potential or not
# If false, then it is not used to create any clique potential
is_used = {factor: False for factor in self.factors}
for node in clique_trees.nodes():
clique_factors = []
for factor in self.factors:
# If the factor is not used in creating any clique potential as
# well as has any variable of the given clique in its scope,
# then use it in creating clique potential
if not is_used[factor] and set(factor.scope()).issubset(node):
clique_factors.append(factor)
is_used[factor] = True
# To compute clique potential, initially set it as unity factor
var_card = [self.get_cardinality()[x] for x in node]
clique_potential = Factor(node, var_card, np.ones(np.product(var_card)))
# multiply it with the factors associated with the variables present
# in the clique (or node)
clique_potential *= factor_product(*clique_factors)
clique_trees.add_factors(clique_potential)
if not all(is_used.values()):
            raise ValueError('All the factors were not used to create Junction Tree. '
                             'Extra factors are defined.')
return clique_trees
def markov_blanket(self, node):
"""
        Returns the Markov blanket of a random variable.
        In a Markov model the Markov blanket of a node is simply the set of
        its neighboring nodes.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> mm = MarkovModel()
>>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
>>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
... ('x4', 'x7'), ('x5', 'x7')])
>>> mm.markov_blanket('x1')
"""
return self.neighbors(node)
def get_local_independencies(self, latex=False):
"""
        Returns all the local independencies present in the Markov model.
        Local independencies are independence assertions of the form
        .. math:: X \perp W - \{X\} - MB(X) \mid MB(X)
        where W is the set of all the random variables and MB(X) is the
        Markov blanket of X.
Parameters
----------
latex: boolean
            If latex=True, a latex string of the independence assertions is
            returned.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> mm = MarkovModel()
>>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
>>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
... ('x4', 'x7'), ('x5', 'x7')])
        >>> mm.get_local_independencies()
"""
from pgmpy.exceptions import RequiredError
local_independencies = Independencies()
all_vars = set(self.nodes())
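        # For every node X the assertion added below is
        # (X _|_ all_vars - {X} - MB(X) | MB(X)), i.e. X is independent of
        # the rest of the variables given its Markov blanket.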
for node in self.nodes():
markov_blanket = set(self.markov_blanket(node))
rest = all_vars - set([node]) - markov_blanket
try:
local_independencies.add_assertions([node, list(rest), list(markov_blanket)])
except RequiredError:
pass
local_independencies.reduce()
if latex:
return local_independencies.latex_string()
else:
return local_independencies
def to_bayesian_model(self):
"""
        Creates a Bayesian model which is a minimum I-map for this Markov model.
        The ordering of parents may not remain constant; it depends on the
        ordering of the variables in the junction tree, which is itself not
        constant.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import Factor
>>> mm = MarkovModel()
>>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
>>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
... ('x4', 'x7'), ('x5', 'x7')])
>>> phi = [Factor(edge, [2, 2], np.random.rand(4)) for edge in mm.edges()]
>>> mm.add_factors(*phi)
>>> bm = mm.to_bayesian_model()
"""
from pgmpy.models import BayesianModel
bm = BayesianModel()
var_clique_dict = defaultdict(tuple)
var_order = []
# Create a junction tree from the markov model.
# Creation of clique tree involves triangulation, finding maximal cliques
# and creating a tree from these cliques
junction_tree = self.to_junction_tree()
# create an ordering of the nodes based on the ordering of the clique
# in which it appeared first
root_node = junction_tree.nodes()[0]
bfs_edges = nx.bfs_edges(junction_tree, root_node)
for node in root_node:
var_clique_dict[node] = root_node
var_order.append(node)
for edge in bfs_edges:
clique_node = edge[1]
for node in clique_node:
if not var_clique_dict[node]:
var_clique_dict[node] = clique_node
var_order.append(node)
# create a bayesian model by adding edges from parent of node to node as
# par(x_i) = (var(c_k) - x_i) \cap {x_1, ..., x_{i-1}}
for node_index in range(len(var_order)):
node = var_order[node_index]
node_parents = (set(var_clique_dict[node]) - set([node])).intersection(
set(var_order[:node_index]))
bm.add_edges_from([(parent, node) for parent in node_parents])
# TODO : Convert factor into CPDs
return bm
def get_partition_function(self):
"""
Returns the partition function for a given undirected graph.
A partition function is defined as
.. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)
where m is the number of factors present in the graph
and X are all the random variables present.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import Factor
>>> G = MarkovModel()
>>> G.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
>>> G.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
... ('x4', 'x7'), ('x5', 'x7')])
>>> phi = [Factor(edge, [2, 2], np.random.rand(4)) for edge in G.edges()]
>>> G.add_factors(*phi)
>>> G.get_partition_function()
"""
self.check_model()
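        # Multiply all the factors into a single factor over every variable
        # and sum the resulting table; that sum is the partition function.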
factor = self.factors[0]
factor = factor_product(factor, *[self.factors[i] for i in
range(1, len(self.factors))])
if set(factor.scope()) != set(self.nodes()):
raise ValueError('Factor for all the random variables not defined.')
return np.sum(factor.values)
def copy(self):
"""
Returns a copy of this Markov Model.
Returns
-------
MarkovModel: Copy of this Markov model.
Examples
-------
>>> from pgmpy.factors import Factor
>>> from pgmpy.models import MarkovModel
>>> G = MarkovModel()
>>> G.add_nodes_from([('a', 'b'), ('b', 'c')])
>>> G.add_edge(('a', 'b'), ('b', 'c'))
>>> G_copy = G.copy()
>>> G_copy.edges()
[(('a', 'b'), ('b', 'c'))]
>>> G_copy.nodes()
[('a', 'b'), ('b', 'c')]
>>> factor = Factor([('a', 'b')], cardinality=[3],
... values=np.random.rand(3))
>>> G.add_factors(factor)
>>> G.get_factors()
[<Factor representing phi(('a', 'b'):3) at 0x...>]
>>> G_copy.get_factors()
[]
"""
clone_graph = MarkovModel(self.edges())
clone_graph.add_nodes_from(self.nodes())
if self.factors:
factors_copy = [factor.copy() for factor in self.factors]
clone_graph.add_factors(*factors_copy)
return clone_graph
|
|
#!/usr/bin/env python
"""Manage data servers."""
import atexit
import os
import readline
import time
import urlparse
import urllib3
from urllib3 import connectionpool
# pylint: disable=unused-import,g-bad-import-order
from grr.client import client_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import startup
from grr.lib import utils
from grr.server.data_server import constants
from grr.server.data_server import errors
from grr.server.data_server import utils as sutils
class Manager(object):
"""Manage a data server group using a connection to the master."""
def __init__(self):
servers = config_lib.CONFIG["Dataserver.server_list"]
if not servers:
raise errors.DataServerError("List of data servers not available.")
master_location = servers[0]
loc = urlparse.urlparse(master_location, scheme="http")
self.addr = loc.hostname
self.port = int(loc.port)
self.pool = connectionpool.HTTPConnectionPool(self.addr, port=self.port)
self.history_path = os.path.expanduser("~/.grr-data-store-manager")
if os.path.exists(self.history_path):
readline.read_history_file(self.history_path)
self.periodic_thread = None
self.mapping = None
self.mapping_time = 0
def Start(self):
self._PeriodicThread()
self.periodic_thread = utils.InterruptableThread(
target=self._PeriodicThread, sleep_time=10)
self.periodic_thread.start()
return True
def _PeriodicThread(self):
body = ""
headers = {"Content-Length": len(body)}
try:
res = self.pool.urlopen("POST", "/manage", headers=headers, body=body)
if res.status != constants.RESPONSE_OK:
return False
self.mapping = rdfvalue.DataServerMapping(res.data)
self.mapping_time = time.time()
except urllib3.exceptions.MaxRetryError:
pass
def SaveHistory(self):
readline.write_history_file(self.history_path)
def _ShowServers(self):
if not self.mapping:
print "Server information not available"
return
last = time.asctime(time.localtime(self.mapping_time))
print "Last refresh:", last
for i, serv in enumerate(list(self.mapping.servers)):
addr = serv.address
port = serv.port
size = serv.state.size
load = serv.state.load
ncomp = serv.state.num_components
avgcomp = serv.state.avg_component
print "Server %d %s:%d (Size: %dKB, Load: %d)" % (i, addr, port,
size / 1024, load)
print "\t\t%d components %dKB average size" % (ncomp, avgcomp / 1024)
def _ShowRanges(self):
if not self.mapping:
print "Server information not available"
return
self._ShowRange(self.mapping)
def _ShowRange(self, mapping):
for i, serv in enumerate(list(mapping.servers)):
addr = serv.address
port = serv.port
start = serv.interval.start
end = serv.interval.end
perc = float(end - start) / float(2 ** 64)
perc *= 100
print "Server %d %s:%d %d%% [%s, %s[" % (i, addr, port, perc,
str(start).zfill(20),
str(end).zfill(20))
def _ComputeMappingSize(self, mapping):
totalsize = 0
servers = list(mapping.servers)
for serv in servers:
totalsize += serv.state.size
return totalsize
def _ComputeMappingFromPercentages(self, mapping, newperc):
"""Builds a new mapping based on the new server range percentages."""
newstart = 0
n_servers = self.mapping.num_servers
servers = list(mapping.servers)
new_mapping = rdfvalue.DataServerMapping(version=self.mapping.version + 1,
num_servers=n_servers,
pathing=self.mapping.pathing)
for i, perc in enumerate(newperc):
quant = int(perc * constants.MAX_RANGE)
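      # quant is this server's share of the keyspace; the last interval is
      # clamped to MAX_RANGE below so integer rounding never leaves a gap at
      # the end of the range.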
interval = rdfvalue.DataServerInterval(start=newstart)
end = newstart + quant
if i == len(newperc) - 1:
end = constants.MAX_RANGE
interval.end = end
old_server = servers[i]
newstart = end
new_mapping.servers.Append(index=old_server.index,
address=old_server.address,
port=old_server.port,
state=old_server.state,
interval=interval)
return new_mapping
def _Rebalance(self):
"""Starts the rebalance process."""
if not self.mapping:
print "Server information not available"
return
    # Give every server an equal share of the keyspace.
servers = list(self.mapping.servers)
num_servers = len(servers)
target = 1.0 / float(num_servers)
perc = [target] * num_servers
new_mapping = self._ComputeMappingFromPercentages(self.mapping, perc)
print "The new ranges will be:"
self._ShowRange(new_mapping)
print
self._DoRebalance(new_mapping)
def _DoRebalance(self, new_mapping):
"""Performs a new rebalancing operation with the master server."""
print "Contacting master server to start re-sharding...",
# Send mapping information to master.
pool = None
try:
pool = connectionpool.HTTPConnectionPool(self.addr, port=self.port)
except urllib3.exceptions.MaxRetryError:
print "Unable to contact master..."
return
body = new_mapping.SerializeToString()
headers = {"Content-Length": len(body)}
res = None
try:
res = pool.urlopen("POST", "/rebalance/phase1", headers=headers,
body=body)
except urllib3.exceptions.MaxRetryError:
print "Unable to talk with master..."
pool.close()
return
if res.status != constants.RESPONSE_OK:
print "Re-sharding cannot be done!"
return
rebalance = rdfvalue.DataServerRebalance(res.data)
print "OK"
print
print "The following servers will need to move data:"
for i, move in enumerate(list(rebalance.moving)):
print "Server %d moves %dKB" % (i, move / 1024)
answer = raw_input("Proceed with re-sharding? (y/n) ")
if answer != "y":
return
body = rebalance.SerializeToString()
headers = {"Content-Length": len(body)}
try:
res = pool.urlopen("POST", "/rebalance/phase2", headers=headers,
body=body)
except urllib3.exceptions.MaxRetryError:
print "Unable to contact server for re-sharding."
print "Make sure the data servers are up and try again."
return
if res.status != constants.RESPONSE_OK:
print "Could not start copying files for re-sharding"
print "Make sure the data servers are up and try again."
return
try:
res = pool.urlopen("POST", "/rebalance/commit", headers=headers,
body=body)
except urllib3.exceptions.MaxRetryError:
print ("Could not commit the re-sharding transaction with id "
"%s") % rebalance.id
print "Make sure the data servers are up and then run:"
print "'recover %s' in order to re-run transaction" % rebalance.id
return
if res.status != constants.RESPONSE_OK:
print "Could not commit the transaction %s" % rebalance.id
print "Make sure the data servers are up and then run:"
print "'recover %s' in order to re-run transaction" % rebalance.id
return
self.mapping = rdfvalue.DataServerMapping(res.data)
print "Rebalance with id %s fully performed." % rebalance.id
def _Recover(self, transid):
"""Completes a rebalancing transaction that was unsuccessful."""
print "Contacting master about transaction %s..." % transid,
pool = None
try:
pool = connectionpool.HTTPConnectionPool(self.addr, port=self.port)
except urllib3.exceptions.MaxRetryError:
print "Unable to contact master..."
return
print "OK."
try:
body = transid
headers = {"Content-Length": len(body)}
res = pool.urlopen("POST", "/rebalance/recover", headers=headers,
body=body)
except urllib3.exceptions.MaxRetryError:
print "Unable to contact master..."
return
if res.status == constants.RESPONSE_TRANSACTION_NOT_FOUND:
print "Transaction %s was not found" % transid
return
if res.status != constants.RESPONSE_OK:
print "Potential data master error. Giving up..."
return
rebalance = rdfvalue.DataServerRebalance(res.data)
print "Got transaction object %s" % rebalance.id
answer = raw_input("Proceed with the recover process? (y/n) ")
if answer != "y":
return
body = rebalance.SerializeToString()
headers = {"Content-Length": len(body)}
try:
res = pool.urlopen("POST", "/rebalance/commit", headers=headers,
body=body)
except urllib3.exceptions.MaxRetryError:
print "Could not commit re-sharding transaction with id %s" % rebalance.id
print "Make sure the data servers are up and then run:"
print "'recover %s' in order to re-run transaction" % rebalance.id
return
if res.status != constants.RESPONSE_OK:
print "Could not commit transaction %s" % rebalance.id
print "Make sure the data servers are up and then run:"
print "'recover %s' in order to re-run transaction" % rebalance.id
return
self.mapping = rdfvalue.DataServerMapping(res.data)
print "Rebalance with id %s fully performed." % rebalance.id
def _PackNewServer(self, addr, port):
body = sutils.SIZE_PACKER.pack(len(addr))
body += addr
body += sutils.PORT_PACKER.pack(port)
return body
def _AddServer(self, addr, port):
"""Starts the process of adding a new server."""
if port <= 0:
print "Wrong port: %d" % port
return
pool = None
try:
pool = connectionpool.HTTPConnectionPool(self.addr, port=self.port)
except urllib3.exceptions.MaxRetryError:
print "Unable to contact master..."
return
body = self._PackNewServer(addr, port)
headers = {"Content-Length": len(body)}
try:
res = pool.urlopen("POST", "/servers/add/check", headers=headers,
body=body)
except urllib3.exceptions.MaxRetryError:
print "Unable to contact master..."
return
if res.status == constants.RESPONSE_EQUAL_DATA_SERVER:
print "Master server says there is already a similar server."
print "Giving up..."
return
if res.status == constants.RESPONSE_DATA_SERVERS_UNREACHABLE:
print "Master server says that some data servers are not running."
print "Giving up..."
return
if res.status != constants.RESPONSE_OK:
print "Master server error. Is the server running?"
return
print "Master server allows us to add server %s:%d" % (addr, port)
answer = raw_input("Do you really want to add server //%s:%d? (y/n) " %
(addr, port))
if answer != "y":
return
try:
res = pool.urlopen("POST", "/servers/add", headers=headers,
body=body)
except urllib3.exceptions.MaxRetryError:
print "Unable to contact master..."
return
if res.status == constants.RESPONSE_DATA_SERVERS_UNREACHABLE:
print "Master server says that some data servers are not running."
print "Giving up..."
return
if res.status == constants.RESPONSE_INCOMPLETE_SYNC:
print ("The master server has set up the new server, but the other "
"servers may not know about it.")
print "Please run 'sync' to fix the problem."
print "Afterwards, you have to rebalance server data with the following:"
self._CompleteAddServerHelp(addr, port)
return
if res.status != constants.RESPONSE_OK:
print "Failed to contact master server."
return
print "============================================="
print "Operation completed."
print "To rebalance server data you have to do the following:"
self._CompleteAddServerHelp(addr, port)
# Update mapping.
self.mapping = rdfvalue.DataServerMapping(res.data)
def _CompleteAddServerHelp(self, addr, port):
print ("\t1. Add '//%s:%d' to Dataserver.server_list in your configuration "
"file.") % (addr, port)
print "\t2. Start the new server at %s:%d" % (addr, port)
print "\t3. Run 'rebalance'"
def _Sync(self):
"""Forces the master to sync with the other data servers."""
pool = None
try:
pool = connectionpool.HTTPConnectionPool(self.addr, port=self.port)
body = ""
headers = {"Content-Length": len(body)}
res = pool.urlopen("POST", "/servers/sync-all", headers=headers,
body=body)
if res.status == constants.RESPONSE_INCOMPLETE_SYNC:
print "Master has tried to contact all the data servers, but failed."
return False
if res.status == constants.RESPONSE_DATA_SERVERS_UNREACHABLE:
print "Master server says that some data servers are not running."
print "Giving up..."
return False
if res.status != constants.RESPONSE_OK:
print "Unable to sync servers."
return False
except urllib3.exceptions.MaxRetryError:
print "Unable to contact master..."
return False
print "Sync done."
# Update mapping.
self.mapping = rdfvalue.DataServerMapping(res.data)
return True
def _FindServer(self, addr, port):
for i, serv in enumerate(self.mapping.servers):
if serv.address == addr and serv.port == port:
return serv, i
return None, None
def _DropServer(self, addr, port):
"""Remove data stored in a server."""
# Find server.
server, index = self._FindServer(addr, port)
if not server:
print "Server not found."
return
servers = list(self.mapping.servers)
num_servers = len(servers)
# Simply set everyone else with 1/(N-1).
target = 1.0 / float(num_servers - 1)
newperc = [target] * num_servers
# Our server gets 0.
newperc[index] = 0
# Create new mapping structure.
new_mapping = self._ComputeMappingFromPercentages(self.mapping, newperc)
print "The new ranges will be:"
self._ShowRange(new_mapping)
print
# Now, we do a rebalancing.
self._DoRebalance(new_mapping)
def _RemServer(self, addr, port):
"""Remove server from group."""
# Find server.
server, _ = self._FindServer(addr, port)
if not server:
print "Server not found."
return
if server.interval.start != server.interval.end:
print "Server has some data in it!"
print "Giving up..."
return
pool = None
try:
pool = connectionpool.HTTPConnectionPool(self.addr, port=self.port)
except urllib3.exceptions.MaxRetryError:
print "Unable to contact master..."
return
body = self._PackNewServer(addr, port)
headers = {"Content-Length": len(body)}
try:
res = pool.urlopen("POST", "/servers/rem/check", headers=headers,
body=body)
except urllib3.exceptions.MaxRetryError:
print "Unable to contact master..."
return
if res.status == constants.RESPONSE_DATA_SERVER_NOT_FOUND:
print "Master server says the data server does not exist."
return
if res.status == constants.RESPONSE_RANGE_NOT_EMPTY:
print "Master server says the data server has still some data."
print "Giving up..."
return
if res.status == constants.RESPONSE_DATA_SERVERS_UNREACHABLE:
print "Master server says some data servers are not running."
print "Giving up..."
return
if res.status != constants.RESPONSE_OK:
print "Master server error. Is the server running?"
return
print "Master server allows us to remove server %s:%d" % (addr, port)
answer = raw_input("Do you really want to remove server //%s:%d? (y/n) " %
(addr, port))
if answer != "y":
return
try:
res = pool.urlopen("POST", "/servers/rem", headers=headers,
body=body)
except urllib3.exceptions.MaxRetryError:
print "Unable to contact master..."
return
if res.status == constants.RESPONSE_DATA_SERVERS_UNREACHABLE:
print "Master server says that some data servers are not running."
print "Giving up..."
return
if res.status == constants.RESPONSE_OK:
# Update mapping.
self.mapping = rdfvalue.DataServerMapping(res.data)
self._CompleteRemServerHelpComplete(addr, port)
return
if res.status == constants.RESPONSE_INCOMPLETE_SYNC:
# We were unable to sync, so we try again:
if self._Sync():
self._CompleteRemServerHelpComplete(addr, port)
return
else:
# If we cannot sync in the second attempt, we give up.
print ("The master server has removed the new server, but the other "
"servers may not know about it.")
print "Please run 'sync' to fix the problem, followed by:"
self._CompleteRemServerHelp(addr, port)
return
if res.status != constants.RESPONSE_OK:
print "Master has returned an unknown error..."
return
def _CompleteRemServerHelpComplete(self, addr, port):
print "Server //%s:%d has been successfully removed!" % (addr, port)
print "Now you have to do the following:"
self._CompleteRemServerHelp(addr, port)
def _CompleteRemServerHelp(self, addr, port):
print "\t1. Stop the server running on //%s:%d" % (addr, port)
print "\t2. Remove '//%s:%d' from the configuration file." % (addr, port)
print "\t3. Remove the data store directory"
def _Help(self):
"""Help message."""
print "stop\t\t\t\tStop manager."
print "servers\t\t\t\tDisplay server information."
print "ranges\t\t\t\tDisplay server range information."
print "rebalance\t\t\tRebalance server load."
print "recover <transaction id>\tComplete a pending transaction."
print "addserver <address> <port>\tAdd new server to the group."
print ("dropserver <address> <port>\tMove all the data from the server "
"to others.")
print "remserver <address> <port>\tRemove server from server group."
print "sync\t\t\t\tSync server information between data servers."
def _HandleCommand(self, cmd, args):
"""Execute an user command."""
if cmd == "stop" or cmd == "exit":
return False
elif cmd == "servers":
self._ShowServers()
elif cmd == "help":
self._Help()
elif cmd == "ranges":
self._ShowRanges()
elif cmd == "rebalance":
self._Rebalance()
elif cmd == "recover":
if len(args) != 1:
print "Syntax: recover <transaction-id>"
self._Recover(args[0])
elif cmd == "addserver":
if len(args) != 2:
print "Syntax: addserver <address> <port>"
try:
self._AddServer(args[0], int(args[1]))
except ValueError:
print "Invalid port number: %s" % args[1]
elif cmd == "dropserver":
if len(args) != 2:
print "Syntax: dropserver <address> <port>"
try:
self._DropServer(args[0], int(args[1]))
except ValueError:
print "Invalid port number: %s" % args[1]
elif cmd == "remserver":
if len(args) != 2:
print "Syntax: remserver <address> <port>"
try:
self._RemServer(args[0], int(args[1]))
except ValueError:
print "Invalid port number: %s" % args[1]
elif cmd == "sync":
self._Sync()
else:
print "No such command:", cmd
return True
def _NumServers(self):
if self.mapping:
return str(self.mapping.num_servers)
else:
return "-"
def Run(self):
while True:
line = raw_input("Manager(%s servers)> " % self._NumServers())
if not line:
continue
vec = line.split(" ")
if not vec:
continue
cmd = vec[0]
args = vec[1:]
try:
if not self._HandleCommand(cmd, args):
break
except Exception as e: # pylint: disable=broad-except
print "Exception:", str(e)
def main(unused_argv):
"""Main."""
config_lib.CONFIG.AddContext("DataServer Context")
startup.ClientInit()
manager = Manager()
if not manager.Start():
print "Failed to start manager"
return
atexit.register(manager.SaveHistory)
try:
manager.Run()
except (EOFError, KeyboardInterrupt):
print
if __name__ == "__main__":
flags.StartMain(main)
|
|
import numpy as np
import scipy as sp
from itertools import product
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_less
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
solver_list = ['full', 'arpack', 'randomized', 'auto']
def test_pca():
# PCA on dense arrays
X = iris.data
for n_comp in np.arange(X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='full')
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
# test explained_variance_ratio_ == 1 with all components
pca = PCA(svd_solver='full')
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
def test_pca_arpack_solver():
# PCA on dense arrays
X = iris.data
d = X.shape[1]
# Loop excluding the extremes, invalid inputs for arpack
for n_comp in np.arange(1, d):
pca = PCA(n_components=n_comp, svd_solver='arpack', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(d), 12)
pca = PCA(n_components=0, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
pca = PCA(n_components=d, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
assert_equal(pca.n_components,
PCA(n_components=d,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
def test_pca_randomized_solver():
# PCA on dense arrays
X = iris.data
# Loop excluding the 0, invalid for randomized
for n_comp in np.arange(1, X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='randomized', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='randomized', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='randomized', random_state=0).svd_solver)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
n_features = n_components + 2 # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_greater(X.std(axis=0).std(), 43.8)
for solver, copy in product(solver_list, (True, False)):
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = PCA(n_components=n_components, whiten=True, copy=copy,
svd_solver=solver, random_state=0, iterated_power=7)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(ddof=1, axis=0),
np.ones(n_components),
decimal=6)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = PCA(n_components=n_components, whiten=False, copy=copy,
svd_solver=solver).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_explained_variance():
    # Check that the explained variances reported by the different solvers
    # agree with each other and with the empirical variances
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full').fit(X)
apca = PCA(n_components=2, svd_solver='arpack', random_state=0).fit(X)
assert_array_almost_equal(pca.explained_variance_,
apca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
apca.explained_variance_ratio_, 3)
rpca = PCA(n_components=2, svd_solver='randomized', random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
expected_result = np.linalg.eig(np.cov(X, rowvar=False))[0]
expected_result = sorted(expected_result, reverse=True)[:2]
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, ddof=1, axis=0))
assert_array_almost_equal(pca.explained_variance_, expected_result)
X_pca = apca.transform(X)
assert_array_almost_equal(apca.explained_variance_,
np.var(X_pca, ddof=1, axis=0))
assert_array_almost_equal(apca.explained_variance_, expected_result)
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, ddof=1, axis=0),
decimal=1)
assert_array_almost_equal(rpca.explained_variance_,
expected_result, decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_singular_values():
# Check that the PCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full',
random_state=rng).fit(X)
apca = PCA(n_components=2, svd_solver='arpack',
random_state=rng).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.singular_values_, apca.singular_values_, 12)
assert_array_almost_equal(pca.singular_values_, rpca.singular_values_, 1)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 1)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 9)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 0)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
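    # The transformed columns are rescaled to unit norm and then given known
    # lengths (3.142, 2.718, leaving the third at 1.0); because the PCA scores
    # are orthogonal and the components orthonormal, refitting on the
    # reconstructed data should report exactly those singular values.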
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
apca = PCA(n_components=3, svd_solver='arpack', random_state=rng)
rpca = PCA(n_components=3, svd_solver='randomized', random_state=rng)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
apca.fit(X_hat)
rpca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
for solver in solver_list:
Yt = PCA(n_components=2, svd_solver=solver).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='full').fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
for solver in solver_list:
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
# Ensures that solver-specific extreme inputs for the n_components
# parameter raise errors
X = np.array([[0, 1, 0], [1, 0, 0]])
smallest_d = 2 # The smallest dimension
lower_limit = {'randomized': 1, 'arpack': 1, 'full': 0, 'auto': 0}
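    # The optional "L?" in the expected messages is presumably there to
    # tolerate Python 2's long-integer repr in the formatted error string.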
for solver in solver_list:
# We conduct the same test on X.T so that it is invariant to axis.
for data in [X, X.T]:
for n_components in [-1, 3]:
if solver == 'auto':
solver_reported = 'full'
else:
solver_reported = solver
assert_raises_regex(ValueError,
"n_components={}L? must be between "
"{}L? and min\(n_samples, n_features\)="
"{}L? with svd_solver=\'{}\'"
.format(n_components,
lower_limit[solver],
smallest_d,
solver_reported),
PCA(n_components,
svd_solver=solver).fit, data)
if solver == 'arpack':
n_components = smallest_d
assert_raises_regex(ValueError,
"n_components={}L? must be "
"strictly less than "
"min\(n_samples, n_features\)={}L?"
" with svd_solver=\'arpack\'"
.format(n_components, smallest_d),
PCA(n_components, svd_solver=solver)
.fit, data)
def test_n_components_none():
# Ensures that n_components == None is handled correctly
X = iris.data
# We conduct the same test on X.T so that it is invariant to axis.
for data in [X, X.T]:
for solver in solver_list:
pca = PCA(svd_solver=solver)
pca.fit(data)
if solver == 'arpack':
assert_equal(pca.n_components_, min(data.shape) - 1)
else:
assert_equal(pca.n_components_, min(data.shape))
def test_randomized_pca_check_projection():
# Test that the projection by randomized PCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2, svd_solver='randomized',
random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by randomized PCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = PCA(n_components=1, svd_solver='randomized',
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
    # Test that randomized PCA is invertible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='randomized', random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True, svd_solver='randomized',
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_less(relative_max_delta, 1e-5)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle', svd_solver='full').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) +
np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5, svd_solver='full').fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
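# Hedged sketch of how a fractional n_components is resolved (rule assumed
# from the assertions above, not copied from the estimator internals): keep
# the smallest number of leading components whose cumulative explained
# variance ratio reaches the requested fraction.
def _n_components_from_fraction_sketch(explained_variance_ratio, fraction):
    import numpy as np
    cumulative = np.cumsum(explained_variance_ratio)
    # index of the first component at which the target fraction is reached
    return int(np.searchsorted(cumulative, fraction)) + 1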
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
    # Test that probabilistic PCA correctly separates different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives different scores if whiten=True
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
ll2 = pca.score(X)
assert_true(ll1 > ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k, svd_solver='full')
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
def test_pca_score_with_different_solvers():
digits = datasets.load_digits()
X_digits = digits.data
pca_dict = {svd_solver: PCA(n_components=30, svd_solver=svd_solver,
random_state=0)
for svd_solver in solver_list}
for pca in pca_dict.values():
pca.fit(X_digits)
# Sanity check for the noise_variance_. For more details see
# https://github.com/scikit-learn/scikit-learn/issues/7568
# https://github.com/scikit-learn/scikit-learn/issues/8541
# https://github.com/scikit-learn/scikit-learn/issues/8544
assert np.all((pca.explained_variance_ - pca.noise_variance_) >= 0)
# Compare scores with different svd_solvers
score_dict = {svd_solver: pca.score(X_digits)
for svd_solver, pca in pca_dict.items()}
assert_almost_equal(score_dict['full'], score_dict['arpack'])
assert_almost_equal(score_dict['full'], score_dict['randomized'],
decimal=3)
def test_pca_zero_noise_variance_edge_cases():
# ensure that noise_variance_ is 0 in edge cases
# when n_components == min(n_samples, n_features)
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
# arpack raises ValueError for n_components == min(n_samples,
# n_features)
svd_solvers = ['full', 'randomized']
for svd_solver in svd_solvers:
pca = PCA(svd_solver=svd_solver, n_components=p)
pca.fit(X)
assert pca.noise_variance_ == 0
pca.fit(X.T)
assert pca.noise_variance_ == 0
def test_svd_solver_auto():
rng = np.random.RandomState(0)
X = rng.uniform(size=(1000, 50))
# case: n_components in (0,1) => 'full'
pca = PCA(n_components=.5)
pca.fit(X)
pca_test = PCA(n_components=.5, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: max(X.shape) <= 500 => 'full'
pca = PCA(n_components=5, random_state=0)
Y = X[:10, :]
pca.fit(Y)
pca_test = PCA(n_components=5, svd_solver='full', random_state=0)
pca_test.fit(Y)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: n_components >= .8 * min(X.shape) => 'full'
pca = PCA(n_components=50)
pca.fit(X)
pca_test = PCA(n_components=50, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# n_components >= 1 and n_components < .8 * min(X.shape) => 'randomized'
pca = PCA(n_components=10, random_state=0)
pca.fit(X)
pca_test = PCA(n_components=10, svd_solver='randomized', random_state=0)
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
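# Rough sketch of the 'auto' selection rule exercised above (reconstructed
# from the cases in this test, not from the library's private helpers; the
# 'mle' string case is deliberately not covered here).
def _auto_solver_sketch(n_samples, n_features, n_components):
    smallest = min(n_samples, n_features)
    if max(n_samples, n_features) <= 500:
        return 'full'
    if isinstance(n_components, float) and 0. < n_components < 1.:
        return 'full'
    if 1 <= n_components < .8 * smallest:
        return 'randomized'
    return 'full'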
def test_deprecation_randomized_pca():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
depr_message = ("Class RandomizedPCA is deprecated; RandomizedPCA was "
"deprecated in 0.18 and will be "
"removed in 0.20. Use PCA(svd_solver='randomized') "
"instead. The new implementation DOES NOT store "
"whiten ``components_``. Apply transform to get them.")
def fit_deprecated(X):
global Y
rpca = RandomizedPCA(random_state=0)
Y = rpca.fit_transform(X)
assert_warns_message(DeprecationWarning, depr_message, fit_deprecated, X)
Y_pca = PCA(svd_solver='randomized', random_state=0).fit_transform(X)
assert_array_almost_equal(Y, Y_pca)
def test_pca_sparse_input():
X = np.random.RandomState(0).rand(5, 4)
X = sp.sparse.csr_matrix(X)
    assert sp.sparse.issparse(X)
for svd_solver in solver_list:
pca = PCA(n_components=3, svd_solver=svd_solver)
assert_raises(TypeError, pca.fit, X)
def test_pca_bad_solver():
X = np.random.RandomState(0).rand(5, 4)
pca = PCA(n_components=3, svd_solver='bad_argument')
assert_raises(ValueError, pca.fit, X)
def test_pca_dtype_preservation():
for svd_solver in solver_list:
yield check_pca_float_dtype_preservation, svd_solver
yield check_pca_int_dtype_upcast_to_double, svd_solver
def check_pca_float_dtype_preservation(svd_solver):
    # Ensure that PCA does not upcast the dtype when input is float32
X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64)
X_32 = X_64.astype(np.float32)
pca_64 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_64)
pca_32 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_32)
assert pca_64.components_.dtype == np.float64
assert pca_32.components_.dtype == np.float32
assert pca_64.transform(X_64).dtype == np.float64
assert pca_32.transform(X_32).dtype == np.float32
assert_array_almost_equal(pca_64.components_, pca_32.components_,
decimal=5)
def check_pca_int_dtype_upcast_to_double(svd_solver):
# Ensure that all int types will be upcast to float64
X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4))
X_i64 = X_i64.astype(np.int64)
X_i32 = X_i64.astype(np.int32)
pca_64 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_i64)
pca_32 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_i32)
assert pca_64.components_.dtype == np.float64
assert pca_32.components_.dtype == np.float64
assert pca_64.transform(X_i64).dtype == np.float64
assert pca_32.transform(X_i32).dtype == np.float64
assert_array_almost_equal(pca_64.components_, pca_32.components_,
decimal=5)
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Object Server for Swift """
import six.moves.cPickle as pickle
import json
import os
import multiprocessing
import time
import traceback
import socket
import math
from swift import gettext_ as _
from hashlib import md5
from eventlet import sleep, wsgi, Timeout
from eventlet.greenthread import spawn
from swift.common.utils import public, get_logger, \
config_true_value, timing_stats, replication, \
normalize_delete_at_timestamp, get_log_line, Timestamp, \
get_expirer_container, parse_mime_headers, \
iter_multipart_mime_documents, extract_swift_bytes, safe_json_loads
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_object_creation, \
valid_timestamp, check_utf8
from swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \
DiskFileNotExist, DiskFileCollision, DiskFileNoSpace, DiskFileDeleted, \
DiskFileDeviceUnavailable, DiskFileExpired, ChunkReadTimeout, \
ChunkReadError, DiskFileXattrNotSupported
from swift.obj import ssync_receiver
from swift.common.http import is_success
from swift.common.base_storage_server import BaseStorageServer
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.request_helpers import get_name_and_placement, \
is_user_meta, is_sys_or_user_meta, is_object_transient_sysmeta, \
resolve_etag_is_at_header
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPForbidden, HTTPException, HTTPConflict, \
HTTPServerError
from swift.obj.diskfile import DATAFILE_SYSTEM_META, DiskFileRouter
def iter_mime_headers_and_bodies(wsgi_input, mime_boundary, read_chunk_size):
mime_documents_iter = iter_multipart_mime_documents(
wsgi_input, mime_boundary, read_chunk_size)
for file_like in mime_documents_iter:
hdrs = parse_mime_headers(file_like)
yield (hdrs, file_like)
def drain(file_like, read_size, timeout):
"""
Read and discard any bytes from file_like.
:param file_like: file-like object to read from
:param read_size: how big a chunk to read at a time
:param timeout: how long to wait for a read (use None for no timeout)
:raises ChunkReadTimeout: if no chunk was read in time
"""
while True:
with ChunkReadTimeout(timeout):
chunk = file_like.read(read_size)
if not chunk:
break
def _make_backend_fragments_header(fragments):
if fragments:
result = {}
for ts, frag_list in fragments.items():
result[ts.internal] = frag_list
return json.dumps(result)
return None
class EventletPlungerString(str):
"""
Eventlet won't send headers until it's accumulated at least
eventlet.wsgi.MINIMUM_CHUNK_SIZE bytes or the app iter is exhausted. If we
want to send the response body behind Eventlet's back, perhaps with some
zero-copy wizardry, then we have to unclog the plumbing in eventlet.wsgi
to force the headers out, so we use an EventletPlungerString to empty out
all of Eventlet's buffers.
"""
def __len__(self):
return wsgi.MINIMUM_CHUNK_SIZE + 1
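# Usage sketch: yielding a single EventletPlungerString() from a WSGI app_iter
# (as zero_copy_iter() does in ObjectController.__call__ below) makes
# eventlet.wsgi believe it has buffered more than MINIMUM_CHUNK_SIZE bytes, so
# it flushes the queued headers even though the string itself is empty.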
class ObjectController(BaseStorageServer):
"""Implements the WSGI application for the Swift Object Server."""
server_type = 'object-server'
def __init__(self, conf, logger=None):
"""
Creates a new WSGI application for the Swift Object Server. An
example configuration is given at
<source-dir>/etc/object-server.conf-sample or
/etc/swift/object-server.conf-sample.
"""
super(ObjectController, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route='object-server')
self.node_timeout = float(conf.get('node_timeout', 3))
self.container_update_timeout = float(
conf.get('container_update_timeout', 1))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.max_upload_time = int(conf.get('max_upload_time', 86400))
self.slow = int(conf.get('slow', 0))
self.keep_cache_private = \
config_true_value(conf.get('keep_cache_private', 'false'))
default_allowed_headers = '''
content-disposition,
content-encoding,
x-delete-at,
x-object-manifest,
x-static-large-object,
'''
extra_allowed_headers = [
header.strip().lower() for header in conf.get(
'allowed_headers', default_allowed_headers).split(',')
if header.strip()
]
self.allowed_headers = set()
for header in extra_allowed_headers:
if header not in DATAFILE_SYSTEM_META:
self.allowed_headers.add(header)
self.auto_create_account_prefix = \
conf.get('auto_create_account_prefix') or '.'
self.expiring_objects_account = self.auto_create_account_prefix + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
# Initialization was successful, so now apply the network chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
        # NOTE WELL: This is a class setting, so until we can set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because the primary motivation for this is to optimize how data
# is written back to the proxy server, we could use the value from the
# disk_chunk_size parameter. However, it affects all created sockets
# using this class so we have chosen to tie it to the
# network_chunk_size parameter value instead.
socket._fileobject.default_bufsize = self.network_chunk_size
# Provide further setup specific to an object server implementation.
self.setup(conf)
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_router = DiskFileRouter(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if 'replication_semaphore' in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf['replication_semaphore'][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(
conf.get('replication_failure_threshold') or 100)
self.replication_failure_ratio = float(
conf.get('replication_failure_ratio') or 1.0)
def get_diskfile(self, device, partition, account, container, obj,
policy, **kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
return self._diskfile_router[policy].get_diskfile(
device, partition, account, container, obj, policy, **kwargs)
def async_update(self, op, account, container, obj, host, partition,
contdevice, headers_out, objdevice, policy,
logger_thread_locals=None):
"""
Sends or saves an async update.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param host: host that the container is on
:param partition: partition that the container is on
:param contdevice: device name that the container is on
:param headers_out: dictionary of headers to send in the container
request
:param objdevice: device name that the object is in
:param policy: the associated BaseStoragePolicy instance
:param logger_thread_locals: The thread local values to be set on the
self.logger to retain transaction
logging information.
"""
if logger_thread_locals:
self.logger.thread_locals = logger_thread_locals
headers_out['user-agent'] = 'object-server %s' % os.getpid()
full_path = '/%s/%s/%s' % (account, container, obj)
if all([host, partition, contdevice]):
try:
with ConnectionTimeout(self.conn_timeout):
ip, port = host.rsplit(':', 1)
conn = http_connect(ip, port, contdevice, partition, op,
full_path, headers_out)
with Timeout(self.node_timeout):
response = conn.getresponse()
response.read()
if is_success(response.status):
return
else:
self.logger.error(_(
'ERROR Container update failed '
'(saving for async update later): %(status)d '
'response from %(ip)s:%(port)s/%(dev)s'),
{'status': response.status, 'ip': ip, 'port': port,
'dev': contdevice})
except (Exception, Timeout):
self.logger.exception(_(
'ERROR container update failed with '
'%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
{'ip': ip, 'port': port, 'dev': contdevice})
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
timestamp = headers_out.get('x-meta-timestamp',
headers_out.get('x-timestamp'))
self._diskfile_router[policy].pickle_async_update(
objdevice, account, container, obj, data, timestamp, policy)
def container_update(self, op, account, container, obj, request,
headers_out, objdevice, policy):
"""
Update the container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param request: the original request object driving the update
:param headers_out: dictionary of headers to send in the container
request(s)
:param objdevice: device name that the object is in
:param policy: the BaseStoragePolicy instance
"""
headers_in = request.headers
conthosts = [h.strip() for h in
headers_in.get('X-Container-Host', '').split(',')]
contdevices = [d.strip() for d in
headers_in.get('X-Container-Device', '').split(',')]
contpartition = headers_in.get('X-Container-Partition', '')
if len(conthosts) != len(contdevices):
# This shouldn't happen unless there's a bug in the proxy,
# but if there is, we want to know about it.
self.logger.error(_(
'ERROR Container update failed: different '
'numbers of hosts and devices in request: '
'"%(hosts)s" vs "%(devices)s"') % {
'hosts': headers_in.get('X-Container-Host', ''),
'devices': headers_in.get('X-Container-Device', '')})
return
if contpartition:
updates = zip(conthosts, contdevices)
else:
updates = []
headers_out['x-trans-id'] = headers_in.get('x-trans-id', '-')
headers_out['referer'] = request.as_referer()
headers_out['X-Backend-Storage-Policy-Index'] = int(policy)
update_greenthreads = []
for conthost, contdevice in updates:
gt = spawn(self.async_update, op, account, container, obj,
conthost, contpartition, contdevice, headers_out,
objdevice, policy,
logger_thread_locals=self.logger.thread_locals)
update_greenthreads.append(gt)
# Wait a little bit to see if the container updates are successful.
# If we immediately return after firing off the greenthread above, then
# we're more likely to confuse the end-user who does a listing right
# after getting a successful response to the object create. The
# `container_update_timeout` bounds the length of time we wait so that
# one slow container server doesn't make the entire request lag.
try:
with Timeout(self.container_update_timeout):
for gt in update_greenthreads:
gt.wait()
except Timeout:
# updates didn't go through, log it and return
self.logger.debug(
'Container update timeout (%.4fs) waiting for %s',
self.container_update_timeout, updates)
def delete_at_update(self, op, delete_at, account, container, obj,
request, objdevice, policy):
"""
Update the expiring objects container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param delete_at: scheduled delete in UNIX seconds, int
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param request: the original request driving the update
:param objdevice: device name that the object is in
:param policy: the BaseStoragePolicy instance (used for tmp dir)
"""
if config_true_value(
request.headers.get('x-backend-replication', 'f')):
return
delete_at = normalize_delete_at_timestamp(delete_at)
updates = [(None, None)]
partition = None
hosts = contdevices = [None]
headers_in = request.headers
headers_out = HeaderKeyDict({
# system accounts are always Policy-0
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': request.timestamp.internal,
'x-trans-id': headers_in.get('x-trans-id', '-'),
'referer': request.as_referer()})
if op != 'DELETE':
delete_at_container = headers_in.get('X-Delete-At-Container', None)
if not delete_at_container:
self.logger.warning(
'X-Delete-At-Container header must be specified for '
'expiring objects background %s to work properly. Making '
'best guess as to the container name for now.' % op)
# TODO(gholt): In a future release, change the above warning to
# a raised exception and remove the guess code below.
delete_at_container = get_expirer_container(
delete_at, self.expiring_objects_container_divisor,
account, container, obj)
partition = headers_in.get('X-Delete-At-Partition', None)
hosts = headers_in.get('X-Delete-At-Host', '')
contdevices = headers_in.get('X-Delete-At-Device', '')
updates = [upd for upd in
zip((h.strip() for h in hosts.split(',')),
(c.strip() for c in contdevices.split(',')))
if all(upd) and partition]
if not updates:
updates = [(None, None)]
headers_out['x-size'] = '0'
headers_out['x-content-type'] = 'text/plain'
headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
else:
# DELETEs of old expiration data have no way of knowing what the
# old X-Delete-At-Container was at the time of the initial setting
# of the data, so a best guess is made here.
# Worst case is a DELETE is issued now for something that doesn't
# exist there and the original data is left where it is, where
# it will be ignored when the expirer eventually tries to issue the
# object DELETE later since the X-Delete-At value won't match up.
delete_at_container = get_expirer_container(
delete_at, self.expiring_objects_container_divisor,
account, container, obj)
delete_at_container = normalize_delete_at_timestamp(
delete_at_container)
for host, contdevice in updates:
self.async_update(
op, self.expiring_objects_account, delete_at_container,
'%s-%s/%s/%s' % (delete_at, account, container, obj),
host, partition, contdevice, headers_out, objdevice,
policy)
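    # Illustrative update (values assumed, not taken from a real request):
    # with delete_at=1400000000 the entry is sent to the auto-created
    # ".expiring_objects" account, to the container chosen by
    # get_expirer_container() for that timestamp window, as an object named
    # "1400000000-AUTH_test/cont/obj" which the expirer later splits back
    # into the original account, container and object.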
def _make_timeout_reader(self, file_like):
def timeout_reader():
with ChunkReadTimeout(self.client_timeout):
return file_like.read(self.network_chunk_size)
return timeout_reader
def _read_put_commit_message(self, mime_documents_iter):
rcvd_commit = False
try:
with ChunkReadTimeout(self.client_timeout):
commit_hdrs, commit_iter = next(mime_documents_iter)
if commit_hdrs.get('X-Document', None) == "put commit":
rcvd_commit = True
drain(commit_iter, self.network_chunk_size, self.client_timeout)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
raise HTTPBadRequest(body="couldn't find PUT commit MIME doc")
return rcvd_commit
def _read_metadata_footer(self, mime_documents_iter):
try:
with ChunkReadTimeout(self.client_timeout):
footer_hdrs, footer_iter = next(mime_documents_iter)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
raise HTTPBadRequest(body="couldn't find footer MIME doc")
timeout_reader = self._make_timeout_reader(footer_iter)
try:
footer_body = ''.join(iter(timeout_reader, ''))
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
footer_md5 = footer_hdrs.get('Content-MD5')
if not footer_md5:
raise HTTPBadRequest(body="no Content-MD5 in footer")
if footer_md5 != md5(footer_body).hexdigest():
raise HTTPUnprocessableEntity(body="footer MD5 mismatch")
try:
return HeaderKeyDict(json.loads(footer_body))
except ValueError:
raise HTTPBadRequest("invalid JSON for footer doc")
def _check_container_override(self, update_headers, metadata,
footers=None):
"""
Applies any overrides to the container update headers.
Overrides may be in the x-object-sysmeta-container-update- namespace or
the x-backend-container-update-override- namespace. The former is
preferred and is used by proxy middlewares. The latter is historical
but is still used with EC policy PUT requests; for backwards
compatibility the header names used with EC policy requests have not
been changed to the sysmeta namespace - that way the EC PUT path of a
newer proxy will remain compatible with an object server that pre-dates
the introduction of the x-object-sysmeta-container-update- namespace
and vice-versa.
:param update_headers: a dict of headers used in the container update
        :param metadata: a dict that may contain override items
        :param footers: another dict that may contain override items, at a
higher priority than metadata
"""
footers = footers or {}
# the order of this list is significant:
# x-object-sysmeta-container-update-override-* headers take precedence
# over x-backend-container-update-override-* headers
override_prefixes = ['x-backend-container-update-override-',
'x-object-sysmeta-container-update-override-']
for override_prefix in override_prefixes:
for key, val in metadata.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
# apply x-backend-container-update-override* from footers *before*
# x-object-sysmeta-container-update-override-* from headers
for key, val in footers.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
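    # Illustrative mapping (header values assumed): metadata containing
    # 'X-Backend-Container-Update-Override-Etag': 'aaa' and footers containing
    # 'X-Object-Sysmeta-Container-Update-Override-Etag': 'bbb' both collapse
    # to the update header 'x-etag'; the sysmeta prefix is processed last and
    # footers after metadata, so 'bbb' is the value the container update
    # finally sends.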
def _preserve_slo_manifest(self, update_metadata, orig_metadata):
if 'X-Static-Large-Object' in orig_metadata:
update_metadata['X-Static-Large-Object'] = \
orig_metadata['X-Static-Large-Object']
@public
@timing_stats()
def POST(self, request):
"""Handle HTTP POST requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
req_timestamp = valid_timestamp(request)
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
if new_delete_at and new_delete_at < time.time():
return HTTPBadRequest(body='X-Delete-At in past', request=request,
content_type='text/plain')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata()
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined):
return HTTPNotFound(request=request)
orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
orig_ctype_timestamp = disk_file.content_type_timestamp
req_ctype_time = '0'
req_ctype = request.headers.get('Content-Type')
if req_ctype:
req_ctype_time = request.headers.get('Content-Type-Timestamp',
req_timestamp.internal)
req_ctype_timestamp = Timestamp(req_ctype_time)
if orig_timestamp >= req_timestamp \
and orig_ctype_timestamp >= req_ctype_timestamp:
return HTTPConflict(
request=request,
headers={'X-Backend-Timestamp': orig_timestamp.internal})
if req_timestamp > orig_timestamp:
metadata = {'X-Timestamp': req_timestamp.internal}
self._preserve_slo_manifest(metadata, orig_metadata)
metadata.update(val for val in request.headers.items()
if (is_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
headers_to_copy = (
request.headers.get(
'X-Backend-Replication-Headers', '').split() +
list(self.allowed_headers))
for header_key in headers_to_copy:
if header_key in request.headers:
header_caps = header_key.title()
metadata[header_caps] = request.headers[header_key]
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
if orig_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update(
'PUT', new_delete_at, account, container, obj, request,
device, policy)
if orig_delete_at:
self.delete_at_update('DELETE', orig_delete_at, account,
container, obj, request, device,
policy)
else:
# preserve existing metadata, only content-type may be updated
metadata = dict(disk_file.get_metafile_metadata())
if req_ctype_timestamp > orig_ctype_timestamp:
# we have a new content-type, add to metadata and container update
content_type_headers = {
'Content-Type': request.headers['Content-Type'],
'Content-Type-Timestamp': req_ctype_timestamp.internal
}
metadata.update(content_type_headers)
else:
# send existing content-type with container update
content_type_headers = {
'Content-Type': disk_file.content_type,
'Content-Type-Timestamp': orig_ctype_timestamp.internal
}
if orig_ctype_timestamp != disk_file.data_timestamp:
# only add to metadata if it's not the datafile content-type
metadata.update(content_type_headers)
try:
disk_file.write_metadata(metadata)
except (DiskFileXattrNotSupported, DiskFileNoSpace):
return HTTPInsufficientStorage(drive=device, request=request)
if (content_type_headers['Content-Type-Timestamp']
!= disk_file.data_timestamp):
# Current content-type is not from the datafile, but the datafile
# content-type may have a swift_bytes param that was appended by
# SLO and we must continue to send that with the container update.
# Do this (rather than use a separate header) for backwards
# compatibility because there may be 'legacy' container updates in
# async pending that have content-types with swift_bytes params, so
            # we have to be able to handle those in the container server anyway.
_, swift_bytes = extract_swift_bytes(
disk_file.get_datafile_metadata()['Content-Type'])
if swift_bytes:
content_type_headers['Content-Type'] += (';swift_bytes=%s'
% swift_bytes)
update_headers = HeaderKeyDict({
'x-size': orig_metadata['Content-Length'],
'x-content-type': content_type_headers['Content-Type'],
'x-timestamp': disk_file.data_timestamp.internal,
'x-content-type-timestamp':
content_type_headers['Content-Type-Timestamp'],
'x-meta-timestamp': metadata['X-Timestamp'],
'x-etag': orig_metadata['ETag']})
# Special cases for backwards compatibility.
# For EC policy, send X-Object-Sysmeta-Ec-Etag which is same as the
# X-Backend-Container-Update-Override-Etag value sent with the original
# PUT. Similarly send X-Object-Sysmeta-Ec-Content-Length which is the
# same as the X-Backend-Container-Update-Override-Size value. We have
# to send Etag and size with a POST container update because the
# original PUT container update may have failed or be in async_pending.
if 'X-Object-Sysmeta-Ec-Etag' in orig_metadata:
update_headers['X-Etag'] = orig_metadata[
'X-Object-Sysmeta-Ec-Etag']
if 'X-Object-Sysmeta-Ec-Content-Length' in orig_metadata:
update_headers['X-Size'] = orig_metadata[
'X-Object-Sysmeta-Ec-Content-Length']
self._check_container_override(update_headers, orig_metadata)
# object POST updates are PUT to the container server
self.container_update(
'PUT', account, container, obj, request, update_headers,
device, policy)
return HTTPAccepted(request=request)
@public
@timing_stats()
def PUT(self, request):
"""Handle HTTP PUT requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
req_timestamp = valid_timestamp(request)
error_response = check_object_creation(request, obj)
if error_response:
return error_response
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
if new_delete_at and new_delete_at < time.time():
return HTTPBadRequest(body='X-Delete-At in past', request=request,
content_type='text/plain')
try:
fsize = request.message_length()
except ValueError as e:
return HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
# In case of multipart-MIME put, the proxy sends a chunked request,
# but may let us know the real content length so we can verify that
# we have enough disk space to hold the object.
if fsize is None:
fsize = request.headers.get('X-Backend-Obj-Content-Length')
if fsize is not None:
try:
fsize = int(fsize)
except ValueError as e:
return HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
# SSYNC will include Frag-Index header for subrequests to primary
        # nodes; handoff nodes should 409 subrequests that would over-write
        # an existing data fragment until they have offloaded that fragment
frag_index = request.headers.get('X-Backend-Ssync-Frag-Index')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_index=frag_index)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata()
orig_timestamp = disk_file.data_timestamp
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except DiskFileDeleted as e:
orig_metadata = e.metadata
orig_timestamp = e.timestamp
except (DiskFileNotExist, DiskFileQuarantined):
orig_metadata = {}
orig_timestamp = Timestamp(0)
# Checks for If-None-Match
if request.if_none_match is not None and orig_metadata:
if '*' in request.if_none_match:
# File exists already so return 412
return HTTPPreconditionFailed(request=request)
if orig_metadata.get('ETag') in request.if_none_match:
# The current ETag matches, so return 412
return HTTPPreconditionFailed(request=request)
if orig_timestamp >= req_timestamp:
return HTTPConflict(
request=request,
headers={'X-Backend-Timestamp': orig_timestamp.internal})
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
upload_expiration = time.time() + self.max_upload_time
etag = md5()
elapsed_time = 0
try:
with disk_file.create(size=fsize) as writer:
upload_size = 0
# If the proxy wants to send us object metadata after the
# object body, it sets some headers. We have to tell the
# proxy, in the 100 Continue response, that we're able to
# parse a multipart MIME document and extract the object and
# metadata from it. If we don't, then the proxy won't
# actually send the footer metadata.
have_metadata_footer = False
use_multiphase_commit = False
mime_documents_iter = iter([])
obj_input = request.environ['wsgi.input']
hundred_continue_headers = []
if config_true_value(
request.headers.get(
'X-Backend-Obj-Multiphase-Commit')):
use_multiphase_commit = True
hundred_continue_headers.append(
('X-Obj-Multiphase-Commit', 'yes'))
if config_true_value(
request.headers.get('X-Backend-Obj-Metadata-Footer')):
have_metadata_footer = True
hundred_continue_headers.append(
('X-Obj-Metadata-Footer', 'yes'))
if have_metadata_footer or use_multiphase_commit:
obj_input.set_hundred_continue_response_headers(
hundred_continue_headers)
mime_boundary = request.headers.get(
'X-Backend-Obj-Multipart-Mime-Boundary')
if not mime_boundary:
return HTTPBadRequest("no MIME boundary")
try:
with ChunkReadTimeout(self.client_timeout):
mime_documents_iter = iter_mime_headers_and_bodies(
request.environ['wsgi.input'],
mime_boundary, self.network_chunk_size)
_junk_hdrs, obj_input = next(mime_documents_iter)
except ChunkReadError:
return HTTPClientDisconnect(request=request)
except ChunkReadTimeout:
return HTTPRequestTimeout(request=request)
timeout_reader = self._make_timeout_reader(obj_input)
try:
for chunk in iter(timeout_reader, ''):
start_time = time.time()
if start_time > upload_expiration:
self.logger.increment('PUT.timeouts')
return HTTPRequestTimeout(request=request)
etag.update(chunk)
upload_size = writer.write(chunk)
elapsed_time += time.time() - start_time
except ChunkReadError:
return HTTPClientDisconnect(request=request)
except ChunkReadTimeout:
return HTTPRequestTimeout(request=request)
if upload_size:
self.logger.transfer_rate(
'PUT.' + device + '.timing', elapsed_time,
upload_size)
if fsize is not None and fsize != upload_size:
return HTTPClientDisconnect(request=request)
footer_meta = {}
if have_metadata_footer:
footer_meta = self._read_metadata_footer(
mime_documents_iter)
request_etag = (footer_meta.get('etag') or
request.headers.get('etag', '')).lower()
etag = etag.hexdigest()
if request_etag and request_etag != etag:
return HTTPUnprocessableEntity(request=request)
metadata = {
'X-Timestamp': request.timestamp.internal,
'Content-Type': request.headers['content-type'],
'ETag': etag,
'Content-Length': str(upload_size),
}
metadata.update(val for val in request.headers.items()
if (is_sys_or_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
metadata.update(val for val in footer_meta.items()
if (is_sys_or_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
headers_to_copy = (
request.headers.get(
'X-Backend-Replication-Headers', '').split() +
list(self.allowed_headers))
for header_key in headers_to_copy:
if header_key in request.headers:
header_caps = header_key.title()
metadata[header_caps] = request.headers[header_key]
writer.put(metadata)
# if the PUT requires a two-phase commit (a data and a commit
# phase) send the proxy server another 100-continue response
# to indicate that we are finished writing object data
if use_multiphase_commit:
request.environ['wsgi.input'].\
send_hundred_continue_response()
if not self._read_put_commit_message(mime_documents_iter):
return HTTPServerError(request=request)
# got 2nd phase confirmation, write a timestamp.durable
# state file to indicate a successful PUT
writer.commit(request.timestamp)
# Drain any remaining MIME docs from the socket. There
# shouldn't be any, but we must read the whole request body.
try:
while True:
with ChunkReadTimeout(self.client_timeout):
_junk_hdrs, _junk_body = next(mime_documents_iter)
drain(_junk_body, self.network_chunk_size,
self.client_timeout)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
pass
except (DiskFileXattrNotSupported, DiskFileNoSpace):
return HTTPInsufficientStorage(drive=device, request=request)
if orig_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update(
'PUT', new_delete_at, account, container, obj, request,
device, policy)
if orig_delete_at:
self.delete_at_update(
'DELETE', orig_delete_at, account, container, obj,
request, device, policy)
update_headers = HeaderKeyDict({
'x-size': metadata['Content-Length'],
'x-content-type': metadata['Content-Type'],
'x-timestamp': metadata['X-Timestamp'],
'x-etag': metadata['ETag']})
# apply any container update header overrides sent with request
self._check_container_override(update_headers, request.headers,
footer_meta)
self.container_update(
'PUT', account, container, obj, request,
update_headers,
device, policy)
return HTTPCreated(request=request, etag=etag)
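    # Illustrative wire layout for a footer-bearing, multiphase PUT (boundary
    # and part order assumed): the chunked body is a multipart MIME stream in
    # which the first document is the object data, an optional later document
    # carries the JSON metadata footer with its Content-MD5 header, and a
    # final document with "X-Document: put commit" confirms the second phase
    # so that writer.commit() is called; see _read_metadata_footer() and
    # _read_put_commit_message() above.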
@public
@timing_stats()
def GET(self, request):
"""Handle HTTP GET requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
frag_prefs = safe_json_loads(
request.headers.get('X-Backend-Fragment-Preferences'))
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_prefs=frag_prefs)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
with disk_file.open():
metadata = disk_file.get_metadata()
obj_size = int(metadata['Content-Length'])
file_x_ts = Timestamp(metadata['X-Timestamp'])
keep_cache = (self.keep_cache_private or
('X-Auth-Token' not in request.headers and
'X-Storage-Token' not in request.headers))
conditional_etag = resolve_etag_is_at_header(request, metadata)
response = Response(
app_iter=disk_file.reader(keep_cache=keep_cache),
request=request, conditional_response=True,
conditional_etag=conditional_etag)
response.headers['Content-Type'] = metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in metadata.items():
if (is_sys_or_user_meta('object', key) or
is_object_transient_sysmeta(key) or
key.lower() in self.allowed_headers):
response.headers[key] = value
response.etag = metadata['ETag']
response.last_modified = math.ceil(float(file_x_ts))
response.content_length = obj_size
try:
response.content_encoding = metadata[
'Content-Encoding']
except KeyError:
pass
response.headers['X-Timestamp'] = file_x_ts.normal
response.headers['X-Backend-Timestamp'] = file_x_ts.internal
response.headers['X-Backend-Data-Timestamp'] = \
disk_file.data_timestamp.internal
if disk_file.durable_timestamp:
response.headers['X-Backend-Durable-Timestamp'] = \
disk_file.durable_timestamp.internal
response.headers['X-Backend-Fragments'] = \
_make_backend_fragments_header(disk_file.fragments)
resp = request.get_response(response)
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined) as e:
headers = {}
if hasattr(e, 'timestamp'):
headers['X-Backend-Timestamp'] = e.timestamp.internal
resp = HTTPNotFound(request=request, headers=headers,
conditional_response=True)
return resp
@public
@timing_stats(sample_rate=0.8)
def HEAD(self, request):
"""Handle HTTP HEAD requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
frag_prefs = safe_json_loads(
request.headers.get('X-Backend-Fragment-Preferences'))
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_prefs=frag_prefs)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
metadata = disk_file.read_metadata()
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined) as e:
headers = {}
if hasattr(e, 'timestamp'):
headers['X-Backend-Timestamp'] = e.timestamp.internal
return HTTPNotFound(request=request, headers=headers,
conditional_response=True)
conditional_etag = resolve_etag_is_at_header(request, metadata)
response = Response(request=request, conditional_response=True,
conditional_etag=conditional_etag)
response.headers['Content-Type'] = metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in metadata.items():
if (is_sys_or_user_meta('object', key) or
is_object_transient_sysmeta(key) or
key.lower() in self.allowed_headers):
response.headers[key] = value
response.etag = metadata['ETag']
ts = Timestamp(metadata['X-Timestamp'])
response.last_modified = math.ceil(float(ts))
# Needed for container sync feature
response.headers['X-Timestamp'] = ts.normal
response.headers['X-Backend-Timestamp'] = ts.internal
response.headers['X-Backend-Data-Timestamp'] = \
disk_file.data_timestamp.internal
if disk_file.durable_timestamp:
response.headers['X-Backend-Durable-Timestamp'] = \
disk_file.durable_timestamp.internal
response.headers['X-Backend-Fragments'] = \
_make_backend_fragments_header(disk_file.fragments)
response.content_length = int(metadata['Content-Length'])
try:
response.content_encoding = metadata['Content-Encoding']
except KeyError:
pass
return response
@public
@timing_stats()
def DELETE(self, request):
"""Handle HTTP DELETE requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
req_timestamp = valid_timestamp(request)
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata()
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except DiskFileExpired as e:
orig_timestamp = e.timestamp
orig_metadata = e.metadata
response_class = HTTPNotFound
except DiskFileDeleted as e:
orig_timestamp = e.timestamp
orig_metadata = {}
response_class = HTTPNotFound
except (DiskFileNotExist, DiskFileQuarantined):
orig_timestamp = 0
orig_metadata = {}
response_class = HTTPNotFound
else:
orig_timestamp = disk_file.data_timestamp
if orig_timestamp < req_timestamp:
response_class = HTTPNoContent
else:
response_class = HTTPConflict
response_timestamp = max(orig_timestamp, req_timestamp)
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
try:
req_if_delete_at_val = request.headers['x-if-delete-at']
req_if_delete_at = int(req_if_delete_at_val)
except KeyError:
pass
except ValueError:
return HTTPBadRequest(
request=request,
body='Bad X-If-Delete-At header value')
else:
# request includes x-if-delete-at; we must not place a tombstone
# if we can not verify the x-if-delete-at time
if not orig_timestamp:
# no object found at all
return HTTPNotFound()
if orig_delete_at != req_if_delete_at:
return HTTPPreconditionFailed(
request=request,
body='X-If-Delete-At and X-Delete-At do not match')
else:
# differentiate success from no object at all
response_class = HTTPNoContent
if orig_delete_at:
self.delete_at_update('DELETE', orig_delete_at, account,
container, obj, request, device,
policy)
if orig_timestamp < req_timestamp:
try:
disk_file.delete(req_timestamp)
except DiskFileNoSpace:
return HTTPInsufficientStorage(drive=device, request=request)
self.container_update(
'DELETE', account, container, obj, request,
HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
device, policy)
return response_class(
request=request,
headers={'X-Backend-Timestamp': response_timestamp.internal})
@public
@replication
@timing_stats(sample_rate=0.1)
def REPLICATE(self, request):
"""
Handle REPLICATE requests for the Swift Object Server. This is used
by the object replicator to get hashes for directories.
Note that the name REPLICATE is preserved for historical reasons as
this verb really just returns the hashes information for the specified
parameters and is used, for example, by both replication and EC.
"""
device, partition, suffix_parts, policy = \
get_name_and_placement(request, 2, 3, True)
suffixes = suffix_parts.split('-') if suffix_parts else []
try:
hashes = self._diskfile_router[policy].get_hashes(
device, partition, suffixes, policy)
except DiskFileDeviceUnavailable:
resp = HTTPInsufficientStorage(drive=device, request=request)
else:
resp = Response(body=pickle.dumps(hashes))
return resp
@public
@replication
@timing_stats(sample_rate=0.1)
def SSYNC(self, request):
return Response(app_iter=ssync_receiver.Receiver(self, request)())
def __call__(self, env, start_response):
"""WSGI Application entry point for the Swift Object Server."""
start_time = time.time()
req = Request(env)
self.logger.txn_id = req.headers.get('x-trans-id', None)
if not check_utf8(req.path_info):
res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
else:
try:
# disallow methods which have not been marked 'public'
if req.method not in self.allowed_methods:
res = HTTPMethodNotAllowed()
else:
res = getattr(self, req.method)(req)
except DiskFileCollision:
res = HTTPForbidden(request=req)
except HTTPException as error_response:
res = error_response
except (Exception, Timeout):
self.logger.exception(_(
'ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = time.time() - start_time
if self.log_requests:
log_line = get_log_line(req, res, trans_time, '')
if req.method in ('REPLICATE', 'SSYNC') or \
'X-Backend-Replication' in req.headers:
self.logger.debug(log_line)
else:
self.logger.info(log_line)
if req.method in ('PUT', 'DELETE'):
slow = self.slow - trans_time
if slow > 0:
sleep(slow)
# To be able to zero-copy send the object, we need a few things.
# First, we have to be responding successfully to a GET, or else we're
# not sending the object. Second, we have to be able to extract the
# socket file descriptor from the WSGI input object. Third, the
# diskfile has to support zero-copy send.
#
# There's a good chance that this could work for 206 responses too,
# but the common case is sending the whole object, so we'll start
# there.
if req.method == 'GET' and res.status_int == 200 and \
isinstance(env['wsgi.input'], wsgi.Input):
app_iter = getattr(res, 'app_iter', None)
checker = getattr(app_iter, 'can_zero_copy_send', None)
if checker and checker():
# For any kind of zero-copy thing like sendfile or splice, we
# need the file descriptor. Eventlet doesn't provide a clean
# way of getting that, so we resort to this.
wsock = env['wsgi.input'].get_socket()
wsockfd = wsock.fileno()
# Don't call zero_copy_send() until after we force the HTTP
# headers out of Eventlet and into the socket.
def zero_copy_iter():
# If possible, set TCP_CORK so that headers don't
# immediately go on the wire, but instead, wait for some
# response body to make the TCP frames as large as
# possible (and hence as few packets as possible).
#
# On non-Linux systems, we might consider TCP_NODELAY, but
# since the only known zero-copy-capable diskfile uses
# Linux-specific syscalls, we'll defer that work until
# someone needs it.
if hasattr(socket, 'TCP_CORK'):
wsock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_CORK, 1)
yield EventletPlungerString()
try:
app_iter.zero_copy_send(wsockfd)
except Exception:
self.logger.exception("zero_copy_send() blew up")
raise
yield ''
# Get headers ready to go out
res(env, start_response)
return zero_copy_iter()
else:
return res(env, start_response)
else:
return res(env, start_response)
def global_conf_callback(preloaded_app_conf, global_conf):
"""
Callback for swift.common.wsgi.run_wsgi during the global_conf
creation so that we can add our replication_semaphore, used to
limit the number of concurrent SSYNC_REQUESTS across all
workers.
:param preloaded_app_conf: The preloaded conf for the WSGI app.
This conf instance will go away, so
just read from it, don't write.
:param global_conf: The global conf that will eventually be
passed to the app_factory function later.
This conf is created before the worker
subprocesses are forked, so can be useful to
set up semaphores, shared memory, etc.
"""
replication_concurrency = int(
preloaded_app_conf.get('replication_concurrency') or 4)
if replication_concurrency:
# Have to put the value in a list so it can get past paste
global_conf['replication_semaphore'] = [
multiprocessing.BoundedSemaphore(replication_concurrency)]
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI object server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ObjectController(conf)
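# Minimal usage sketch (configuration values assumed, not taken from a real
# deployment): paste.deploy resolves the object-server app entry point to
# app_factory and calls it roughly like
#
#     app = app_factory({'__file__': '/etc/swift/object-server.conf'},
#                       devices='/srv/node', mount_check='false')
#
# which copies global_conf, merges in the local options and returns an
# ObjectController WSGI application.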
from __future__ import unicode_literals
from django.core import mail
from django.utils import six
from reviewboard.reviews.models import Review
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (review_reply_item_mimetype,
review_reply_list_mimetype)
from reviewboard.webapi.tests.mixins import (BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_review import (ReviewItemMixin,
ReviewListMixin)
from reviewboard.webapi.tests.urls import (get_review_reply_item_url,
get_review_reply_list_url)
class BaseResourceTestCase(BaseWebAPITestCase):
def _create_test_review(self, with_local_site=False):
review_request = self.create_review_request(
submitter=self.user,
with_local_site=with_local_site)
file_attachment = self.create_file_attachment(review_request)
review_request.publish(review_request.submitter)
review = self.create_review(review_request, publish=True)
self.create_file_attachment_comment(review, file_attachment)
return review
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(ReviewListMixin, ReviewRequestChildListMixin,
BaseResourceTestCase):
"""Testing the ReviewReplyResource list APIs."""
fixtures = ['test_users']
sample_api_url = 'review-requests/<id>/reviews/<id>/replies/'
resource = resources.review_reply
def setup_review_request_child_test(self, review_request):
review = self.create_review(review_request, publish=True)
return (get_review_reply_list_url(review),
review_reply_list_mimetype)
def compare_item(self, item_rsp, reply):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], reply.body_top)
self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)
if reply.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, publish=True)
if populate_items:
items = [self.create_reply(review, publish=True)]
else:
items = []
return (get_review_reply_list_url(review, local_site_name),
review_reply_list_mimetype,
items)
def test_get_with_counts_only(self):
"""Testing the
GET review-requests/<id>/reviews/<id>/replies/?counts-only=1 API
"""
review = self._create_test_review()
self.create_reply(review, user=self.user, publish=True)
rsp = self.api_get(
'%s?counts-only=1' % get_review_reply_list_url(review),
expected_mimetype=review_reply_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['count'], 1)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, publish=True)
return (get_review_reply_list_url(review, local_site_name),
review_reply_item_mimetype,
{},
[review])
def check_post_result(self, user, rsp, review):
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertFalse(reply.rich_text)
self.compare_item(rsp['reply'], reply)
def test_post_with_body_top(self):
"""Testing the POST review-requests/<id>/reviews/<id>/replies/ API
with body_top
"""
body_top = 'My Body Top'
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
rsp = self.api_post(
get_review_reply_list_url(review),
{'body_top': body_top},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.body_top, body_top)
def test_post_with_body_bottom(self):
"""Testing the POST review-requests/<id>/reviews/<id>/replies/ API
with body_bottom
"""
body_bottom = 'My Body Bottom'
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
rsp = self.api_post(
get_review_reply_list_url(review),
{'body_bottom': body_bottom},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.body_bottom, body_bottom)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(ReviewItemMixin, ReviewRequestChildItemMixin,
BaseResourceTestCase):
"""Testing the ReviewReplyResource item APIs."""
fixtures = ['test_users']
sample_api_url = 'review-requests/<id>/reviews/<id>/replies/<id>/'
resource = resources.review_reply
def setup_review_request_child_test(self, review_request):
review = self.create_review(review_request, publish=True)
reply = self.create_reply(review, publish=True)
return (get_review_reply_item_url(review, reply.pk),
review_reply_item_mimetype)
def compare_item(self, item_rsp, reply):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], reply.body_top)
self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)
if reply.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
[reply, review])
def check_delete_result(self, user, reply, review):
self.assertNotIn(reply, review.replies.all())
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
review_reply_item_mimetype,
reply)
def test_get_not_modified(self):
"""Testing the GET review-requests/<id>/reviews/<id>/
with Not Modified response
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
reply = self.create_reply(review, publish=True)
self._testHttpCaching(
get_review_reply_item_url(reply.base_reply_to, reply.id),
check_last_modified=True)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
review_reply_item_mimetype,
{
'body_top': 'New body top',
},
reply,
[])
def check_put_result(self, user, item_rsp, reply, *args):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], 'New body top')
self.assertEqual(item_rsp['text_type'], 'plain')
reply = Review.objects.get(pk=reply.pk)
self.compare_item(item_rsp, reply)
def test_put_with_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/replies/<id>/?public=1 API
"""
self.siteconfig.set('mail_send_review_mail', True)
self.siteconfig.save()
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
mail.outbox = []
rsp, response = self.api_post_with_response(
get_review_reply_list_url(review),
expected_mimetype=review_reply_item_mimetype)
self.assertIn('Location', response)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
rsp = self.api_put(
response['Location'],
{
'body_top': 'Test',
'public': True,
},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.public, True)
self.assertEqual(len(mail.outbox), 1)
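# Illustrative sketch, not one of the tests above: the create-then-publish
# flow exercised in test_put_with_publish could be captured in a helper like
# this. It only reuses calls already made in this module
# (api_post_with_response, api_put, get_review_reply_list_url); the helper's
# name and signature are assumptions for illustration.
def _publish_reply_sketch(testcase, review, body_top='Test'):
    """Create a draft reply via POST, then publish it via PUT."""
    # POST to the reply list resource creates an unpublished draft reply and
    # returns its Location header for follow-up requests.
    rsp, response = testcase.api_post_with_response(
        get_review_reply_list_url(review),
        expected_mimetype=review_reply_item_mimetype)
    assert rsp['stat'] == 'ok'
    # PUT with public=True publishes the draft; publishing is what triggers
    # the single notification e-mail asserted on in test_put_with_publish.
    return testcase.api_put(
        response['Location'],
        {
            'body_top': body_top,
            'public': True,
        },
        expected_mimetype=review_reply_item_mimetype)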
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import eventlet
eventlet.monkey_patch(os=False)
import copy
import gettext
import logging
import os
import shutil
import sys
import uuid
import fixtures
from oslo.config import cfg
from oslo.messaging import conffixture as messaging_conffixture
import testtools
from nova import context
from nova import db
from nova.db import migration
from nova.db.sqlalchemy import api as session
from nova.network import manager as network_manager
from nova import objects
from nova.objects import base as objects_base
from nova.openstack.common.fixture import logging as log_fixture
from nova.openstack.common.fixture import moxstubout
from nova.openstack.common import log as nova_logging
from nova.openstack.common import timeutils
from nova import paths
from nova import rpc
from nova import service
from nova.tests import conf_fixture
from nova.tests import policy_fixture
from nova import utils
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'),
]
CONF = cfg.CONF
CONF.register_opts(test_opts)
CONF.import_opt('connection',
'nova.openstack.common.db.options',
group='database')
CONF.import_opt('sqlite_db', 'nova.openstack.common.db.options',
group='database')
CONF.import_opt('enabled', 'nova.api.openstack', group='osapi_v3')
CONF.set_override('use_stderr', False)
nova_logging.setup('nova')
# NOTE(comstud): Make sure we have all of the objects loaded. We do this
# at module import time, because we may be using mock decorators in our
# tests that run at import time.
objects.register_all()
_DB_CACHE = None
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class Database(fixtures.Fixture):
def __init__(self, db_session, db_migrate, sql_connection,
sqlite_db, sqlite_clean_db):
self.sql_connection = sql_connection
self.sqlite_db = sqlite_db
self.sqlite_clean_db = sqlite_clean_db
self.engine = db_session.get_engine()
self.engine.dispose()
conn = self.engine.connect()
if sql_connection == "sqlite://":
if db_migrate.db_version() > db_migrate.db_initial_version():
return
else:
testdb = paths.state_path_rel(sqlite_db)
if os.path.exists(testdb):
return
db_migrate.db_sync()
if sql_connection == "sqlite://":
conn = self.engine.connect()
self._DB = "".join(line for line in conn.connection.iterdump())
self.engine.dispose()
else:
cleandb = paths.state_path_rel(sqlite_clean_db)
shutil.copyfile(testdb, cleandb)
def setUp(self):
super(Database, self).setUp()
if self.sql_connection == "sqlite://":
conn = self.engine.connect()
conn.connection.executescript(self._DB)
self.addCleanup(self.engine.dispose)
else:
shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db),
paths.state_path_rel(self.sqlite_db))
class SampleNetworks(fixtures.Fixture):
"""Create sample networks in the database."""
def __init__(self, host=None):
self.host = host
def setUp(self):
super(SampleNetworks, self).setUp()
ctxt = context.get_admin_context()
network = network_manager.VlanManager(host=self.host)
bridge_interface = CONF.flat_interface or CONF.vlan_interface
network.create_networks(ctxt,
label='test',
cidr='10.0.0.0/8',
multi_host=CONF.multi_host,
num_networks=CONF.num_networks,
network_size=CONF.network_size,
cidr_v6=CONF.fixed_range_v6,
gateway=CONF.gateway,
gateway_v6=CONF.gateway_v6,
bridge=CONF.flat_network_bridge,
bridge_interface=bridge_interface,
vpn_start=CONF.vpn_start,
vlan_start=CONF.vlan_start,
dns1=CONF.flat_network_dns)
for net in db.network_get_all(ctxt):
network.set_network_host(ctxt, net)
class ReplaceModule(fixtures.Fixture):
"""Replace a module with a fake module."""
def __init__(self, name, new_value):
self.name = name
self.new_value = new_value
def _restore(self, old_value):
sys.modules[self.name] = old_value
def setUp(self):
super(ReplaceModule, self).setUp()
old_value = sys.modules.get(self.name)
sys.modules[self.name] = self.new_value
self.addCleanup(self._restore, old_value)
class ServiceFixture(fixtures.Fixture):
"""Run a service as a test fixture."""
def __init__(self, name, host=None, **kwargs):
name = name
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'nova-%s' % name)
self.kwargs = kwargs
def setUp(self):
super(ServiceFixture, self).setUp()
self.service = service.Service.create(**self.kwargs)
self.service.start()
self.addCleanup(self.service.kill)
class TranslationFixture(fixtures.Fixture):
"""Use gettext NullTranslation objects in tests."""
def setUp(self):
super(TranslationFixture, self).setUp()
nulltrans = gettext.NullTranslations()
gettext_fixture = fixtures.MonkeyPatch('gettext.translation',
lambda *x, **y: nulltrans)
self.gettext_patcher = self.useFixture(gettext_fixture)
class TestingException(Exception):
pass
class NullHandler(logging.Handler):
"""custom default NullHandler to attempt to format the record.
Used in conjunction with
log_fixture.get_logging_handle_error_fixture to detect formatting errors in
debug level logs without saving the logs.
"""
def handle(self, record):
self.format(record)
def emit(self, record):
pass
def createLock(self):
self.lock = None
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests.
Due to the slowness of DB access, please consider deriving from
`NoDBTestCase` first.
"""
USES_DB = True
# NOTE(rpodolyaka): this attribute can be overridden in subclasses in order
# to scale the global test timeout value set for each
# test case separately. Use 0 value to disable timeout.
TIMEOUT_SCALING_FACTOR = 1
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if self.TIMEOUT_SCALING_FACTOR >= 0:
test_timeout *= self.TIMEOUT_SCALING_FACTOR
else:
raise ValueError('TIMEOUT_SCALING_FACTOR value must be >= 0')
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.useFixture(TranslationFixture())
self.useFixture(log_fixture.get_logging_handle_error_fixture())
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
rpc.add_extra_exmods('nova.test')
self.addCleanup(rpc.clear_extra_exmods)
self.addCleanup(rpc.cleanup)
# set root logger to debug
root = logging.getLogger()
root.setLevel(logging.DEBUG)
# supports collecting debug level for local runs
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
level = logging.DEBUG
else:
level = logging.INFO
# Collect logs
fs = '%(levelname)s [%(name)s] %(message)s'
fake = self.useFixture(fixtures.FakeLogger(format=fs, level=None))
root.handlers[0].setLevel(level)
if level > logging.DEBUG:
# Just attempt to format debug level logs, but don't save them
handler = NullHandler()
self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
handler.setLevel(logging.DEBUG)
self.useFixture(conf_fixture.ConfFixture(CONF))
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
self.messaging_conf.response_timeout = 15
self.useFixture(self.messaging_conf)
rpc.init(CONF)
if self.USES_DB:
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(session, migration,
sql_connection=CONF.database.connection,
sqlite_db=CONF.database.sqlite_db,
sqlite_clean_db=CONF.sqlite_clean_db)
self.useFixture(_DB_CACHE)
# NOTE(danms): Make sure to reset us back to non-remote objects
# for each test to avoid interactions. Also, backup the object
# registry.
objects_base.NovaObject.indirection_api = None
self._base_test_obj_backup = copy.copy(
objects_base.NovaObject._obj_classes)
self.addCleanup(self._restore_obj_registry)
# NOTE(mnaser): All calls to utils.is_neutron() are cached in
# nova.utils._IS_NEUTRON. We set it to None to avoid any
# caching of that value.
utils._IS_NEUTRON = None
mox_fixture = self.useFixture(moxstubout.MoxStubout())
self.mox = mox_fixture.mox
self.stubs = mox_fixture.stubs
self.addCleanup(self._clear_attrs)
self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
self.policy = self.useFixture(policy_fixture.PolicyFixture())
CONF.set_override('fatal_exception_format_errors', True)
CONF.set_override('enabled', True, 'osapi_v3')
CONF.set_override('force_dhcp_release', False)
def _restore_obj_registry(self):
objects_base.NovaObject._obj_classes = self._base_test_obj_backup
def _clear_attrs(self):
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
group = kw.pop('group', None)
for k, v in kw.iteritems():
CONF.set_override(k, v, group)
def start_service(self, name, host=None, **kwargs):
svc = self.useFixture(ServiceFixture(name, host, **kwargs))
return svc.service
class APICoverage(object):
cover_api = None
def test_api_methods(self):
self.assertTrue(self.cover_api is not None)
api_methods = [x for x in dir(self.cover_api)
if not x.startswith('_')]
test_methods = [x[5:] for x in dir(self)
if x.startswith('test_')]
self.assertThat(
test_methods,
testtools.matchers.ContainsAll(api_methods))
class TimeOverride(fixtures.Fixture):
"""Fixture to start and remove time override."""
def setUp(self):
super(TimeOverride, self).setUp()
timeutils.set_time_override()
self.addCleanup(timeutils.clear_time_override)
class NoDBTestCase(TestCase):
"""`NoDBTestCase` differs from TestCase in that DB access is not supported.
This makes tests run significantly faster. If possible, all new tests
should derive from this class.
"""
USES_DB = False
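# Illustrative sketch only, not part of nova: a typical consumer of this
# module derives from NoDBTestCase when no database access is needed, scales
# the per-test timeout through TIMEOUT_SCALING_FACTOR, and overrides config
# options with self.flags(). The example class and test below are made up.
class _ExampleNoDBTestCase(NoDBTestCase):
    # Double whatever OS_TEST_TIMEOUT budget is configured for these tests.
    TIMEOUT_SCALING_FACTOR = 2
    def test_flags_overrides_config_for_this_test_only(self):
        # flags() goes through CONF.set_override(); the config fixture set up
        # in TestCase.setUp() cleans the override up after the test finishes.
        self.flags(use_stderr=True)
        self.assertTrue(CONF.use_stderr)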
|
|
from __future__ import unicode_literals
import time
import responses
from tunigo.api import BASE_QUERY, BASE_URL, Tunigo
from tunigo.genre import Genre, SubGenre
from tunigo.playlist import Playlist
from tunigo.release import Release
class TestApi(object):
@responses.activate
def test_repr(self):
tunigo = Tunigo(region='no', max_results=100, cache_time=3600)
assert (
tunigo.__repr__() ==
"Tunigo(region='no', max_results=100, cache_time=3600)")
@responses.activate
def test_returns_items(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(
responses.GET,
BASE_URL + '/key',
content_type='application/json',
body='{"items": [{"test": 1}]}')
result = tunigo._get('key')
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/key?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
assert len(result) == 1
assert result[0]['test'] == 1
@responses.activate
def test_returns_empty_array_if_status_not_200(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(
responses.GET,
BASE_URL + '/key',
status=404,
content_type='application/json',
body='{"items": [{"test": 1}]}')
result = tunigo._get('key')
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/key?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
assert result == []
@responses.activate
def test_returns_empty_array_if_content_type_not_application_json(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(
responses.GET,
BASE_URL + '/key',
body='{"items": [{"test": 1}]}')
result = tunigo._get('key')
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/key?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
assert result == []
@responses.activate
def test_set_given_region_query_option(self):
max_results = 100
tunigo = Tunigo(region='no', max_results=max_results)
responses.add(responses.GET, BASE_URL + '/key')
tunigo._get('key')
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/key?{}&per_page={}®ion=no'.format(
BASE_URL, BASE_QUERY, max_results))
@responses.activate
def test_set_given_query_options(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/key')
tunigo._get('key', 'test=value')
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/key?{}&per_page={}&test=value'.format(
BASE_URL, BASE_QUERY, max_results))
@responses.activate
def test_returns_playlists(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(
responses.GET,
BASE_URL + '/key',
content_type='application/json',
body="""
{
"items": [
{
"playlist": {
"title": "Title 0",
"description": "Description 0",
"image": "Image 0",
"uri": "uri:0",
"numSubscribers": 0
}
},
{
"playlist": {
"title": "Title 1",
"description": "Description 1",
"image": "Image 1",
"uri": "uri:1",
"numSubscribers": 1
}
}
]
}""")
playlists = tunigo.get_playlists('key')
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/key?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
assert len(playlists) == 2
assert isinstance(playlists[0], Playlist)
assert playlists[0].title == 'Title 0'
assert playlists[0].description == 'Description 0'
assert playlists[0].image == 'Image 0'
assert playlists[0].uri == 'uri:0'
assert playlists[0].num_subscribers == 0
assert isinstance(playlists[1], Playlist)
assert playlists[1].title == 'Title 1'
assert playlists[1].description == 'Description 1'
assert playlists[1].image == 'Image 1'
assert playlists[1].uri == 'uri:1'
assert playlists[1].num_subscribers == 1
@responses.activate
def test_sets_playlists_query_options(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/key')
tunigo.get_playlists('key', 'test=value')
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/key?{}&per_page={}&test=value'.format(
BASE_URL, BASE_QUERY, max_results))
@responses.activate
def test_caches_playlists_result(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/key')
tunigo.get_playlists('key')
tunigo.get_playlists('key')
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/key?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
@responses.activate
def test_featured_playlist_calls_uri(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/featured-playlists')
tunigo.get_featured_playlists()
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/featured-playlists?{}&per_page={}&dt={}'.format(
BASE_URL,
BASE_QUERY,
max_results,
time.strftime('%FT%H:01:00')))
@responses.activate
def test_top_lists_calls_uri(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/toplists')
tunigo.get_top_lists()
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/toplists?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
@responses.activate
def test_returns_genres(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(
responses.GET,
BASE_URL + '/genres',
content_type='application/json',
body="""
{
"items": [
{
"genre": {
"name": "Genre 0",
"id": "Id 0",
"type": "Type 0",
"templateName": "Template name 0",
"iconImageUrl": "Icon image url 0",
"iconUrl": "Icon url 0",
"moodImageUrl": "Mood image url 0",
"headerImageUrl": "Header image url 0",
"subGenres": [
{
"name": "Genre 0, subgenre 0",
"key": "Key 0, 0"
},
{
"name": "Genre 0, subgenre 1",
"key": "Key 0, 1"
}
]
}
},
{
"genre": {
"name": "Genre 1",
"id": "Id 1",
"type": "Type 1",
"templateName": "Template name 1",
"iconImageUrl": "Icon image url 1",
"iconUrl": "Icon url 1",
"moodImageUrl": "Mood image url 1",
"headerImageUrl": "Header image url 1",
"subGenres": [
{
"name": "Genre 1, subgenre 0",
"key": "Key 1, 0"
},
{
"name": "Genre 1, subgenre 1",
"key": "Key 1, 1"
}
]
}
}
]
}""")
genres = tunigo.get_genres()
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/genres?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
assert len(genres) == 2
assert isinstance(genres[0], Genre)
assert genres[0].name == 'Genre 0'
assert genres[0].id == 'Id 0'
assert genres[0].type == 'Type 0'
assert genres[0].template_name == 'Template name 0'
assert genres[0].icon_image_url == 'Icon image url 0'
assert genres[0].icon_url == 'Icon url 0'
assert genres[0].mood_image_url == 'Mood image url 0'
assert genres[0].header_image_url == 'Header image url 0'
assert len(genres[0].sub_genres) == 2
assert isinstance(genres[0].sub_genres[0], SubGenre)
assert genres[0].sub_genres[0].name == 'Genre 0, subgenre 0'
assert genres[0].sub_genres[0].key == 'Key 0, 0'
assert isinstance(genres[0].sub_genres[1], SubGenre)
assert genres[0].sub_genres[1].name == 'Genre 0, subgenre 1'
assert genres[0].sub_genres[1].key == 'Key 0, 1'
assert isinstance(genres[0], Genre)
assert genres[1].name == 'Genre 1'
assert genres[1].id == 'Id 1'
assert genres[1].type == 'Type 1'
assert genres[1].template_name == 'Template name 1'
assert genres[1].icon_image_url == 'Icon image url 1'
assert genres[1].icon_url == 'Icon url 1'
assert genres[1].mood_image_url == 'Mood image url 1'
assert genres[1].header_image_url == 'Header image url 1'
assert len(genres[1].sub_genres) == 2
assert isinstance(genres[1].sub_genres[0], SubGenre)
assert genres[1].sub_genres[0].name == 'Genre 1, subgenre 0'
assert genres[1].sub_genres[0].key == 'Key 1, 0'
assert isinstance(genres[1].sub_genres[1], SubGenre)
assert genres[1].sub_genres[1].name == 'Genre 1, subgenre 1'
assert genres[1].sub_genres[1].key == 'Key 1, 1'
@responses.activate
def test_caches_genres_result(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/genres')
tunigo.get_genres()
tunigo.get_genres()
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/genres?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
@responses.activate
def test_genre_playlists_with_genre_string_calls_uri(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/genre')
tunigo.get_genre_playlists('genre')
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/genre?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
@responses.activate
def test_genre_playlists_with_genre_and_subgenre_string_calls_uri(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/genre')
tunigo.get_genre_playlists('genre', 'subgenre')
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/genre?{}&per_page={}&filter=subgenre'.format(
BASE_URL, BASE_QUERY, max_results))
@responses.activate
def test_genre_playlists_with_genre_instance_calls_uri(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/genre')
tunigo.get_genre_playlists(Genre(template_name='genre'))
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/genre?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
@responses.activate
def test_genre_playlists_with_genre_and_subgenre_instance_calls_uri(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/genre')
tunigo.get_genre_playlists(
Genre(template_name='genre'),
SubGenre(key='subgenre'))
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/genre?{}&per_page={}&filter=subgenre'.format(
BASE_URL, BASE_QUERY, max_results))
@responses.activate
def test_genre_playlists_with_only_subgenre_instance_calls_uri(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/genre')
tunigo.get_genre_playlists(
sub_genre=SubGenre(key='subgenre', main_genre='genre'))
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/genre?{}&per_page={}&filter=subgenre'.format(
BASE_URL, BASE_QUERY, max_results))
@responses.activate
def test_returns_releases(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(
responses.GET,
BASE_URL + '/new-releases',
content_type='application/json',
body="""
{
"items": [
{
"release": {
"albumName": "Album 0",
"uri": "uri:0",
"artistName": "Artist 0",
"image": "Image 0",
"artistUri": "artist:uri:0"
}
},
{
"release": {
"albumName": "Album 1",
"uri": "uri:1",
"artistName": "Artist 1",
"image": "Image 1",
"artistUri": "artist:uri:1"
}
}
]
}""")
releases = tunigo.get_new_releases()
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/new-releases?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
assert len(releases) == 2
assert isinstance(releases[0], Release)
assert releases[0].album_name == 'Album 0'
assert releases[0].uri == 'uri:0'
assert releases[0].artist_name == 'Artist 0'
assert releases[0].image == 'Image 0'
assert releases[0].artist_uri == 'artist:uri:0'
assert isinstance(releases[1], Release)
assert releases[1].album_name == 'Album 1'
assert releases[1].uri == 'uri:1'
assert releases[1].artist_name == 'Artist 1'
assert releases[1].image == 'Image 1'
assert releases[1].artist_uri == 'artist:uri:1'
@responses.activate
def test_caches_releases_result(self):
max_results = 100
tunigo = Tunigo(max_results=max_results)
responses.add(responses.GET, BASE_URL + '/new-releases')
tunigo.get_new_releases()
tunigo.get_new_releases()
assert len(responses.calls) == 1
assert (
responses.calls[0].request.url ==
'{}/new-releases?{}&per_page={}'.format(
BASE_URL, BASE_QUERY, max_results))
|
|
import unittest
from src.binary_search_tree import BST, PARENT, KEY, LEFT, RIGHT, SIZE, \
BinarySearchTreeNode
class TestBinarySearchTreeNode(unittest.TestCase):
def test_insert(self):
bst = BinarySearchTreeNode('x', 1)
y_node = bst.insert('y', 2)
u_node = bst.insert('u', 3)
self.assertEqual(bst.key, 'x', 'root is the correct value')
        self.assertEqual(bst.left.key, 'u', 'left node is correct')
        self.assertEqual(bst.right.key, 'y', 'right node is correct')
self.assertEqual(bst.parent, None, 'root has no parent')
self.assertEqual(bst.left.parent, bst, 'left has root as parent')
self.assertEqual(bst.right.parent, bst, 'right has root as parent')
self.assertEqual(bst.size, 3, 'should update correct size of a node')
actual = bst.insert('v', 4)
self.assertEqual(actual.key, 'v', 'should insert with correct key')
self.assertEqual(actual.value, 4, 'should insert with correct value')
self.assertEqual(actual.parent, u_node, 'should have the correct parent')
def test_lookup(self):
bst = BinarySearchTreeNode('y', 1)
bst.insert('x', 2)
bst.insert('z', 3)
actual = bst.lookup('x')
expected = bst.left
self.assertEqual(actual, expected, 'should return the entire object')
actual = bst.lookup('t')
self.assertIsNone(actual, 'should not find node with key t')
def test_get_min(self):
bst = BinarySearchTreeNode('y', 1)
bst.insert('x', 2)
bst.insert('z', 3)
expected = bst.get_min()
actual = bst.left
self.assertEqual(actual, expected, 'produced correct min node')
self.assertEqual(actual.key, 'x', 'produced correct min node key')
def test_get_max(self):
bst = BinarySearchTreeNode('y', 1)
bst.insert('x', 2)
bst.insert('z', 3)
expected = bst.get_max()
actual = bst.right
self.assertEqual(actual, expected, 'produced correct max node')
self.assertEqual(actual.key, 'z', 'produced correct max node key')
def test_predecessor(self):
""" Following tree structure:
(x)
/ \
(t) (y)
\ \
(u) (z)
\
(v)
"""
x_node = BinarySearchTreeNode('x', 1)
y_node = x_node.insert('y', 2)
z_node = x_node.insert('z', 3)
t_node = x_node.insert('t', 4)
u_node = x_node.insert('u', 5)
v_node = x_node.insert('v', 6)
        self.assertEqual(x_node.predecessor(), v_node, 'predecessor of x is v')
self.assertEqual(y_node.predecessor(), x_node, 'predecessor of y is x')
self.assertEqual(z_node.predecessor(), y_node, 'predecessor of z is y')
self.assertEqual(t_node.predecessor(), None, 't has no predecessor')
self.assertEqual(u_node.predecessor(), t_node, 'predecessor of u is t')
self.assertEqual(v_node.predecessor(), u_node, 'predecessor of v is u')
def test_successor(self):
""" Following tree structure:
(x)
/ \
(t) (y)
\ \
(u) (z)
\
(v)
"""
x_node = BinarySearchTreeNode('x', 1)
y_node = x_node.insert('y', 2)
z_node = x_node.insert('z', 3)
t_node = x_node.insert('t', 4)
u_node = x_node.insert('u', 5)
v_node = x_node.insert('v', 6)
self.assertEqual(x_node.successor(), y_node, 'successor of x is y')
self.assertEqual(y_node.successor(), z_node, 'successor of y is z')
self.assertEqual(z_node.successor(), None, 'z has no successor')
self.assertEqual(t_node.successor(), u_node, 'successor of t is u')
self.assertEqual(u_node.successor(), v_node, 'successor of u is v')
self.assertEqual(v_node.successor(), x_node, 'successor of v is x')
def test_rank(self):
""" Following tree structure:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
This will yield: [q, r, s, t, u, v, x, y, z]
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
self.assertEqual(q_node.rank(), 0, 'q is on index 0')
self.assertEqual(r_node.rank(), 1, 'r is on index 1')
self.assertEqual(s_node.rank(), 2, 's is on index 2')
self.assertEqual(t_node.rank(), 3, 't is on index 3')
self.assertEqual(u_node.rank(), 4, 'u is on index 4')
self.assertEqual(v_node.rank(), 5, 'v is on index 5')
self.assertEqual(x_node.rank(), 6, 'x is on index 6')
self.assertEqual(y_node.rank(), 7, 'y is on index 7')
self.assertEqual(z_node.rank(), 8, 'z is on index 8')
def test_select(self):
""" Following tree structure:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
This will yield: [q, r, s, t, u, v, x, y, z]
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
self.assertEqual(x_node.select(0), q_node, 'q is on index 0')
self.assertEqual(x_node.select(1), r_node, 'r is on index 1')
self.assertEqual(x_node.select(2), s_node, 's is on index 2')
self.assertEqual(x_node.select(3), t_node, 't is on index 3')
self.assertEqual(x_node.select(4), u_node, 'u is on index 4')
self.assertEqual(x_node.select(5), v_node, 'v is on index 5')
self.assertEqual(x_node.select(6), x_node, 'x is on index 6')
self.assertEqual(x_node.select(7), y_node, 'y is on index 7')
self.assertEqual(x_node.select(8), z_node, 'z is on index 8')
self.assertEqual(x_node.select(10), None, 'there is no node on index 10')
self.assertEqual(x_node.select(-5), None, 'there is no node on index -5')
def test_delete(self):
""" Following tree structure:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
# Case 1: delete leaf.
# (x) (x)
# / \ / \
# (t) (y) (t) (y)
# / \ \ => / \ \
# (r) (u) (z) (r) (u) (z)
# / \ \ / \
# (q) (s) (v) (q) (s)
v_node.delete()
        self.assertEqual(u_node.right, None, 'node u has no right child')
self.assertEqual(v_node.parent, None, 'v was completely detached')
# Case 2: delete internal node with one child.
# (x) (x)
# / \ / \
# (t) (y) (t) (z)
# / \ \ => / \
# (r) (u) (z) (r) (u)
# / \ / \
# (q) (s) (q) (s)
y_node.delete()
self.assertEqual(x_node.right, z_node, 'right child of x is now z')
self.assertEqual(z_node.parent, x_node, 'parent of z is now x')
self.assertEqual(y_node.parent, None, 'y was detached from its parent')
        self.assertEqual(y_node.right, None, 'y was completely detached from its right child')
# Case 3, delete internal node with two children.
# (x) (x)
# / \ / \
# (t) (z) (s) (z)
# / \ => / \
# (r) (u) (r) (u)
# / \ /
# (q) (s) (q)
t_node.delete()
self.assertEqual(t_node.parent, None, 't was detached from parent')
self.assertEqual(t_node.left, None, 't was detached from left child')
self.assertEqual(t_node.right, None, 't was detached from right child')
self.assertEqual(s_node.parent, x_node, 's new parent is x')
self.assertEqual(s_node.left, r_node, 's left child is r')
self.assertEqual(s_node.right, u_node, 's right child is u')
self.assertEqual(r_node.right, None, 's was displaced from being right child of r')
# Case 3, delete the root.
# (x) (u)
# / \ / \
# (s) (z) (s) (z)
# / \ => /
# (r) (u) (r)
# / /
# (q) (q)
x_node.delete()
self.assertEqual(x_node.parent, None, 'root x was detached')
self.assertEqual(x_node.left, None, 'root x was detached from left child')
self.assertEqual(x_node.right, None, 'root x was detached from right child')
self.assertEqual(u_node.parent, None, 'u is the new root')
self.assertEqual(u_node.left, s_node, 'left child of root u is now s')
        self.assertEqual(u_node.right, z_node, 'right child of root u is now z')
def test_in_order_traversal(self):
""" Uses following tree structure:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
expected = ['q', 'r', 's', 't', 'u', 'v', 'x', 'y', 'z']
nodes = x_node.in_order_traversal()
actual = map(lambda n: n.key, nodes)
self.assertEqual(actual, expected, 'correct in-order traversal')
def test_pre_order_traversal(self):
""" Uses following tree structure:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
expected = ['x', 't', 'r', 'q', 's', 'u', 'v', 'y', 'z']
nodes = x_node.pre_order_traversal()
actual = map(lambda n: n.key, nodes)
self.assertEqual(actual, expected, 'correct pre-order traversal')
def test_post_order_traversal(self):
""" Uses following tree structure:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
expected = ['q', 's', 'r', 'v', 'u', 't', 'z', 'y', 'x']
nodes = x_node.post_order_traversal()
actual = map(lambda n: n.key, nodes)
        self.assertEqual(actual, expected, 'correct post-order traversal')
def test_common_ancestor(self):
""" Uses following tree structure:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
        self.assertEqual(t_node.common_ancestor(t_node), t_node, 'both nodes are the same')
        self.assertEqual(t_node.common_ancestor(v_node), t_node, 't is ancestor of v')
        self.assertEqual(v_node.common_ancestor(u_node), u_node, 'u is parent of v')
        self.assertEqual(q_node.common_ancestor(x_node), x_node, 'x is root')
        self.assertEqual(q_node.common_ancestor(z_node), x_node, 'x is root')
def test_common_ancestor_prime(self):
""" Uses following tree structure:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
self.assertEqual(t_node.common_ancestor_prime(t_node), t_node, 'both nodes are the same')
self.assertEqual(t_node.common_ancestor_prime(v_node), t_node, 't is ancestor of v')
self.assertEqual(v_node.common_ancestor_prime(u_node), u_node, 'u is parent of v')
self.assertEqual(q_node.common_ancestor_prime(x_node), x_node, 'x is root')
self.assertEqual(q_node.common_ancestor_prime(z_node), x_node, 'x is root')
def test_is_identical(self):
""" Using the following tree structure and two test candidates:
(x) (x) (x)
/ \ / \ / \
(t) (y) (t) (y) (t) (y)
/ \ \ / \ \ / \ \
(r) (u) (z) (r) (u) (z) (r) (u) (z)
/ \ \ / \ \ / \
(q) (s) (v) (q) (s) (v) (q) (s)
(root) (subject1) (subject2)
"""
root = BinarySearchTreeNode('x')
root.insert('y')
root.insert('z')
root.insert('t')
root.insert('u')
root.insert('v')
root.insert('r')
root.insert('s')
root.insert('q')
subject1 = BinarySearchTreeNode('x')
subject1.insert('y')
subject1.insert('z')
subject1.insert('t')
subject1.insert('u')
subject1.insert('v')
subject1.insert('r')
subject1.insert('s')
subject1.insert('q')
self.assertTrue(root.is_identical(subject1), 'should detect identical trees')
subject2 = BinarySearchTreeNode('x')
subject2.insert('y')
subject2.insert('z')
subject2.insert('t')
subject2.insert('u')
subject2.insert('r')
subject2.insert('s')
subject2.insert('q')
self.assertFalse(root.is_identical(subject2), 'should detect non-identical trees')
def test_is_subtree_of(self):
""" Using the following tree structure and two test candidates:
(x)
/ \ (u)
(t) (y) (r) \
/ \ \ / \ (v)
(r) (u) (z) (q) (s) \
/ \ \ (w)
(q) (s) (v)
(root) (subject1) (subject2)
"""
root = BinarySearchTreeNode('x')
root.insert('y')
root.insert('z')
root.insert('t')
root.insert('u')
root.insert('v')
root.insert('r')
root.insert('s')
root.insert('q')
        subject1 = BinarySearchTreeNode('r')
        subject1.insert('q')
        subject1.insert('s')
self.assertTrue(subject1.is_subtree_of(root), 'should find the subtree')
        subject2 = BinarySearchTreeNode('u')
        subject2.insert('v')
        subject2.insert('w')
self.assertFalse(subject2.is_subtree_of(root), 'should not find the subtree')
def test_diameter(self):
""" Using the following tree structure:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
\
(w)
"""
root = BinarySearchTreeNode('x')
y_node = root.insert('y')
z_node = root.insert('z')
t_node = root.insert('t')
u_node = root.insert('u')
v_node = root.insert('v')
w_node = root.insert('w')
r_node = root.insert('r')
s_node = root.insert('s')
q_node = root.insert('q')
        self.assertEqual(root.diameter(), 7, 'longest path in this tree spans 7 nodes')
def test_is_ballanced(self):
""" Using the following tree structure:
(x) (x)
/ \ / \
(t) (y) (t) (y)
/ \ \ / \ \
(r) (u) (z) (r) (u) (z)
/ \ \
(q) (s) (v)
\
(w)
(unballanced) (ballanced)
"""
unballanced = BinarySearchTreeNode('x')
unballanced.insert('y')
unballanced.insert('z')
unballanced.insert('t')
unballanced.insert('u')
unballanced.insert('v')
unballanced.insert('w')
unballanced.insert('r')
unballanced.insert('s')
unballanced.insert('q')
self.assertFalse(unballanced.is_ballanced(),
'subject tree is not ballanced')
ballanced = BinarySearchTreeNode('x')
ballanced.insert('y')
ballanced.insert('z')
ballanced.insert('t')
ballanced.insert('u')
ballanced.insert('r')
self.assertTrue(ballanced.is_ballanced(),
'subject tree is ballanced')
def test_merge(self):
""" Given two binary search trees check if they are correctly merged:
(y) (a) (c)
/ \ \ / \
(x) (z) + (b) = (a) (y)
\ \ / \
(c) (b) (x) (z)
(first) (second) (result)
"""
first = BinarySearchTreeNode('y')
first.insert('x')
first.insert('z')
second = BinarySearchTreeNode('a')
second.insert('b')
second.insert('c')
result = first.merge(second)
self.assertEqual(result.key, 'c', 'root is c')
self.assertEqual(result.left.key, 'a', 'left child of c is a')
self.assertEqual(result.right.key, 'y', 'right child of c is y')
self.assertEqual(result.left.right.key, 'b', 'right child of a is b')
self.assertEqual(result.right.left.key, 'x', 'left child of y is x')
self.assertEqual(result.right.right.key, 'z', 'right child of y is z')
# Utilities
def test_swap(self):
""" Following tree structure is used:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
r_node = x_node.insert('r')
u_node = x_node.insert('u')
        # 1. Swap leaf and its parent node.
# (x) (x)
# / \ / \
# (t) (y) => (r) (y)
# / \ \ / \ \
# (r) (u) (z) (t) (u) (z)
r_node.swap(t_node)
self.assertEqual(r_node.parent, x_node, 'x is now parent of r')
self.assertEqual(r_node.left, t_node, 't is left child of r')
        self.assertEqual(r_node.right, u_node, 'u is right child of r')
self.assertEqual(t_node.parent, r_node, 'r is now parent of t')
self.assertEqual(t_node.left, None, 't has no left child')
self.assertEqual(t_node.right, None, 't has no right child')
# 2. Swap leaf with another middle node.
# (x) (x)
# / \ / \
# (r) (y) => (r) (u)
# / \ \ / \ \
# (t) (u) (z) (t) (y) (z)
u_node.swap(y_node)
self.assertEqual(u_node.parent, x_node, 'x is now parent of u')
self.assertEqual(u_node.left, None, 'u has no left child')
self.assertEqual(u_node.right, z_node, 'z is right child of u')
self.assertEqual(y_node.parent, r_node, 'r is now parent of y')
self.assertEqual(y_node.left, None, 'y has no left child')
self.assertEqual(y_node.right, None, 'y has no right child')
# 3. Swap leaf with another leaf.
# (x) (x)
# / \ / \
# (r) (u) => (r) (u)
# / \ \ / \ \
# (t) (y) (z) (z) (y) (t)
        t_node.swap(z_node)
self.assertEqual(t_node.parent, u_node, 'u is now parent of t')
self.assertEqual(t_node.left, None, 't has no left child')
self.assertEqual(t_node.right, None, 't has no right child')
self.assertEqual(z_node.parent, r_node, 'r is now parent of z')
        self.assertEqual(z_node.left, None, 'z has no left child')
        self.assertEqual(z_node.right, None, 'z has no right child')
        # 4. Swap leaf with root.
# (x) (z)
# / \ / \
# (r) (u) => (r) (u)
# / \ \ / \ \
# (z) (y) (t) (x) (y) (t)
z_node.swap(x_node)
self.assertEqual(z_node.parent, None, 'z is now a root so no parent')
self.assertEqual(z_node.left, r_node, 'left child of z is r')
self.assertEqual(z_node.right, u_node, 'right child of z is u')
self.assertEqual(x_node.parent, r_node, 'r is now parent of x')
self.assertEqual(x_node.left, None, 'x has no left child')
self.assertEqual(x_node.right, None, 'x has no right child')
def test_rotate_left(self):
""" Uses following tree structure, test rotate left between
nodes t and u:
(x) (x)
/ \ / \
(t) (y) (u) (y)
/ \ \ => / \ \
(r) (u) (z) (t) (v) (z)
/ \ \ /
(q) (s) (v) (r)
/ \
(q) (s)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
t_node.rotate('left')
self.assertEqual(u_node.parent, x_node, 'parent of u is now x')
self.assertEqual(x_node.left, u_node, 'left child of x is u')
self.assertEqual(u_node.left, t_node, 'left node of u is t')
self.assertEqual(t_node.parent, u_node, 'parent of t is u')
        self.assertEqual(u_node.right, v_node, 'right node of u is v')
self.assertEqual(v_node.parent, u_node, 'parent of v is u')
self.assertEqual(t_node.parent, u_node, 'parent of t is u')
self.assertEqual(t_node.left, r_node, 'left child of t is r')
self.assertEqual(r_node.parent, t_node, 'parent node of r is t')
# Test sizes of the newly rotated nodes
self.assertEqual(t_node.size, 4, 't can now reach 4 nodes')
self.assertEqual(u_node.size, 6, 'u can now reach 6 nodes')
def test_rotate_right(self):
""" Uses following tree structure, test rotate right between
nodes r and t.
(x) (x)
/ \ / \
(t) (y) (r) (y)
/ \ \ => / \ \
(r) (u) (z) (q) (t) (z)
/ \ \ / \
(q) (s) (v) (s) (u)
\
(v)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
t_node.rotate('right')
self.assertEqual(r_node.parent, x_node, 'x is parent of r')
self.assertEqual(x_node.left, r_node, 'left child of x is r')
self.assertEqual(r_node.left, q_node, 'q is left child of r')
self.assertEqual(q_node.parent, r_node, 'parent of q is r')
        self.assertEqual(r_node.right, t_node, 't is right child of r')
        self.assertEqual(t_node.parent, r_node, 'parent of t is r')
self.assertEqual(t_node.left, s_node, 'left child of t is s')
self.assertEqual(s_node.parent, t_node, 'new parent of s is t')
self.assertEqual(u_node.parent, t_node, 'no change in the parent of u')
# Test sizes of the newly rotated nodes
self.assertEqual(t_node.size, 4, 't can now reach 4 nodes')
        self.assertEqual(r_node.size, 6, 'r can now reach 6 nodes')
def test_depth(self):
""" Using the following tree:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
self.assertEqual(x_node.depth(), 0, 'x is root so its depth is 0')
self.assertEqual(v_node.depth(), 3, 'v is leaf with depth 3')
def test_height(self):
""" Using the following tree:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
self.assertEqual(x_node.height(), 3, 'x is root so its height is 3')
self.assertEqual(t_node.height(), 2, 'height of t is 2')
self.assertEqual(v_node.height(), 0, 'height of leaf v is 0')
        self.assertEqual(r_node.height(), 1, 'height of r is 1')
        self.assertEqual(t_node.height(), 2, 'height of t is 2')
def test_min_depth(self):
""" Using the following tree:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
self.assertEqual(x_node.min_depth(), (z_node, 2),
'z node is the leaf with minimum depth of 2')
def test_max_depth(self):
""" Using the following tree:
(x)
/ \
(t) (y)
/ \ \
(r) (u) (z)
/ \ \
(q) (s) (v)
"""
x_node = BinarySearchTreeNode('x')
y_node = x_node.insert('y')
z_node = x_node.insert('z')
t_node = x_node.insert('t')
u_node = x_node.insert('u')
v_node = x_node.insert('v')
r_node = x_node.insert('r')
s_node = x_node.insert('s')
q_node = x_node.insert('q')
self.assertEqual(x_node.max_depth(), (q_node, 3),
'q node is the first leaf with maximum depth of 3')
# Statics
def test_from_sorted_list(self):
""" Build the following tree:
(d)
/ \
(b) (f)
/ \ / \
(a) (c) (e) (g)
"""
arr = [('a',1), ('b',2), ('c',3), ('d',4), ('e',5), ('f',6), ('g',7)]
root = BinarySearchTreeNode.from_sorted_list(arr)
self.assertEqual(root.key, 'd', 'd is root')
self.assertEqual(root.left.key, 'b', 'left child of d is b')
self.assertEqual(root.right.key, 'f', 'right child of d is f')
self.assertEqual(root.left.left.key, 'a', 'left child of b is a')
        self.assertEqual(root.left.right.key, 'c', 'right child of b is c')
self.assertEqual(root.right.left.key, 'e', 'left child of f is e')
        self.assertEqual(root.right.right.key, 'g', 'right child of f is g')
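# Illustrative sketch, not one of the test cases above: rank() and select()
# exercised in TestBinarySearchTreeNode give order statistics over the keys.
# Wrapped in a helper so nothing executes at import time; the function name
# is an assumption made for illustration.
def _order_statistics_demo():
    root = BinarySearchTreeNode('d')
    for key in ['b', 'f', 'a', 'c', 'e', 'g']:
        root.insert(key)
    # An in-order traversal visits the keys in sorted order: a .. g.
    ordered = [node.key for node in root.in_order_traversal()]
    # rank() is a node's 0-based position in that order and select(i) returns
    # the node at position i, so the two operations are inverses.
    assert root.rank() == ordered.index('d')
    assert root.select(0).key == 'a'
    return ordered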
class TestBST(unittest.TestCase):
""" Running examples:
            (3)              (5)
           /   \             /
         (1)    (5)        (4)
           \    /          /
           (2) (4)       (3)
                         /
                       (2)
                       /
                     (1)
"""
def test_build(self):
b = BST.build([3,1,2,5,4])
def test_insert(self):
b = BST.build([])
actual = b.insert(3)
expected = [None, 3, None, None, 1]
self.assertEqual(actual, expected, 'should have inserted the '+
'correct single node into the BST')
actual = b.insert(1)
self.assertEqual(actual[PARENT][KEY], 3, 'should be a child of 3')
self.assertIsNone(actual[LEFT], 'should have no left child')
        self.assertIsNone(actual[RIGHT], 'should have no right child')
self.assertEqual(actual[SIZE], 1, 'new node is a leaf')
self.assertEqual(b.root[SIZE], 2, 'root can access 2 nodes')
def test_search(self):
b = BST.build([3,1,2,5,4])
self.assertIsNotNone(b.search(3), 'should find 3 in the bst')
self.assertIsNotNone(b.search(1), 'should find 1 in the bst')
self.assertIsNotNone(b.search(2), 'should find 2 in the bst')
self.assertIsNotNone(b.search(5), 'should find 5 in the bst')
self.assertIsNotNone(b.search(4), 'should find 4 in the bst')
self.assertIsNone(b.search(10), 'should not find 10 in the bst')
def test_max(self):
b = BST.build([3,1,2,5,4])
self.assertEqual(b.get_max()[KEY], 5, 'should find the max value')
def test_min(self):
b = BST.build([3,1,2,5,4])
self.assertEqual(b.get_min()[KEY], 1, 'should find the min value')
def test_output(self):
b = BST.build([3,1,2,5,4])
actual = b.list_sorted()
expected = [1,2,3,4,5]
self.assertEqual(actual, expected, 'should list the key in order')
def test_predecessor(self):
b = BST.build([3,1,2,5,4])
actual = b.predecessor(6)
self.assertIsNone(actual, 'did not find any node with key 6')
actual = b.predecessor(1)
self.assertIsNone(actual, '1 is min, so no predecessor')
actual = b.predecessor(2)
self.assertEqual(actual[KEY], 1, 'predecessor of 2 is 1')
actual = b.predecessor(3)
self.assertEqual(actual[KEY], 2, 'predecessor of 3 is 2')
actual = b.predecessor(4)
self.assertEqual(actual[KEY], 3, 'predecessor of 4 is 3')
actual = b.predecessor(5)
        self.assertEqual(actual[KEY], 4, 'predecessor of 5 is 4')
def test_successor(self):
b = BST.build([3,1,2,5,4])
actual = b.successor(6)
self.assertIsNone(actual, 'did not find any node with key 6')
actual = b.successor(1)
self.assertEqual(actual[KEY], 2, 'successor of 1 is 2')
actual = b.successor(2)
self.assertEqual(actual[KEY], 3, 'successor of 2 is 3')
actual = b.successor(3)
self.assertEqual(actual[KEY], 4, 'successor of 3 is 4')
actual = b.successor(4)
self.assertEqual(actual[KEY], 5, 'successor of 4 is 5')
actual = b.successor(5)
self.assertIsNone(actual, '5 is max of tree so no successor')
def test_range_query(self):
b = BST.build([3,1,2,5,4])
actual = b.range_query(2, 4)
expected = [2,3,4]
self.assertEqual(actual, expected, 'should return a range of data')
def test_delete(self):
b = BST.build([3,1,2,5,4])
removed = b.delete(2) # Node is a leaf.
self.assertEqual(removed[KEY], 2, 'returns the removed node')
self.assertIsNone(b.search(2), 'should not find 2 anymore')
self.assertIsNone(b.search(1)[RIGHT], '1 has no more children')
removed = b.delete(5) # Node has only one child.
self.assertEqual(removed[KEY], 5, 'returns the removed node')
self.assertIsNone(b.search(5), 'should have removed 5')
self.assertEqual(b.search(4)[PARENT][KEY], 3, 'should have hoisted 4')
removed = b.delete(3) # Node has both children.
self.assertEqual(removed[KEY], 3, 'returns the removed node')
self.assertIsNone(b.search(3), 'should have removed 3')
self.assertEqual(b.root[KEY], 1, 'new root is 1')
def test_node_size_gets_modified_on_insertion(self):
b = BST.build([3,1,2,5,4])
self.assertEqual(b.root[SIZE], 5, 'root has size of 5')
b.insert(6)
self.assertEqual(b.root[SIZE], 6, 'new root size is now 6')
self.assertEqual(b.search(1)[SIZE], 2, '1 has size 2')
self.assertEqual(b.search(2)[SIZE], 1, '2 has size 1')
self.assertEqual(b.search(3)[SIZE], 6, '3 has size 6')
self.assertEqual(b.search(4)[SIZE], 1, '4 has size 1')
self.assertEqual(b.search(5)[SIZE], 3, '5 has size 3')
self.assertEqual(b.search(6)[SIZE], 1, '6 has size 1')
def test_node_size_gets_modified_on_deletion(self):
b = BST.build([3,1,2,5,4])
        self.assertEqual(b.search(3)[SIZE], 5, '3 has size 5')
b.delete(2) # Node is a leaf.
self.assertEqual(b.search(1)[SIZE], 1, '1 has no more children')
self.assertEqual(b.search(3)[SIZE], 4, 'root has 4 children now')
b.delete(5) # Node is in the middle.
        self.assertEqual(b.search(4)[SIZE], 1, 'the size of 4 is unchanged')
self.assertEqual(b.search(3)[SIZE], 3, 'root has 3 children after del')
b.delete(3) # Node is the root.
        self.assertEqual(b.search(4)[SIZE], 1, 'the size of 4 is unchanged')
self.assertEqual(b.search(1)[SIZE], 2, 'the new root is 1 and has size of 2')
def test_select(self):
b = BST.build([3,1,2,5,4])
self.assertEqual(b.select(1)[KEY], 1, '1st elem is 1')
self.assertEqual(b.select(2)[KEY], 2, '2nd elem is 2')
self.assertEqual(b.select(3)[KEY], 3, '3rd elem is 3')
self.assertEqual(b.select(4)[KEY], 4, '4th elem is 4')
self.assertEqual(b.select(5)[KEY], 5, '5th elem is 5')
def test_rank(self):
b = BST.build([3,1,2,5,4])
self.assertEqual(b.rank(1), 0, '0 keys smaller than 1')
self.assertEqual(b.rank(2), 1, '1 key smaller than 2')
self.assertEqual(b.rank(3), 2, '2 keys smaller than 3')
self.assertEqual(b.rank(4), 3, '3 keys smaller than 4')
self.assertEqual(b.rank(5), 4, '4 keys smaller than 5')
self.assertIsNone(b.rank(6), 'key 6 does not exist')
def test_rotate(self):
""" Test the following right rotation, switching 5 and 7.
(3) (3)
/ \ / \
(1) (5) (1) (7)
\ / \ => \ / \
(2) (4) (7) (2) (5) (8)
/ \ / \
(6) (8) (4) (6)
"""
b = BST.build([3,1,2,5,4,7,8,6])
b.rotate(b.search(5), RIGHT)
root = b.search(3)
node = b.search(5)
child = b.search(7)
        self.assertEqual(root[LEFT][KEY], 1, 'root left child unchanged')
self.assertEqual(root[RIGHT][KEY], 7, '7 swapped places with 5')
self.assertEqual(node[PARENT][KEY], 7, '7 new parent of 5')
self.assertEqual(node[LEFT][KEY], 4, 'left child of 5 remains unchanged')
self.assertEqual(node[RIGHT][KEY], 6, 'left child of 7 becomes new '+
'right child of 5')
self.assertEqual(child[PARENT][KEY], 3, 'new parent of 7 is root')
self.assertEqual(child[LEFT][KEY], 5, 'left child of 7 is now '+
'its old parent 5')
self.assertEqual(child[RIGHT][KEY], 8, '7 old right child is unchanged')
def test_rotate_correctly_updates_sizes(self):
""" Makes sure the rotate operation updates node sizes accordingly.
(3) (3)
/ \ / \
(1) (5) (1) (7)
\ / \ => \ / \
(2) (4) (7) (2) (5) (8)
/ \ / \
(6) (8) (4) (6)
"""
b = BST.build([3,1,2,5,4,7,8,6])
b.rotate(b.search(5), RIGHT)
self.assertEqual(b.search(3)[SIZE], 8, 'root has the same size')
self.assertEqual(b.search(5)[SIZE], 3, 'rotated node has new size')
self.assertEqual(b.search(7)[SIZE], 5, 'rotated node has new size')
def test_rotate_in_isolation(self):
""" Test makes sure the rotation operation works in isolation:
No parent P, no subtrees A,B or C. Here's the tree format:
Schema (for right rotations):
(None) (None)
| |
(2) (3)
/ \ => / \
(None) (3) (2) (None)
/ \ / \
(None) (None) (None) (None)
Schema (for left rotations):
(None) (None)
| |
(3) (2)
/ \ => / \
        (2) (None) (None) (3)
/ \ / \
(None) (None) (None) (None)
"""
b1 = BST.build([2,3])
n2 = b1.search(2)
n3 = b1.search(3)
b1.rotate(n2, RIGHT)
self.assertEqual(b1.root[KEY], 3, 'root has changed')
self.assertEqual(n2[PARENT], n3)
self.assertIsNone(n2[LEFT])
self.assertIsNone(n2[RIGHT])
self.assertIsNone(n3[PARENT])
self.assertEqual(n3[LEFT], n2)
self.assertIsNone(n3[RIGHT])
b2 = BST.build([3,2])
n2 = b2.search(2)
n3 = b2.search(3)
b2.rotate(n3, LEFT)
self.assertEqual(b2.root[KEY], 2, 'root has changed')
self.assertIsNone(n2[PARENT])
self.assertIsNone(n2[LEFT])
self.assertEqual(n2[RIGHT], n3)
self.assertEqual(n3[PARENT], n2)
self.assertIsNone(n3[LEFT])
self.assertIsNone(n3[RIGHT])
def test_join(self):
""" Tests the method to join the current tree with another one. """
bst1 = BST.build([1,3,5])
bst2 = BST.build([2,4,6])
joined = BST.join(bst1, bst2)
self.assertTrue(BST.is_binary_search_tree(joined.root),
'should have built a binary search tree')
self.assertEqual(joined.root[KEY], 3)
self.assertEqual(joined.root[SIZE], 3)
self.assertEqual(joined.root[LEFT][KEY], 1)
self.assertEqual(joined.root[LEFT][SIZE], 2)
self.assertEqual(joined.root[LEFT][RIGHT][KEY], 2)
self.assertEqual(joined.root[LEFT][RIGHT][SIZE], 1)
self.assertEqual(joined.root[RIGHT][KEY], 5)
self.assertEqual(joined.root[RIGHT][SIZE], 2)
self.assertEqual(joined.root[RIGHT][LEFT][KEY], 4)
self.assertEqual(joined.root[RIGHT][LEFT][SIZE], 1)
self.assertEqual(joined.root[RIGHT][RIGHT][KEY], 6)
self.assertEqual(joined.root[RIGHT][RIGHT][SIZE], 1)
def test_in_order_traversal(self):
""" Running examples:
(3)
/ \
(1) (5)
/ \ / \
(0) (2) (4) (7)
"""
tree = BST.build([3, 1, 0, 2, 5, 4, 7])
expected = [0, 1, 2, 3, 4, 5, 7]
actual = tree.in_order_traversal()
self.assertEqual(actual, expected, 'in-order traversal')
def test_pre_order_traversal(self):
""" Running examples:
(3)
/ \
(1) (5)
/ \ / \
(0) (2) (4) (7)
"""
tree = BST.build([3, 1, 0, 2, 5, 4, 7])
expected = [3, 1, 0, 2, 5, 4, 7]
actual = tree.pre_order_traversal()
self.assertEqual(actual, expected, 'pre-order traversal')
def test_post_order_traversal(self):
""" Running examples:
(3)
/ \
(1) (5)
/ \ / \
(0) (2) (4) (7)
"""
tree = BST.build([3, 1, 0, 2, 5, 4, 7])
expected = [0, 2, 1, 4, 7, 5, 3]
actual = tree.post_order_traversal()
self.assertEqual(actual, expected, 'post-order traversal')
def test_is_subtree(self):
""" Given the following binary tree:
(3)
/ \
(1) (5)
/ \ / \
(0) (2) (4) (7)
"""
tree = BST.build([3, 1, 0, 2, 5, 4, 7])
subtree1 = BST.build([1, 0, 2]) # Left subtree
subtree2 = BST.build([2]) # A leaf.
subtree3 = BST.build([3, 1, 0, 2, 5, 4, 7]) # The same tree.
subtree4 = BST.build([5, 4, 8]) # Modified right subtree.
self.assertTrue(tree.is_subtree(subtree1), 'the left subtree')
self.assertTrue(tree.is_subtree(subtree2), 'a tree with only leaf')
self.assertTrue(tree.is_subtree(subtree3), 'the same as original tree')
self.assertFalse(tree.is_subtree(subtree4), 'modified right subtree')
def test_is_subtree_in_case_of_duplicate_root_keys(self):
""" Given the following binary tree:
(4)
/ \
(2) (5)
/
(2)
/
(1)
"""
tree = BST.build([4, 2, 2, 1, 5])
subtree = BST.build([2, 1])
actual = tree.is_subtree(subtree)
self.assertTrue(actual, 'should discover the correct subtree')
def test_is_subtree_when_duplicate_key_is_not_immediate_descendant(self):
""" Given the following tree and the lookup subtree:
(3)
/ \
(2) (6) (3)
/ \ \
(3) (7) (5)
\
(5)
"""
tree = BST.build([3, 2, 6, 3, 7, 5, 3])
subtree = BST.build([3, 3])
actual = tree.is_subtree(subtree)
self.assertTrue(actual, 'should discover the correct subtree')
def test_diameter(self):
""" Given the following binary search tree:
(3)
/ \
(2) (4)
/ \
(1) (5)
"""
tree = BST.build([3,2,1,4,5])
actual = tree.diameter()
expected = 5
self.assertEqual(actual, expected, 'should return the path with '+
'the max number of vertices')
def test_unballanced_graph_diameter(self):
""" Given the following binary search tree:
(1)
\
(2)
\
(3)
\
(4)
\
(5)
"""
tree = BST.build([1,2,3,4,5])
actual = tree.diameter()
expected = 5
self.assertEqual(actual, expected, 'should return the path with '+
'the max number of vertices')
def test_is_ballanced_binary_search_tree(self):
""" Test three cases:
(3) (3) (3)
/ \ / \
(1) (7) (1) (7)
\ / \ / \
(2) (5) (10) (5) (10)
/ \
(4) (6)
"""
bst1 = BST.build([3])
bst2 = BST.build([3, 1, 2, 7, 5, 4, 6, 10])
bst3 = BST.build([3, 1, 7, 5, 10])
self.assertTrue(BST.is_ballanced_binary_search_tree(bst1))
self.assertFalse(BST.is_ballanced_binary_search_tree(bst2))
self.assertTrue(BST.is_ballanced_binary_search_tree(bst3))
def test_is_binary_search_tree(self):
""" Construct two trees, a plain one and a binary search tree:
- binary search tree - - non-search-tree -
(3) (3)
/ \ / \
(1) (5) (9) (7)
\ / \ \ / \
(2) (4) (7) (2) (5) (4)
/ \ / \
(6) (8) (10) (6)
"""
n10 = [None, 10, None, None]
n6 = [None, 6, None, None]
n4 = [None, 4, None, None]
n2 = [None, 2, None, None]
n5 = [None, 5, n10, n6]
n10[PARENT] = n5
n6[PARENT] = n5
n7 = [None, 7, n5, n4]
n5[PARENT] = n7
n4[PARENT] = n7
n9 = [None, 9, None, n2]
n2[PARENT] = n9
n3 = [None, 3, n9, n7]
n9[PARENT] = n3
n7[PARENT] = n3
notSearchTree = n3
        trueSearchTree = BST.build([3,1,2,5,4,7,6,8]).root
self.assertTrue(BST.is_binary_search_tree(trueSearchTree),
'should detect a correct search tree')
self.assertFalse(BST.is_binary_search_tree(notSearchTree),
                         'should detect when a tree is not a search tree')
def test_from_sorted(self):
""" Tests construction of a BST from a sorted array. """
a = [1,2,3,4,5,5,6]
tree = BST.from_sorted(a)
self.assertTrue(BST.is_binary_search_tree(tree.root),
'should have built a binary search tree')
self.assertEqual(tree.root[KEY], 4)
self.assertEqual(tree.root[SIZE], 3)
self.assertEqual(tree.root[LEFT][KEY], 2)
self.assertEqual(tree.root[LEFT][SIZE], 2)
self.assertEqual(tree.root[LEFT][LEFT][KEY], 1)
self.assertEqual(tree.root[LEFT][LEFT][SIZE], 1)
self.assertEqual(tree.root[LEFT][RIGHT][KEY], 3)
self.assertEqual(tree.root[LEFT][RIGHT][SIZE], 1)
self.assertEqual(tree.root[RIGHT][KEY], 5)
self.assertEqual(tree.root[RIGHT][SIZE], 2)
self.assertEqual(tree.root[RIGHT][LEFT][KEY], 5)
self.assertEqual(tree.root[RIGHT][LEFT][SIZE], 1)
self.assertEqual(tree.root[RIGHT][RIGHT][KEY], 6)
self.assertEqual(tree.root[RIGHT][RIGHT][SIZE], 1)
def test_from_sorted_with_an_inballanced_tree(self):
""" Tests construction of a BST from a sorted array. """
a = [1,2]
tree = BST.from_sorted(a)
self.assertTrue(BST.is_binary_search_tree(tree.root),
'should have built a binary search tree')
self.assertEqual(tree.root[KEY], 1)
self.assertEqual(tree.root[SIZE], 2)
self.assertEqual(tree.root[RIGHT][KEY], 2)
self.assertEqual(tree.root[RIGHT][SIZE], 1)
|
|
"""
pghoard - main pghoard daemon
Copyright (c) 2015 Ohmu Ltd
See LICENSE for details
"""
from __future__ import print_function
from contextlib import closing
import datetime
import json
import logging
import logging.handlers
import os
import psycopg2
import random
import signal
import shutil
import socket
import sys
import time
from . basebackup import PGBaseBackup
from . common import (
create_pgpass_file, get_connection_info,
convert_pg_version_number_to_numeric,
default_log_format_str, set_syslog_handler, Queue)
from . compressor import Compressor
from . errors import InvalidConfigurationError
from . inotify import InotifyWatcher
from . object_storage import TransferAgent, get_object_storage_transfer
from . receivexlog import PGReceiveXLog
from . webserver import WebServer
try:
from systemd import daemon # pylint: disable=no-name-in-module
except ImportError:
daemon = None
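# Basebackup directories are named <UTC date>_<counter>, e.g. "2015-06-01_0";
# the first counter value in 0-999 that does not exist yet is created and used.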
def get_basebackup_path(basebackup_path):
for i in range(1000):
final_basebackup_path = os.path.join(basebackup_path, datetime.datetime.utcnow().strftime("%Y-%m-%d") + "_" + str(i))
if not os.path.exists(final_basebackup_path):
os.makedirs(final_basebackup_path)
return final_basebackup_path
class PGHoard(object):
def __init__(self, config_path):
self.log = logging.getLogger("pghoard")
self.log_level = None
self.running = True
self.config_path = config_path
self.compression_queue = Queue()
self.transfer_queue = Queue()
self.syslog_handler = None
self.config = {}
self.site_transfers = {}
self.state = {
"backup_sites": {},
"data_transfer": {},
"startup_time": datetime.datetime.utcnow().isoformat(),
}
self.load_config()
if not os.path.exists(self.config["backup_location"]):
os.makedirs(self.config["backup_location"])
signal.signal(signal.SIGHUP, self.load_config)
signal.signal(signal.SIGINT, self.quit)
signal.signal(signal.SIGTERM, self.quit)
self.time_since_last_backup_check = 0
self.basebackups = {}
self.receivexlogs = {}
self.wal_queue = Queue()
self.compressors = []
self.transfer_agents = []
self.inotify = InotifyWatcher(self.compression_queue)
self.webserver = WebServer(self.config, self.compression_queue, self.transfer_queue)
for _ in range(2):
compressor = Compressor(self.config, self.compression_queue, self.transfer_queue)
self.compressors.append(compressor)
ta = TransferAgent(self.config, self.compression_queue, self.transfer_queue)
self.transfer_agents.append(ta)
if daemon: # If we can import systemd we always notify it
daemon.notify("READY=1")
self.log.info("Sent startup notification to systemd that pghoard is READY")
self.log.info("pghoard initialized, own_hostname: %r, cwd: %r", socket.gethostname(), os.getcwd())
def check_pg_versions_ok(self, pg_version_server, command):
        if not pg_version_server or pg_version_server < 90300:
self.log.error("pghoard does not support versions earlier than 9.3, found: %r", pg_version_server)
self.create_alert_file("version_unsupported_error")
return False
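        # The client binary prints a banner like "pg_basebackup (PostgreSQL) 9.4.5";
        # strip the prefix and convert the remainder to PostgreSQL's numeric form
        # (e.g. 9.4.5 -> 90405, assuming convert_pg_version_number_to_numeric
        # follows the server_version_num convention).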
output = os.popen(self.config.get(command + "_path", "/usr/bin/" + command) + " --version").read().strip()
pg_version_client = convert_pg_version_number_to_numeric(output[len(command + " (PostgreSQL) "):])
if pg_version_server != pg_version_client:
# FIXME: should we just check for the same major version?
self.log.error("Server version: %r does not match %s client version: %r",
pg_version_server, command, pg_version_client)
self.create_alert_file("version_mismatch_error")
return False
return True
def create_basebackup(self, cluster, connection_string, basebackup_path):
pg_version_server = self.check_pg_server_version(connection_string)
if not self.check_pg_versions_ok(pg_version_server, "pg_basebackup"):
return
final_basebackup_path = get_basebackup_path(basebackup_path)
command = [
self.config.get("pg_basebackup_path", "/usr/bin/pg_basebackup"),
"--dbname", connection_string,
"--format", "tar",
"--xlog",
"--pgdata", final_basebackup_path,
"--progress",
"--label", "initial_base_backup",
"--verbose",
]
thread = PGBaseBackup(command, final_basebackup_path, self.compression_queue)
thread.start()
self.basebackups[cluster] = thread
def check_pg_server_version(self, connection_string):
pg_version = None
try:
with closing(psycopg2.connect(connection_string)) as c:
pg_version = c.server_version
except psycopg2.OperationalError as ex:
self.log.warning("%s (%s) connecting to DB at: %r",
ex.__class__.__name__, ex, connection_string)
if 'password authentication' in str(ex):
self.create_alert_file("authentication_error")
elif 'pg_hba.conf' in str(ex):
self.create_alert_file("pg_hba_conf_error")
except Exception: # log all errors and return None; pylint: disable=broad-except
self.log.exception("Problem in getting PG server version")
return pg_version
def receivexlog_listener(self, cluster, xlog_location, connection_string, slot):
pg_version_server = self.check_pg_server_version(connection_string)
if not self.check_pg_versions_ok(pg_version_server, "pg_receivexlog"):
return
command = [
self.config.get("pg_receivexlog_path", "/usr/bin/pg_receivexlog"),
"--dbname", connection_string,
"--status-interval", "1",
"--verbose",
"--directory", xlog_location,
]
if pg_version_server >= 90400 and slot:
command.extend(["--slot", slot])
self.inotify.add_watch(xlog_location)
thread = PGReceiveXLog(command)
thread.start()
self.receivexlogs[cluster] = thread
def create_backup_site_paths(self, site):
site_path = os.path.join(self.config["backup_location"], site)
xlog_path = os.path.join(site_path, "xlog")
basebackup_path = os.path.join(site_path, "basebackup")
paths_to_create = [site_path, xlog_path, basebackup_path,
os.path.join(site_path, "compressed_xlog"),
os.path.join(site_path, "compressed_timeline")]
for path in paths_to_create:
if not os.path.exists(path):
os.makedirs(path)
return xlog_path, basebackup_path
def delete_local_wal_before(self, wal_segment, xlog_path):
self.log.debug("Starting WAL deletion from: %r before: %r", xlog_path, wal_segment)
wal_segment_no = int(wal_segment, 16)
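        # The segment name is treated as a single 24-digit zero-padded hex number
        # and decremented, e.g. "000000010000000000000003" is followed by
        # "000000010000000000000002"; deletion stops at the first name that is
        # not present on disk.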
while wal_segment_no > 0:
# Note this does not take care of timelines/older PGs
wal_segment_no -= 1
wal_segment = hex(wal_segment_no)[2:].upper().zfill(24)
wal_path = os.path.join(xlog_path, wal_segment)
if not os.path.exists(wal_path):
self.log.debug("wal_path %r not found, returning", wal_path)
break
self.log.debug("Deleting wal_file: %r", wal_path)
os.unlink(wal_path)
def delete_remote_wal_before(self, wal_segment, site):
self.log.debug("Starting WAL deletion from: %r before: %r", site, wal_segment)
wal_segment_no = int(wal_segment, 16)
storage = self.site_transfers.get(site)
while True:
# Note this does not take care of timelines/older PGs
wal_segment_no -= 1
wal_segment = hex(wal_segment_no)[2:].upper().zfill(24)
wal_path = "%s/xlog/%s" % (site, wal_segment)
self.log.debug("Deleting wal_file: %r", wal_path)
try:
if not storage.delete_key(wal_path):
self.log.debug("Could not delete wal_file: %r, returning", wal_path)
break
except: # FIXME: don't catch all exceptions; pylint: disable=bare-except
self.log.exception("Problem deleting: %r", wal_path)
def delete_remote_basebackup(self, site, basebackup):
storage = self.site_transfers.get(site)
try:
storage.delete_key(basebackup)
except: # FIXME: don't catch all exceptions; pylint: disable=bare-except
self.log.exception("Problem deleting: %r", basebackup)
def get_local_basebackups_info(self, basebackup_path):
m_time, metadata = 0, {}
basebackups = sorted(os.listdir(basebackup_path))
if len(basebackups) > 0:
m_time = os.stat(os.path.join(basebackup_path, basebackups[-1])).st_mtime
with open(os.path.join(basebackup_path, basebackups[-1], "pghoard_metadata"), "r") as fp:
metadata = json.load(fp)
return basebackups, m_time, metadata
def get_remote_basebackups_info(self, site):
basebackups, m_time, metadata = [], 0, {}
storage = self.site_transfers.get(site)
if not storage:
obs_key, obs_value = self.config['backup_sites'][site]['object_storage'].copy().popitem()
storage = get_object_storage_transfer(obs_key, obs_value)
self.site_transfers[site] = storage
results = storage.list_path(site + "/basebackup/")
if results:
basebackups_dict = dict((basebackup['name'], basebackup) for basebackup in results)
basebackups = sorted(basebackups_dict.keys())
basebackup = basebackups_dict[basebackups[-1]]
m_time = basebackup['last_modified'].timestamp()
metadata = basebackup['metadata']
return basebackups, m_time, metadata
def check_backup_count_and_state(self, site, basebackup_path, xlog_path):
allowed_basebackup_count = self.config['backup_sites'][site]['basebackup_count']
remote = False
if 'object_storage' in self.config['backup_sites'][site] and self.config['backup_sites'][site]['object_storage']:
basebackups, m_time, metadata = self.get_remote_basebackups_info(site)
remote = True
else:
basebackups, m_time, metadata = self.get_local_basebackups_info(basebackup_path)
self.log.debug("Found %r basebackups, m_time: %r, metadata: %r", basebackups, m_time, metadata)
if len(basebackups) >= allowed_basebackup_count:
self.log.warning("Too many basebackups: %d>%d, %r, starting to get rid of %r",
len(basebackups), allowed_basebackup_count, basebackups, basebackups[0])
last_wal_segment_still_needed = metadata['start-wal-segment']
if not remote:
self.delete_local_wal_before(last_wal_segment_still_needed, xlog_path)
basebackup_to_be_deleted = os.path.join(basebackup_path, basebackups[0])
shutil.rmtree(basebackup_to_be_deleted)
else:
self.delete_remote_wal_before(last_wal_segment_still_needed, site)
self.delete_remote_basebackup(site, basebackups[0])
self.state["backup_sites"][site]['basebackups'] = basebackups
time_since_last_backup = time.time() - m_time
return time_since_last_backup
def set_state_defaults(self, site):
        if site not in self.state['backup_sites']:
self.state['backup_sites'][site] = {"basebackups": []}
def startup_walk_for_missed_files(self):
for site in self.config["backup_sites"]:
xlog_path, basebackup_path = self.create_backup_site_paths(site) # pylint: disable=unused-variable
for filename in os.listdir(xlog_path):
if not filename.endswith(".partial"):
compression_event = {"type": "CREATE",
"full_path": os.path.join(xlog_path, filename),
"site": site,
"delete_file_after_compression": True}
self.log.debug("Found: %r when starting up, adding to compression queue", compression_event)
self.compression_queue.put(compression_event)
def start_threads_on_startup(self):
# Startup threads
self.inotify.start()
self.webserver.start()
for compressor in self.compressors:
compressor.start()
for ta in self.transfer_agents:
ta.start()
def get_passwordless_connection_string(self, chosen_backup_node):
"""Process the input chosen_backup_node entry which may be a libpq
connection string or uri, or a dict containing key:value pairs of
connection info entries or just the connection string with a
replication slot name. Create a pgpass entry for this in case it
contains a password and return a libpq-format connection string
without the password in it and a possible replication slot."""
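        # Illustrative example (hypothetical values): given
        #   {"host": "db1", "port": 5432, "password": "secret", "slot": "pghoard"}
        # the password is stored via create_pgpass_file and the return value is
        # roughly ("host=db1 port=5432 dbname=replication replication=true", "pghoard").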
slot = None
if isinstance(chosen_backup_node, dict):
chosen_backup_node = chosen_backup_node.copy()
slot = chosen_backup_node.pop("slot", None)
if list(chosen_backup_node) == ["connection_string"]:
# if the dict only contains the `connection_string` key use it as-is
chosen_backup_node = chosen_backup_node["connection_string"]
# make sure it's a replication connection to the host
# pointed by the key using the "replication" pseudo-db
connection_info = get_connection_info(chosen_backup_node)
connection_info["dbname"] = "replication"
connection_info["replication"] = "true"
connection_string = create_pgpass_file(self.log, connection_info)
return connection_string, slot
def run(self):
self.start_threads_on_startup()
self.startup_walk_for_missed_files()
while self.running:
for site, site_config in self.config['backup_sites'].items():
self.set_state_defaults(site)
xlog_path, basebackup_path = self.create_backup_site_paths(site)
if time.time() - self.time_since_last_backup_check > 3600:
time_since_last_backup = self.check_backup_count_and_state(site, basebackup_path, xlog_path)
self.time_since_last_backup_check = time.time()
chosen_backup_node = random.choice(site_config["nodes"])
if site not in self.receivexlogs and site_config.get("active_backup_mode") == "pg_receivexlog":
# Create a pg_receivexlog listener for all sites
connection_string, slot = self.get_passwordless_connection_string(chosen_backup_node)
self.receivexlog_listener(site, xlog_path, connection_string, slot)
if time_since_last_backup > self.config['backup_sites'][site]['basebackup_interval_hours'] * 3600 \
and site not in self.basebackups:
self.log.debug("Starting to create a new basebackup for: %r since time from previous: %r",
site, time_since_last_backup)
connection_string, slot = self.get_passwordless_connection_string(chosen_backup_node)
self.create_basebackup(site, connection_string, basebackup_path)
self.write_backup_state_to_json_file()
time.sleep(5.0)
def write_backup_state_to_json_file(self):
"""Periodically write a JSON state file to disk"""
start_time = time.time()
state_file_path = self.config.get("json_state_file_path", "/tmp/pghoard_state.json")
self.state["pg_receivexlogs"] = dict((key, {"latest_activity": value.latest_activity.isoformat(), "running": value.running})
for key, value in self.receivexlogs.items())
self.state["pg_basebackups"] = dict((key, {"latest_activity": value.latest_activity.isoformat(), "running": value.running})
for key, value in self.basebackups.items())
self.state["compressors"] = [compressor.state for compressor in self.compressors]
self.state["transfer_agents"] = [ta.state for ta in self.transfer_agents]
self.state["queues"] = {
"compression_queue": self.compression_queue.qsize(),
"transfer_queue": self.transfer_queue.qsize(),
}
json_to_dump = json.dumps(self.state, indent=4)
self.log.debug("Writing JSON state file to: %r, file_size: %r", state_file_path, len(json_to_dump))
with open(state_file_path + ".tmp", "w") as fp:
fp.write(json_to_dump)
os.rename(state_file_path + ".tmp", state_file_path)
self.log.debug("Wrote JSON state file to disk, took %.4fs", time.time() - start_time)
def create_alert_file(self, filename):
filepath = os.path.join(self.config.get("alert_file_dir", os.getcwd()), filename)
self.log.debug("Creating alert file: %r", filepath)
with open(filepath, "w") as fp:
fp.write("alert")
def delete_alert_file(self, filename):
filepath = os.path.join(self.config.get("alert_file_dir", os.getcwd()), filename)
if os.path.exists(filepath):
self.log.debug("Deleting alert file: %r", filepath)
os.unlink(filepath)
def load_config(self, _signal=None, _frame=None):
self.log.debug("Loading JSON config from: %r, signal: %r, frame: %r",
self.config_path, _signal, _frame)
try:
with open(self.config_path, "r") as fp:
self.config = json.load(fp)
except (IOError, ValueError) as ex:
self.log.exception("Invalid JSON config %r: %s", self.config_path, ex)
# if we were called by a signal handler we'll ignore (and log)
# the error and hope the user fixes the configuration before
# restarting pghoard.
if _signal is not None:
return
raise InvalidConfigurationError(self.config_path)
if self.config.get("syslog") and not self.syslog_handler:
self.syslog_handler = set_syslog_handler(self.config.get("syslog_address", "/dev/log"),
self.config.get("syslog_facility", "local2"),
self.log)
# the levelNames hack is needed for Python2.6
if sys.version_info[0] >= 3:
self.log_level = getattr(logging, self.config.get("log_level", "DEBUG"))
else:
self.log_level = logging._levelNames[self.config.get("log_level", "DEBUG")] # pylint: disable=no-member,protected-access
try:
self.log.setLevel(self.log_level)
except ValueError:
self.log.exception("Problem with log_level: %r", self.log_level)
# we need the failover_command to be converted into subprocess [] format
self.log.debug("Loaded config: %r from: %r", self.config, self.config_path)
def quit(self, _signal=None, _frame=None):
self.log.warning("Quitting, signal: %r, frame: %r", _signal, _frame)
self.running = False
self.inotify.running = False
for receivexlog in self.receivexlogs.values():
receivexlog.running = False
for compressor in self.compressors:
compressor.running = False
for ta in self.transfer_agents:
ta.running = False
self.webserver.close()
def main(argv):
logging.basicConfig(level=logging.INFO, format=default_log_format_str)
if len(argv) != 2:
print("Usage: {} <config filename>".format(argv[0]))
return 1
if not os.path.exists(argv[1]):
print("{}: {!r} doesn't exist".format(argv[0], argv[1]))
return 1
try:
        pghoard = PGHoard(argv[1])
except InvalidConfigurationError as ex:
print("{}: failed to load config {}".format(argv[0], ex))
return 1
return pghoard.run()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from functools import partial
import operator as op
from dateutil.relativedelta import relativedelta
import empyrical as ep
import numpy as np
import pandas as pd
from six import iteritems
from zipline.utils.exploding_object import NamedExplodingObject
from zipline.finance._finance_ext import minute_annual_volatility
class SimpleLedgerField(object):
"""Emit the current value of a ledger field every bar or every session.
Parameters
----------
ledger_field : str
The ledger field to read.
packet_field : str, optional
The name of the field to populate in the packet. If not provided,
``ledger_field`` will be used.
"""
def __init__(self, ledger_field, packet_field=None):
self._get_ledger_field = op.attrgetter(ledger_field)
if packet_field is None:
self._packet_field = ledger_field.rsplit('.', 1)[-1]
else:
self._packet_field = packet_field
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
packet['minute_perf'][self._packet_field] = self._get_ledger_field(
ledger,
)
def end_of_session(self,
packet,
ledger,
session,
session_ix,
data_portal):
packet['daily_perf'][self._packet_field] = self._get_ledger_field(
ledger,
)
class DailyLedgerField(object):
"""Like :class:`~zipline.finance.metrics.metric.SimpleLedgerField` but
also puts the current value in the ``cumulative_perf`` section.
Parameters
----------
ledger_field : str
The ledger field to read.
packet_field : str, optional
The name of the field to populate in the packet. If not provided,
``ledger_field`` will be used.
"""
def __init__(self, ledger_field, packet_field=None):
self._get_ledger_field = op.attrgetter(ledger_field)
if packet_field is None:
self._packet_field = ledger_field.rsplit('.', 1)[-1]
else:
self._packet_field = packet_field
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
field = self._packet_field
packet['cumulative_perf'][field] = packet['minute_perf'][field] = (
self._get_ledger_field(ledger)
)
def end_of_session(self,
packet,
ledger,
session,
session_ix,
data_portal):
field = self._packet_field
packet['cumulative_perf'][field] = packet['daily_perf'][field] = (
self._get_ledger_field(ledger)
)
class StartOfPeriodLedgerField(object):
"""Keep track of the value of a ledger field at the start of the period.
Parameters
----------
ledger_field : str
The ledger field to read.
packet_field : str, optional
The name of the field to populate in the packet. If not provided,
``ledger_field`` will be used.
"""
def __init__(self, ledger_field, packet_field=None):
self._get_ledger_field = op.attrgetter(ledger_field)
if packet_field is None:
self._packet_field = ledger_field.rsplit('.', 1)[-1]
else:
self._packet_field = packet_field
def start_of_simulation(self,
ledger,
emission_rate,
trading_calendar,
sessions,
benchmark_source):
self._start_of_simulation = self._get_ledger_field(ledger)
def start_of_session(self, ledger, session, data_portal):
self._previous_day = self._get_ledger_field(ledger)
def _end_of_period(self, sub_field, packet, ledger):
packet_field = self._packet_field
packet['cumulative_perf'][packet_field] = self._start_of_simulation
packet[sub_field][packet_field] = self._previous_day
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
self._end_of_period('minute_perf', packet, ledger)
def end_of_session(self,
packet,
ledger,
session,
session_ix,
data_portal):
self._end_of_period('daily_perf', packet, ledger)
class Returns(object):
"""Tracks the daily and cumulative returns of the algorithm.
"""
def _end_of_period(field,
packet,
ledger,
dt,
session_ix,
data_portal):
packet[field]['returns'] = ledger.todays_returns
packet['cumulative_perf']['returns'] = ledger.portfolio.returns
packet['cumulative_risk_metrics']['algorithm_period_return'] = (
ledger.portfolio.returns
)
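    # _end_of_period deliberately has no `self` parameter: functools.partial
    # objects are not descriptors, so the partials below are called unbound and
    # only receive the bound packet sub-field plus the hook arguments.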
end_of_bar = partial(_end_of_period, 'minute_perf')
end_of_session = partial(_end_of_period, 'daily_perf')
class BenchmarkReturnsAndVolatility(object):
"""Tracks daily and cumulative returns for the benchmark as well as the
volatility of the benchmark returns.
"""
def start_of_simulation(self,
ledger,
emission_rate,
trading_calendar,
sessions,
benchmark_source):
daily_returns_series = benchmark_source.daily_returns(
sessions[0],
sessions[-1],
)
self._daily_returns = daily_returns_array = daily_returns_series.values
self._daily_cumulative_returns = (
np.cumprod(1 + daily_returns_array) - 1
)
self._daily_annual_volatility = (
daily_returns_series.expanding(2).std(ddof=1) * np.sqrt(252)
).values
if emission_rate == 'daily':
self._minute_cumulative_returns = NamedExplodingObject(
'self._minute_cumulative_returns',
'does not exist in daily emission rate',
)
self._minute_annual_volatility = NamedExplodingObject(
'self._minute_annual_volatility',
'does not exist in daily emission rate',
)
else:
open_ = trading_calendar.session_open(sessions[0])
close = trading_calendar.session_close(sessions[-1])
returns = benchmark_source.get_range(open_, close)
self._minute_cumulative_returns = (
(1 + returns).cumprod() - 1
)
self._minute_annual_volatility = pd.Series(
minute_annual_volatility(
returns.index.normalize().view('int64'),
returns.values,
daily_returns_array,
),
index=returns.index,
)
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
r = self._minute_cumulative_returns[dt]
if np.isnan(r):
r = None
packet['cumulative_risk_metrics']['benchmark_period_return'] = r
v = self._minute_annual_volatility[dt]
if np.isnan(v):
v = None
packet['cumulative_risk_metrics']['benchmark_volatility'] = v
def end_of_session(self,
packet,
ledger,
session,
session_ix,
data_portal):
r = self._daily_cumulative_returns[session_ix]
if np.isnan(r):
r = None
packet['cumulative_risk_metrics']['benchmark_period_return'] = r
v = self._daily_annual_volatility[session_ix]
if np.isnan(v):
v = None
packet['cumulative_risk_metrics']['benchmark_volatility'] = v
class PNL(object):
"""Tracks daily and cumulative PNL.
"""
def start_of_simulation(self,
ledger,
emission_rate,
trading_calendar,
sessions,
benchmark_source):
self._previous_pnl = 0.0
def start_of_session(self, ledger, session, data_portal):
self._previous_pnl = ledger.portfolio.pnl
def _end_of_period(self, field, packet, ledger):
pnl = ledger.portfolio.pnl
packet[field]['pnl'] = pnl - self._previous_pnl
packet['cumulative_perf']['pnl'] = ledger.portfolio.pnl
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
self._end_of_period('minute_perf', packet, ledger)
def end_of_session(self,
packet,
ledger,
session,
session_ix,
data_portal):
self._end_of_period('daily_perf', packet, ledger)
class CashFlow(object):
"""Tracks daily and cumulative cash flow.
Notes
-----
For historical reasons, this field is named 'capital_used' in the packets.
"""
def start_of_simulation(self,
ledger,
emission_rate,
trading_calendar,
sessions,
benchmark_source):
self._previous_cash_flow = 0.0
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
cash_flow = ledger.portfolio.cash_flow
packet['minute_perf']['capital_used'] = (
cash_flow - self._previous_cash_flow
)
packet['cumulative_perf']['capital_used'] = cash_flow
def end_of_session(self,
packet,
ledger,
session,
session_ix,
data_portal):
cash_flow = ledger.portfolio.cash_flow
packet['daily_perf']['capital_used'] = (
cash_flow - self._previous_cash_flow
)
packet['cumulative_perf']['capital_used'] = cash_flow
self._previous_cash_flow = cash_flow
class Orders(object):
"""Tracks daily orders.
"""
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
packet['minute_perf']['orders'] = ledger.orders(dt)
def end_of_session(self,
packet,
ledger,
dt,
session_ix,
data_portal):
packet['daily_perf']['orders'] = ledger.orders()
class Transactions(object):
"""Tracks daily transactions.
"""
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
packet['minute_perf']['transactions'] = ledger.transactions(dt)
def end_of_session(self,
packet,
ledger,
dt,
session_ix,
data_portal):
packet['daily_perf']['transactions'] = ledger.transactions()
class Positions(object):
"""Tracks daily positions.
"""
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
packet['minute_perf']['positions'] = ledger.positions(dt)
def end_of_session(self,
packet,
ledger,
dt,
session_ix,
data_portal):
packet['daily_perf']['positions'] = ledger.positions()
class ReturnsStatistic(object):
"""A metric that reports an end of simulation scalar or time series
computed from the algorithm returns.
Parameters
----------
function : callable
The function to call on the daily returns.
field_name : str, optional
The name of the field. If not provided, it will be
``function.__name__``.
"""
def __init__(self, function, field_name=None):
if field_name is None:
field_name = function.__name__
self._function = function
self._field_name = field_name
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
res = self._function(ledger.daily_returns_array[:session_ix + 1])
if not np.isfinite(res):
res = None
packet['cumulative_risk_metrics'][self._field_name] = res
end_of_session = end_of_bar
class AlphaBeta(object):
"""End of simulation alpha and beta to the benchmark.
"""
def start_of_simulation(self,
ledger,
emission_rate,
trading_calendar,
sessions,
benchmark_source):
self._daily_returns_array = benchmark_source.daily_returns(
sessions[0],
sessions[-1],
).values
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
risk = packet['cumulative_risk_metrics']
alpha, beta = ep.alpha_beta_aligned(
ledger.daily_returns_array[:session_ix + 1],
self._daily_returns_array[:session_ix + 1],
)
if not np.isfinite(alpha):
alpha = None
if np.isnan(beta):
beta = None
risk['alpha'] = alpha
risk['beta'] = beta
end_of_session = end_of_bar
class MaxLeverage(object):
"""Tracks the maximum account leverage.
"""
def start_of_simulation(self, *args):
self._max_leverage = 0.0
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
self._max_leverage = max(self._max_leverage, ledger.account.leverage)
packet['cumulative_risk_metrics']['max_leverage'] = self._max_leverage
end_of_session = end_of_bar
class NumTradingDays(object):
"""Report the number of trading days.
"""
def start_of_simulation(self, *args):
self._num_trading_days = 0
def start_of_session(self, *args):
self._num_trading_days += 1
def end_of_bar(self,
packet,
ledger,
dt,
session_ix,
data_portal):
packet['cumulative_risk_metrics']['trading_days'] = (
self._num_trading_days
)
end_of_session = end_of_bar
class _ConstantCumulativeRiskMetric(object):
"""A metric which does not change, ever.
Notes
-----
This exists to maintain the existing structure of the perf packets. We
should kill this as soon as possible.
"""
def __init__(self, field, value):
self._field = field
self._value = value
def end_of_bar(self, packet, *args):
packet['cumulative_risk_metrics'][self._field] = self._value
def end_of_session(self, packet, *args):
packet['cumulative_risk_metrics'][self._field] = self._value
class PeriodLabel(object):
"""Backwards compat, please kill me.
"""
def start_of_session(self, ledger, session, data_portal):
self._label = session.strftime('%Y-%m')
def end_of_bar(self, packet, *args):
packet['cumulative_risk_metrics']['period_label'] = self._label
end_of_session = end_of_bar
class _ClassicRiskMetrics(object):
"""Produces original risk packet.
"""
def start_of_simulation(self,
ledger,
emission_rate,
trading_calendar,
sessions,
benchmark_source):
self._leverages = np.full_like(sessions, np.nan, dtype='float64')
def end_of_session(self,
packet,
ledger,
dt,
session_ix,
data_portal):
self._leverages[session_ix] = ledger.account.leverage
@classmethod
def risk_metric_period(cls,
start_session,
end_session,
algorithm_returns,
benchmark_returns,
algorithm_leverages):
"""
Creates a dictionary representing the state of the risk report.
Parameters
----------
start_session : pd.Timestamp
Start of period (inclusive) to produce metrics on
end_session : pd.Timestamp
End of period (inclusive) to produce metrics on
algorithm_returns : pd.Series(pd.Timestamp -> float)
Series of algorithm returns as of the end of each session
benchmark_returns : pd.Series(pd.Timestamp -> float)
Series of benchmark returns as of the end of each session
algorithm_leverages : pd.Series(pd.Timestamp -> float)
Series of algorithm leverages as of the end of each session
Returns
-------
risk_metric : dict[str, any]
            Dict of metrics with fields like:
{
'algorithm_period_return': 0.0,
'benchmark_period_return': 0.0,
'treasury_period_return': 0,
'excess_return': 0.0,
'alpha': 0.0,
'beta': 0.0,
'sharpe': 0.0,
'sortino': 0.0,
'period_label': '1970-01',
'trading_days': 0,
'algo_volatility': 0.0,
'benchmark_volatility': 0.0,
'max_drawdown': 0.0,
'max_leverage': 0.0,
}
"""
algorithm_returns = algorithm_returns[
(algorithm_returns.index >= start_session) &
(algorithm_returns.index <= end_session)
]
# Benchmark needs to be masked to the same dates as the algo returns
benchmark_returns = benchmark_returns[
(benchmark_returns.index >= start_session) &
(benchmark_returns.index <= algorithm_returns.index[-1])
]
benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]
alpha, beta = ep.alpha_beta_aligned(
algorithm_returns.values,
benchmark_returns.values,
)
benchmark_volatility = ep.annual_volatility(benchmark_returns)
sharpe = ep.sharpe_ratio(algorithm_returns)
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(sharpe):
sharpe = 0.0
sortino = ep.sortino_ratio(
algorithm_returns.values,
_downside_risk=ep.downside_risk(algorithm_returns.values),
)
rval = {
'algorithm_period_return': algorithm_period_returns,
'benchmark_period_return': benchmark_period_returns,
'treasury_period_return': 0,
'excess_return': algorithm_period_returns,
'alpha': alpha,
'beta': beta,
'sharpe': sharpe,
'sortino': sortino,
'period_label': end_session.strftime("%Y-%m"),
'trading_days': len(benchmark_returns),
'algo_volatility': ep.annual_volatility(algorithm_returns),
'benchmark_volatility': benchmark_volatility,
'max_drawdown': ep.max_drawdown(algorithm_returns.values),
'max_leverage': algorithm_leverages.max(),
}
# check if a field in rval is nan or inf, and replace it with None
# except period_label which is always a str
return {
k: (
None
if k != 'period_label' and not np.isfinite(v) else
v
)
for k, v in iteritems(rval)
}
@classmethod
def _periods_in_range(cls,
months,
end_session,
end_date,
algorithm_returns,
benchmark_returns,
algorithm_leverages,
months_per):
if months.size < months_per:
return
end_date = end_date.tz_convert(None)
for period_timestamp in months:
period = period_timestamp.to_period(freq='%dM' % months_per)
if period.end_time > end_date:
break
yield cls.risk_metric_period(
start_session=period.start_time,
end_session=min(period.end_time, end_session),
algorithm_returns=algorithm_returns,
benchmark_returns=benchmark_returns,
algorithm_leverages=algorithm_leverages,
)
@classmethod
def risk_report(cls,
algorithm_returns,
benchmark_returns,
algorithm_leverages):
start_session = algorithm_returns.index[0]
end_session = algorithm_returns.index[-1]
end = end_session.replace(day=1) + relativedelta(months=1)
months = pd.date_range(
start=start_session,
# Ensure we have at least one month
end=end - datetime.timedelta(days=1),
freq='M',
tz='utc',
)
periods_in_range = partial(
cls._periods_in_range,
months=months,
end_session=end_session.tz_convert(None),
end_date=end,
algorithm_returns=algorithm_returns,
benchmark_returns=benchmark_returns,
algorithm_leverages=algorithm_leverages,
)
return {
'one_month': list(periods_in_range(months_per=1)),
'three_month': list(periods_in_range(months_per=3)),
'six_month': list(periods_in_range(months_per=6)),
'twelve_month': list(periods_in_range(months_per=12)),
}
def end_of_simulation(self,
packet,
ledger,
trading_calendar,
sessions,
data_portal,
benchmark_source):
packet.update(self.risk_report(
algorithm_returns=ledger.daily_returns_series,
benchmark_returns=benchmark_source.daily_returns(
sessions[0],
sessions[-1],
),
algorithm_leverages=self._leverages,
))
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, random, string, requests
LOGGING = True
BASE_URL = "http://localhost:8080"
API_VERSION = "v1.4.0"
DL_TOKEN = { 'Authorization' : 'DirectLogin token=' }
CONTENT_JSON = { 'content-type' : 'application/json' }
def setCounterParty(bank_id, account_id,counterparty_id, counterparty_iban):
global COUNTERPARTY_BANK, OUR_COUNTERPARTY, OUR_COUNTERPARTY_ID, OUR_COUNTERPARTY_IBAN
COUNTERPARTY_BANK = bank_id
OUR_COUNTERPARTY = account_id
OUR_COUNTERPARTY_ID = counterparty_id
OUR_COUNTERPARTY_IBAN = counterparty_iban
def setPaymentDetails(currency,value):
global OUR_CURRENCY, OUR_VALUE
OUR_CURRENCY = currency
    OUR_VALUE = value
def setBaseUrl(u):
global BASE_URL
BASE_URL = u
def setToken(t):
global DL_TOKEN
DL_TOKEN = { 'Authorization' : 'DirectLogin token=%s' % t}
def setApiVersion(v):
global API_VERSION
API_VERSION = v
# Helper function to merge headers
def mergeHeaders(x, y):
z = x.copy()
z.update(y)
return z
# Logger
def log(m):
if LOGGING:
print(m)
# Login as user
def login(username, password, consumer_key):
login_url = '{0}/my/logins/direct'.format(BASE_URL)
login_header = { 'Authorization' : 'DirectLogin username="%s",password="%s",consumer_key="%s"' % (username, password, consumer_key)}
# Login and receive authorized token
log('Login as {0} to {1}'.format(login_header, login_url))
r = requests.post(login_url, headers=login_header)
if (r.status_code != 201):
log("error: could not login")
log("text: " + r.text)
return r.text
t = r.json()['token']
log("Received token: {0}".format(t))
setToken(t)
return t
# Request a meeting
def requestMeeting(purpose_id, provider_id):
post_data = {
'purpose_id' : '%s' % purpose_id,
'provider_id' : '%s' % provider_id
}
# Send post request with attached json
response = requests.post(u"{0}/obp/{1}/banks/THE_BANK/meetings".format(BASE_URL, API_VERSION), json=post_data, headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
# Print result
log("code=" + response.status_code + " text=" + response.text)
return response.json()
def getCounterBankId():
return COUNTERPARTY_BANK
def getCounterpartyAccountId():
return OUR_COUNTERPARTY
def getCounterpartyId():
return OUR_COUNTERPARTY_ID
def getCounterpartyIban():
return OUR_COUNTERPARTY_IBAN
# Get banks
def getBanks():
# Prepare headers
response = requests.get(u"{0}/obp/{1}/banks".format(BASE_URL, API_VERSION), headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()['banks']
# Get currently logged in user
def getCurrentUser():
# Prepare headers
response = requests.get(u"{0}/obp/{1}/users/current".format(BASE_URL, API_VERSION), headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()
# Create an user
def createUser(fname, lname, email, username, password):
post_data = {
'last_name' : '%s' % lname,
'first_name' : '%s' % fname,
'email' : '%s' % email,
'username' : '%s' % username,
'password' : '%s' % password
}
# Send post request with attached json
response = requests.post(u"{0}/obp/{1}/users".format(BASE_URL, API_VERSION), json=post_data, headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
# Log result
log("code=" + response.status_code + " text=" + response.text)
return response.json()
# Get all user's private accounts
def getPrivateAccounts(bank):
# Prepare headers
response = requests.get(u"{0}/obp/{1}/banks/{2}/accounts/private".format(BASE_URL, API_VERSION, bank), headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()['accounts']
# Get a single account
def getAccount(bank, account):
# Prepare headers
response = requests.get(u"{0}/obp/{1}/my/banks/{2}/accounts/{3}/account".format(BASE_URL, API_VERSION, bank, account), headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()
# Get owner's transactions
def getTransactions(bank, account):
response = requests.get(u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/transactions".format(BASE_URL, API_VERSION, bank, account), headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()['transactions']
# Get Transaction by Id.
def getTransaction(bank_id, account_id, transaction_id):
response = requests.get(u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/transactions/{4}/transaction".format(BASE_URL, API_VERSION, bank_id, account_id, transaction_id), headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()
# Get challenge types
def getChallengeTypes(bank, account):
response = requests.get(u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/transaction-request-types".format(BASE_URL, API_VERSION, bank, account), headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
types = response.json()['transaction_request_types']
res = []
for type in types:
res.append(type['value'])
return res
# Answer the challenge
def answerChallenge(bank, account, transation_req_id, challenge_query):
body = '{"id": "' + challenge_query + '","answer": "123456"}' #any number works in sandbox mode
response = requests.post(u"{0}/obp/v1.4.0/banks/{1}/accounts/{2}/owner/transaction-request-types/sandbox/transaction-requests/{3}/challenge".format(
BASE_URL, bank, account, transation_req_id), data=body, headers=mergeHeaders(DL_TOKEN, CONTENT_JSON)
)
return response.json()
def getTransactionRequest(bank, account, transation_req_id):
response = requests.get(u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/transactions".format(BASE_URL, API_VERSION, bank, account), headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
    return response.json()
def initiateTransactionRequest(bank, account, challenge_type, cp_bank, cp_account):
send_to = {"bank": cp_bank, "account": cp_account}
payload = '{"to": {"account_id": "' + send_to['account'] +'", "bank_id": "' + send_to['bank'] + \
'"}, "value": {"currency": "' + OUR_CURRENCY + '", "amount": "' + OUR_VALUE + '"}, "description": "Description abc", "challenge_type" : "' + \
challenge_type + '"}'
response = requests.post(u"{0}/obp/v2.2.0/banks/{1}/accounts/{2}/owner/transaction-request-types/{3}/transaction-requests".format(BASE_URL, bank, account, challenge_type), data=payload, headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()
# Create counterparty, input data format:
# {
# "name": "test3",
# "other_account_routing_scheme": "BankAccountID",
# "other_account_routing_address": "1000203892",
# "other_bank_routing_scheme": "BankId",
# "other_bank_routing_address": "00100",
# "other_branch_routing_scheme": "OBP",
# "other_branch_routing_address": "Berlin",
# "is_beneficiary": true
# }
def createCounterparty(bank_id,
account_id,
name,
other_account_routing_scheme,
other_account_routing_address,
other_bank_routing_scheme,
other_bank_routing_address):
post_data = {
'name' : '%s' % name,
'other_account_routing_scheme' : '%s' % other_account_routing_scheme ,
'other_account_routing_address': '%s' % other_account_routing_address,
'other_bank_routing_scheme' : '%s' % other_bank_routing_scheme,
'other_bank_routing_address' : '%s' % other_bank_routing_address,
'other_branch_routing_scheme' : 'OBP', # not useful now, set default value
'other_branch_routing_address' : 'Berlin', # not useful now, set default value
'is_beneficiary' : True
}
# Send post request with attached json
response = requests.post(u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/counterparties".format(BASE_URL, API_VERSION, bank_id, account_id), json=post_data, headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()
# Get all entitlements
def getAllEntitlements():
response = requests.get(u"{0}/obp/{1}/entitlements".format(BASE_URL, API_VERSION), headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()
# Get user's entitlements
def getEntitlements(user, bank):
response = requests.get(u"{0}/obp/{1}/banks/{2}/users/{3}/entitlements".format(BASE_URL, API_VERSION, bank, user), headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()
# Add system role to user
def addRole(role, user):
post_data = {
'bank_id' : '',
'role_name' : '%s' % role
}
# Send post request with attached json
response = requests.post(u"{0}/obp/{1}/users/{2}/entitlements".format(BASE_URL, API_VERSION, user), json=post_data, headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
# Log result
return response.text
# Add entitlement to user
def addEntitlement(entitlement, user, bank=''):
post_data = {
'bank_id' : '%s' % bank,
'role_name' : '%s' % entitlement
}
# Send post request with attached json
response = requests.post(u"{0}/obp/{1}/users/{2}/entitlements".format(BASE_URL, API_VERSION, user), json=post_data, headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
# Log result
return response.text
# Answer Transaction Request Challenge. - V210
def answerChallengeV210(bank_id,
account_id,
transation_req_id,
challenge_type,
challenge_query):
body = '{"id": "' + challenge_query + '","answer": "123456"}' # any number works in sandbox mode
response = requests.post( u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/transaction-request-types/{4}/transaction-requests/{5}/challenge".format(
BASE_URL, API_VERSION, bank_id, account_id, challenge_type,
transation_req_id), data=body,
headers=mergeHeaders(DL_TOKEN, CONTENT_JSON)
)
return response.json()
# Create Transaction Request. - V210
# Note: previously called 'initiateTransactionRequest'; renamed to match the OBP-API endpoint name
def createTransactionRequestV210(from_bank_id,
from_account_id,
transaction_request_type,
to_bank_id,
to_account_id,
to_counterparty_id,
to_counterparty_iban):
if(transaction_request_type== "SANDBOX_TAN"):
send_to = {"bank": to_bank_id, "account": to_account_id}
payload = '{"to": {"account_id": "' + send_to['account'] +'", "bank_id": "' + send_to['bank'] + \
'"}, "value": {"currency": "' + OUR_CURRENCY + '", "amount": "' + OUR_VALUE + '"}, "description": "Description abc"}'
elif(transaction_request_type== "SEPA"):
send_to = {"iban": to_counterparty_iban}
payload = '{"to": {"iban": "' + send_to['iban'] +\
'"}, "value": {"currency": "' + OUR_CURRENCY + '", "amount": "' + OUR_VALUE + '"}, "description": "Description abc", "charge_policy" : "' + \
"SHARED" + '"}'
elif (transaction_request_type== "COUNTERPARTY"):
send_to = {"counterparty_id": to_counterparty_id}
payload = '{"to": {"counterparty_id": "' + send_to['counterparty_id'] + \
'"}, "value": {"currency": "' + OUR_CURRENCY + '", "amount": "' + OUR_VALUE + '"}, "description": "Description abc", "charge_policy" : "' + \
"SHARED" + '"}'
else: # FREE_FORM
send_to = {"bank": to_bank_id, "account": to_account_id}
payload = '{"to": {"account_id": "' + send_to['account'] +'", "bank_id": "' + send_to['bank'] + \
'"}, "value": {"currency": "' + OUR_CURRENCY + '", "amount": "' + OUR_VALUE + '"}, "description": "Description abc", "challenge_type" : "' + \
transaction_request_type + '"}'
response = requests.post(u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/transaction-request-types/{4}/transaction-requests".format(BASE_URL, API_VERSION, from_bank_id, from_account_id, transaction_request_type), data=payload, headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()
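# For reference, the SANDBOX_TAN branch above sends a payload shaped like
#   {"to": {"account_id": ..., "bank_id": ...},
#    "value": {"currency": OUR_CURRENCY, "amount": OUR_VALUE},
#    "description": "Description abc"}
# while the SEPA and COUNTERPARTY branches address the counterparty by iban or
# counterparty_id and add "charge_policy": "SHARED".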
# Get Counterparties of one Account.-V220
def getCounterparties(bank, account):
response = requests.get(u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/counterparties".format(BASE_URL, API_VERSION, bank, account), headers=mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()['counterparties']
# Helper functions that print the status of transaction request calls
def printMessageNoChallenge(response):
if "error" in response:
print("The result is: {0}".format(response))
# sys.exit("Got an error: " + str(response))
print("There was no challenge, transaction was created immediately:")
print("The response is : {0}".format(response))
print("Transaction status: {0}".format(response['status']))
print("Transaction id is created: {0}".format(response["transaction_ids"]))
def printMessageWithChallenge(response):
if "error" in response:
print("The result is: {0}".format(response))
# sys.exit("Got an error: " + str(response))
print("There was a challenge, transaction was interrupted, the transaction_request is 'INITIATED' and new Transaction id is null:")
print("The response is: {0}".format(response))
print("Transaction status: {0}".format(response['status']))
print("New Transaction ID created: {0}".format(response["transaction_ids"]))
def printMessageAfterAnswerChallenge(response):
if "error" in response:
print("The result is: {0}".format(response))
# sys.exit("Got an error: " + str(response))
print("Transaction is done , and the transaction_request is 'COMPLETED' and new Transaction id is created: :")
print("The result is: {0}".format(response))
print("Transaction status: {0}".format(response['status']))
print("New Transaction ID created: {0}".format(response["transaction_ids"]))
def printGetTransactions(response):
if "error" in response:
sys.exit("Got an error: " + str(response))
print("Print all the transactions : ")
count=0
for transaction in response:
count=count + 1
print (str(count) +":"+str(transaction))
def printGetTransaction(response, newTransactionId):
if "error" in response:
sys.exit("Got an error: " + str(response))
print("Check wther the new transactionId{0} is exsting".format(newTransactionId))
print("The result is: {0}".format(response))
def printCreateCounterparty(response):
if "error" in response:
sys.exit("Got an error: " + str(response))
print("Counterparty is created:")
print("The result is: {0}".format(response))
def printGetCounterparties(response):
if "error" in response:
sys.exit("Got an error: " + str(response))
print("Print all the counterparties : ")
count=0
for transaction in response:
count=count + 1
print (str(count) +":"+str(transaction))
def printGetAccount(response):
if "error" in response:
sys.exit("Got an error: " + str(response))
print("The account detail is: {0}".format(response))
|
|
# -*- coding: utf-8 -*-
from .grammar import SchemaGrammar
from ..blueprint import Blueprint
from ...query.expression import QueryExpression
from ...support.fluent import Fluent
class MySqlSchemaGrammar(SchemaGrammar):
_modifiers = [
'unsigned', 'charset', 'collate', 'nullable',
'default', 'increment', 'comment', 'after'
]
_serials = ['big_integer', 'integer',
'medium_integer', 'small_integer', 'tiny_integer']
def compile_table_exists(self):
"""
Compile the query to determine if a table exists
:rtype: str
"""
return 'SELECT * FROM information_schema.tables WHERE table_schema = %s AND table_name = %s'
def compile_column_exists(self, table):
"""
Compile the query to determine the list of columns.
"""
return 'SELECT column_name FROM information_schema.columns WHERE table_name = %s' % table
def compile_create(self, blueprint, command, connection):
"""
Compile a create table command.
"""
columns = ', '.join(self._get_columns(blueprint))
sql = 'CREATE TABLE %s (%s)' % (self.wrap_table(blueprint), columns)
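        # Illustrative result for a hypothetical blueprint with an
        # auto-incrementing integer `id` and a VARCHAR `name` column:
        #   CREATE TABLE `users` (`id` INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
        #                         `name` VARCHAR(255) NOT NULL)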
sql = self._compile_create_encoding(sql, connection, blueprint)
if blueprint.engine:
sql += ' ENGINE = %s' % blueprint.engine
return sql
def _compile_create_encoding(self, sql, connection, blueprint):
"""
Append the character set specifications to a command.
:type sql: str
:type connection: orator.connections.Connection
:type blueprint: Blueprint
:rtype: str
"""
charset = blueprint.charset or connection.get_config('charset')
if charset:
sql += ' DEFAULT CHARACTER SET %s' % charset
collation = blueprint.collation or connection.get_config('collation')
if collation:
sql += ' COLLATE %s' % collation
return sql
def compile_add(self, blueprint, command, _):
table = self.wrap_table(blueprint)
columns = self.prefix_list('ADD', self._get_columns(blueprint))
return 'ALTER TABLE %s %s' % (table, ', '.join(columns))
def compile_primary(self, blueprint, command, _):
command.name = None
return self._compile_key(blueprint, command, 'PRIMARY KEY')
def compile_unique(self, blueprint, command, _):
return self._compile_key(blueprint, command, 'UNIQUE')
def compile_index(self, blueprint, command, _):
return self._compile_key(blueprint, command, 'INDEX')
def _compile_key(self, blueprint, command, type):
columns = self.columnize(command.columns)
table = self.wrap_table(blueprint)
return 'ALTER TABLE %s ADD %s %s(%s)' % (table, type, command.index, columns)
def compile_drop(self, blueprint, command, _):
return 'DROP TABLE %s' % self.wrap_table(blueprint)
def compile_drop_if_exists(self, blueprint, command, _):
return 'DROP TABLE IF EXISTS %s' % self.wrap_table(blueprint)
def compile_drop_column(self, blueprint, command, connection):
columns = self.prefix_list('DROP', self.wrap_list(command.columns))
table = self.wrap_table(blueprint)
return 'ALTER TABLE %s %s' % (table, ', '.join(columns))
def compile_drop_primary(self, blueprint, command, _):
return 'ALTER TABLE %s DROP PRIMARY KEY'\
% self.wrap_table(blueprint)
def compile_drop_unique(self, blueprint, command, _):
table = self.wrap_table(blueprint)
return 'ALTER TABLE %s DROP INDEX %s' % (table, command.index)
def compile_drop_index(self, blueprint, command, _):
table = self.wrap_table(blueprint)
return 'ALTER TABLE %s DROP INDEX %s' % (table, command.index)
def compile_drop_foreign(self, blueprint, command, _):
table = self.wrap_table(blueprint)
return 'ALTER TABLE %s DROP FOREIGN KEY %s' % (table, command.index)
def compile_rename(self, blueprint, command, _):
from_ = self.wrap_table(blueprint)
return 'RENAME TABLE %s TO %s' % (from_, self.wrap_table(command.to))
def _type_char(self, column):
return "CHAR(%s)" % column.length
def _type_string(self, column):
return "VARCHAR(%s)" % column.length
def _type_text(self, column):
return 'TEXT'
def _type_medium_text(self, column):
return 'MEDIUMTEXT'
def _type_long_text(self, column):
return 'LONGTEXT'
def _type_integer(self, column):
return 'INT'
def _type_big_integer(self, column):
return 'BIGINT'
def _type_medium_integer(self, column):
return 'MEDIUMINT'
def _type_tiny_integer(self, column):
return 'TINYINT'
def _type_small_integer(self, column):
return 'SMALLINT'
def _type_float(self, column):
return self._type_double(column)
def _type_double(self, column):
if column.total and column.places:
return 'DOUBLE(%s, %s)' % (column.total, column.places)
return 'DOUBLE'
def _type_decimal(self, column):
return 'DECIMAL(%s, %s)' % (column.total, column.places)
def _type_boolean(self, column):
return 'TINYINT(1)'
def _type_enum(self, column):
return 'ENUM(\'%s\')' % '\', \''.join(column.allowed)
def _type_json(self, column):
return 'TEXT'
def _type_date(self, column):
return 'DATE'
def _type_datetime(self, column):
return 'DATETIME'
def _type_time(self, column):
return 'TIME'
def _type_timestamp(self, column):
if getattr(column, 'nullable', False):
return 'TIMESTAMP DEFAULT 0'
return 'TIMESTAMP'
def _type_binary(self, column):
return 'BLOB'
def _modify_unsigned(self, blueprint, column):
if column.get('unsigned', False):
return ' UNSIGNED'
return ''
def _modify_charset(self, blueprint, column):
if column.get('charset'):
return ' CHARACTER SET ' + column.charset
return ''
def _modify_collate(self, blueprint, column):
if column.get('collation'):
return ' COLLATE ' + column.collation
return ''
def _modify_nullable(self, blueprint, column):
if column.get('nullable'):
return ' NULL'
return ' NOT NULL'
def _modify_default(self, blueprint, column):
if column.get('default') is not None:
return ' DEFAULT %s' % self._get_default_value(column.default)
return ''
def _modify_increment(self, blueprint, column):
if column.type in self._serials and column.auto_increment:
return ' AUTO_INCREMENT PRIMARY KEY'
return ''
def _modify_after(self, blueprint, column):
if column.get('after') is not None:
return ' AFTER ' + self.wrap(column.after)
return ''
def _modify_comment(self, blueprint, column):
if column.get('comment') is not None:
return ' COMMENT "%s"' % column.comment
return ''
def _wrap_value(self, value):
if value == '*':
return value
return '`%s`' % value.replace('`', '``')
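# A minimal, standalone sketch (not part of orator) of the identifier-quoting
# rule implemented by _wrap_value above: '*' passes through unchanged, any
# other name is wrapped in backticks, and embedded backticks are escaped by
# doubling them, e.g. 'odd`name' -> '`odd``name`'.
def _demo_wrap_value(value):
    if value == '*':
        return value
    return '`%s`' % value.replace('`', '``')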
|
|
'''@file gmm.py
contains the functionality for Kaldi GMM training, aligning and testing'''
from abc import ABCMeta, abstractproperty
import os
class GMM(object):
'''an abstract class for a kaldi GMM'''
__metaclass__ = ABCMeta
def __init__(self, conf):
'''
KaldiGMM constructor
Args:
conf: the general configurations
'''
self.conf = conf
def train(self):
'''train the GMM'''
#save the current dir
current_dir = os.getcwd()
#go to kaldi egs dir
os.chdir(self.conf.get('directories', 'kaldi_egs'))
#train the GMM
os.system('%s --cmd %s --config %s/config/%s %s %s %s %s %s' %(
self.trainscript, self.conf.get('general', 'cmd'), current_dir,
self.conf_file, self.trainops,
self.conf.get('directories', 'train_features')
+ '/' + self.conf.get('gmm-features', 'name'),
self.conf.get('directories', 'language'),
self.parent_gmm_alignments,
self.conf.get('directories', 'expdir') + '/' + self.name))
#build the decoding graphs
os.system('utils/mkgraph.sh %s %s %s %s/graph' % (
self.graphopts, self.conf.get('directories', 'language_test'),
self.conf.get('directories', 'expdir') + '/' + self.name,
self.conf.get('directories', 'expdir') + '/' + self.name))
#go back to working dir
os.chdir(current_dir)
def align(self):
'''use the GMM to align the training utterances'''
#save the current dir
current_dir = os.getcwd()
#go to kaldi egs dir
os.chdir(self.conf.get('directories', 'kaldi_egs'))
#do the alignment
        os.system(('steps/align_si.sh --nj %s --cmd %s '
                   '--config %s/config/ali_%s %s %s %s %s/ali') % (
self.conf.get('general', 'num_jobs'),
self.conf.get('general', 'cmd'), current_dir, self.conf_file,
self.conf.get('directories', 'train_features') + '/'
+ self.conf.get('gmm-features', 'name'),
self.conf.get('directories', 'language'),
self.conf.get('directories', 'expdir') + '/' + self.name,
self.conf.get('directories', 'expdir') + '/' + self.name))
#convert alignments (transition-ids) to pdf-ids
for i in range(int(self.conf.get('general', 'num_jobs'))):
            os.system(('gunzip -c %s/ali/ali.%d.gz | ali-to-pdf '
                       '%s/ali/final.mdl ark:- ark,t:- | gzip > %s/ali/pdf.%d.gz') % (
self.conf.get('directories', 'expdir') + '/' + self.name,
i+1, self.conf.get('directories', 'expdir') + '/'
+ self.name, self.conf.get('directories', 'expdir')
+ '/' + self.name, i+1))
#go back to working dir
os.chdir(current_dir)
def test(self):
'''test the GMM on the testing set'''
#save the current dir
current_dir = os.getcwd()
#go to kaldi egs dir
os.chdir(self.conf.get('directories', 'kaldi_egs'))
        os.system(('steps/decode.sh --cmd %s --nj %s %s/graph %s %s/decode '
                   '| tee %s/decode.log || exit 1;') % (
self.conf.get('general', 'cmd'),
self.conf.get('general', 'num_jobs'),
self.conf.get('directories', 'expdir') + '/' + self.name,
self.conf.get('directories', 'test_features') + '/'
+ self.conf.get('gmm-features', 'name'),
self.conf.get('directories', 'expdir') + '/' + self.name,
self.conf.get('directories', 'expdir') + '/' + self.name))
#go back to working dir
os.chdir(current_dir)
@abstractproperty
def name(self):
'''the name of the GMM'''
pass
@abstractproperty
def trainscript(self):
'''the script used for training the GMM'''
pass
@abstractproperty
def conf_file(self):
'''the configuration file for this GMM'''
pass
@abstractproperty
def parent_gmm_alignments(self):
'''the path to the parent GMM model (empty for monophone GMM)'''
pass
@abstractproperty
def trainops(self):
'''the extra options for GMM training'''
pass
@abstractproperty
def graphopts(self):
'''the extra options for the decoding graph creation'''
pass
class MonoGmm(GMM):
''' a class for the monophone GMM'''
@property
def name(self):
return self.conf.get('mono_gmm', 'name')
@property
def trainscript(self):
return 'steps/train_mono.sh'
@property
def conf_file(self):
return 'mono.conf'
@property
def parent_gmm_alignments(self):
return ''
@property
def trainops(self):
return '--nj %s' % self.conf.get('general', 'num_jobs')
@property
def graphopts(self):
return '--mono'
class TriGmm(GMM):
'''a class for the triphone GMM'''
@property
def name(self):
return self.conf.get('tri_gmm', 'name')
@property
def trainscript(self):
return 'steps/train_deltas.sh'
@property
def conf_file(self):
return 'tri.conf'
@property
def parent_gmm_alignments(self):
return (self.conf.get('directories', 'expdir') + '/'
+ self.conf.get('mono_gmm', 'name') + '/ali')
@property
def trainops(self):
return (self.conf.get('tri_gmm', 'num_leaves') + ' '
+ self.conf.get('tri_gmm', 'tot_gauss'))
@property
def graphopts(self):
return ''
class LdaGmm(GMM):
'''a class for the LDA+MLLT GMM'''
@property
def name(self):
return self.conf.get('lda_mllt', 'name')
@property
def trainscript(self):
return 'steps/train_lda_mllt.sh'
@property
def conf_file(self):
return 'lda_mllt.conf'
@property
def parent_gmm_alignments(self):
return (self.conf.get('directories', 'expdir') + '/'
+ self.conf.get('tri_gmm', 'name') + '/ali')
@property
def trainops(self):
        return '--context-opts "--context_width=%s" %s %s' % (
            self.conf.get('lda_mllt', 'context_width'),
            self.conf.get('lda_mllt', 'num_leaves'),
            self.conf.get('lda_mllt', 'tot_gauss'))
@property
def graphopts(self):
return ''
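# A usage sketch (assumption, not part of the original project): the GMM
# subclasses above expect a ConfigParser-style object whose sections and
# options match the self.conf.get(...) calls in this file, e.g. [directories],
# [general], [gmm-features], [mono_gmm], [tri_gmm] and [lda_mllt].
def _demo_mono_gmm(config_path='config/config.cfg'):
    # 'config_path' is a hypothetical location; adjust to your setup.
    try:
        import configparser                      # Python 3
    except ImportError:
        import ConfigParser as configparser      # Python 2
    conf = configparser.ConfigParser()
    conf.read(config_path)
    mono = MonoGmm(conf)
    # Properties that do not touch the config are safe to inspect directly.
    print(mono.trainscript)                      # steps/train_mono.sh
    print(mono.graphopts)                        # --mono
    return mono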
|
|
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2010-2015, Michigan State University.
# Copyright (C) 2015-2016, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: [email protected]
# pylint: disable=too-few-public-methods,no-init,missing-docstring
"""This is khmer; please see http://khmer.readthedocs.io/."""
from collections import namedtuple
from math import log
import json
from khmer._khmer import Read
from khmer._khmer import forward_hash
# tests/test_{functions,countgraph,counting_single}.py
from khmer._khmer import forward_hash_no_rc # tests/test_functions.py
from khmer._khmer import reverse_hash # tests/test_functions.py
# tests/counting_single.py
from khmer._khmer import hash_murmur3 # tests/test_functions.py
from khmer._khmer import hash_no_rc_murmur3 # tests/test_functions.py
from khmer._khmer import reverse_complement
from khmer._khmer import get_version_cpp as __version_cpp__
# tests/test_version.py
from khmer._khmer import ReadParser # sandbox/to-casava-1.8-fastq.py
# tests/test_read_parsers.py,scripts/{filter-abund-single,load-graph}.py
# scripts/{abundance-dist-single,load-into-counting}.py
from khmer._khmer import FILETYPES
from khmer._oxli.graphs import (Counttable, QFCounttable, Nodetable,
CyclicCounttable,
SmallCounttable, Countgraph, SmallCountgraph,
Nodegraph)
from khmer._oxli.labeling import GraphLabels
from khmer._oxli.legacy_partitioning import SubsetPartition, PrePartitionInfo
from khmer._oxli.parsing import FastxParser
from khmer._oxli.readaligner import ReadAligner
from khmer._oxli.utils import get_n_primes_near_x, is_prime
import sys
from struct import pack, unpack
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
_buckets_per_byte = {
    # calculated by hand from settings in third-party/cqf/gqf.h
'qfcounttable': 1 / 1.26,
'countgraph': 1,
'smallcountgraph': 2,
'nodegraph': 8,
}
def extract_nodegraph_info(filename):
"""Open the given nodegraph file and return a tuple of information.
Returns: the k-mer size, the table size, the number of tables, the version
of the table format, and the type of table flag.
Keyword argument:
filename -- the name of the nodegraph file to inspect
"""
ksize = None
n_tables = None
table_size = None
signature = None
version = None
ht_type = None
occupied = None
uint_size = len(pack('I', 0))
uchar_size = len(pack('B', 0))
ulonglong_size = len(pack('Q', 0))
try:
with open(filename, 'rb') as nodegraph:
signature, = unpack('4s', nodegraph.read(4))
version, = unpack('B', nodegraph.read(1))
ht_type, = unpack('B', nodegraph.read(1))
ksize, = unpack('I', nodegraph.read(uint_size))
n_tables, = unpack('B', nodegraph.read(uchar_size))
occupied, = unpack('Q', nodegraph.read(ulonglong_size))
table_size, = unpack('Q', nodegraph.read(ulonglong_size))
if signature != b"OXLI":
raise ValueError("Node graph '{}' is missing file type "
"signature".format(filename) + str(signature))
except:
raise ValueError("Node graph '{}' is corrupt ".format(filename))
return ksize, round(table_size, -2), n_tables, version, ht_type, occupied
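# A hedged self-check (not from khmer) that writes a minimal OXLI nodegraph
# header in exactly the field order extract_nodegraph_info() reads it:
# 4-byte signature, version (B), ht_type (B), ksize (I), n_tables (B),
# occupied (Q), table_size (Q). The field values below are arbitrary examples.
def _demo_extract_nodegraph_info():
    import os
    import tempfile
    header = (pack('4s', b'OXLI') + pack('B', 4) + pack('B', 2) +
              pack('I', 21) + pack('B', 4) + pack('Q', 0) +
              pack('Q', 100000))
    with tempfile.NamedTemporaryFile(suffix='.oxli', delete=False) as temp:
        temp.write(header)
        path = temp.name
    try:
        # Expected result: (21, 100000, 4, 4, 2, 0)
        return extract_nodegraph_info(path)
    finally:
        os.remove(path)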
def extract_countgraph_info(filename):
"""Open the given countgraph file and return a tuple of information.
Return: the k-mer size, the table size, the number of tables, the bigcount
flag, the version of the table format, and the type of table flag.
Keyword argument:
filename -- the name of the countgraph file to inspect
"""
CgInfo = namedtuple("CgInfo", ['ksize', 'n_tables', 'table_size',
'use_bigcount', 'version', 'ht_type',
'n_occupied'])
ksize = None
n_tables = None
table_size = None
signature = None
version = None
ht_type = None
use_bigcount = None
occupied = None
uint_size = len(pack('I', 0))
ulonglong_size = len(pack('Q', 0))
try:
with open(filename, 'rb') as countgraph:
signature, = unpack('4s', countgraph.read(4))
version, = unpack('B', countgraph.read(1))
ht_type, = unpack('B', countgraph.read(1))
if ht_type != FILETYPES['SMALLCOUNT']:
use_bigcount, = unpack('B', countgraph.read(1))
else:
use_bigcount = None
ksize, = unpack('I', countgraph.read(uint_size))
n_tables, = unpack('B', countgraph.read(1))
occupied, = unpack('Q', countgraph.read(ulonglong_size))
table_size, = unpack('Q', countgraph.read(ulonglong_size))
if signature != b'OXLI':
raise ValueError("Count graph file '{}' is missing file type "
"signature. ".format(filename) + str(signature))
    except Exception:
        raise ValueError("Count graph file '{}' is corrupt ".format(filename))
return CgInfo(ksize, n_tables, round(table_size, -2), use_bigcount,
version, ht_type, occupied)
def calc_expected_collisions(graph, force=False, max_false_pos=.2):
"""Do a quick & dirty expected collision rate calculation on a graph.
Also check to see that collision rate is within threshold.
Keyword argument:
graph: the countgraph or nodegraph object to inspect
"""
sizes = graph.hashsizes()
n_ht = float(len(sizes))
occupancy = float(graph.n_occupied())
min_size = min(sizes)
fp_one = occupancy / min_size
fp_all = fp_one ** n_ht
if fp_all > max_false_pos:
print("**", file=sys.stderr)
print("** ERROR: the graph structure is too small for ",
file=sys.stderr)
print("** this data set. Increase data structure size",
file=sys.stderr)
print("** with --max_memory_usage/-M.", file=sys.stderr)
print("**", file=sys.stderr)
print("** Do not use these results!!", file=sys.stderr)
print("**", file=sys.stderr)
print("** (estimated false positive rate of %.3f;" % fp_all,
file=sys.stderr, end=' ')
print("max recommended %.3f)" % max_false_pos, file=sys.stderr)
print("**", file=sys.stderr)
if not force:
sys.exit(1)
return fp_all
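# A standalone restatement (sketch) of the quick false-positive estimate used
# by calc_expected_collisions() above: the per-table load factor raised to the
# number of tables, as for a Bloom-filter-like structure.
def _expected_false_positive_rate(n_occupied, table_sizes):
    fp_one_table = float(n_occupied) / min(table_sizes)
    return fp_one_table ** len(table_sizes)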
from khmer._oxli.assembly import (LinearAssembler, SimpleLabeledAssembler,
JunctionCountAssembler)
from khmer._oxli.hashset import HashSet
from khmer._oxli.hllcounter import HLLCounter
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.cast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
import platform
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class CastOpTest(test.TestCase):
def _toDataType(self, dtype):
"""Returns TensorFlow data type for numpy type."""
if dtype == np.float32:
return dtypes.float32
elif dtype == np.float64:
return dtypes.float64
elif dtype == np.int32:
return dtypes.int32
elif dtype == np.int64:
return dtypes.int64
elif dtype == np.bool:
return dtypes.bool
elif dtype == np.complex64:
return dtypes.complex64
elif dtype == np.complex128:
return dtypes.complex128
else:
return None
def _cast(self, x, dtype, use_gpu=False):
with self.cached_session(use_gpu=use_gpu):
val = constant_op.constant(x, self._toDataType(np.array([x]).dtype))
return math_ops.cast(val, self._toDataType(dtype), name="cast").eval()
def _test(self, x, dtype, use_gpu=False):
"""Tests cast(x) to dtype behaves the same as numpy.astype."""
np_ans = x.astype(dtype)
tf_ans = self._cast(x, dtype, use_gpu)
self.assertAllEqual(np_ans, tf_ans)
def _testTypes(self, x, use_gpu=False):
"""Tests cast(x) to different tf."""
if use_gpu:
type_list = [
np.float32, np.float64, np.int64, np.complex64, np.complex128
]
else:
type_list = [
np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]
for from_type in type_list:
for to_type in type_list:
self._test(x.astype(from_type), to_type, use_gpu)
self._test(x.astype(np.bool), np.float32, use_gpu)
self._test(x.astype(np.uint8), np.float32, use_gpu)
if not use_gpu:
self._test(x.astype(np.bool), np.int32, use_gpu)
self._test(x.astype(np.int32), np.int32, use_gpu)
def _testAll(self, x):
self._testTypes(x, use_gpu=False)
if x.dtype == np.float32 or x.dtype == np.float64:
self._testTypes(x, use_gpu=True)
@test_util.run_deprecated_v1
def testBasic(self):
self._testAll(np.arange(-10, 10).reshape(2, 10))
self._testAll(np.linspace(-10, 10, 17))
@test_util.run_deprecated_v1
def testSmallValues(self):
f4 = np.finfo(np.float32)
f8 = np.finfo(np.float64)
self._testAll(
np.array([
0, -1, 1, -f4.resolution, f4.resolution, f8.resolution,
-f8.resolution
]))
def testBfloat16(self):
a = np.random.uniform(-100, 100, 100).astype(np.float32)
with self.cached_session(use_gpu=False):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
with self.cached_session(use_gpu=True):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
@test_util.run_deprecated_v1
def testRandom(self):
self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))
self._testAll(np.random.normal(0, 1e6, 210).reshape([2, 3, 5, 7]))
  # Special values like int32max, int64min, inf, -inf, and nan are cast to
  # integer values in somewhat unexpected ways, and they behave differently
  # on CPU and GPU.
def _compare(self, x, dst_dtype, expected, use_gpu=False):
np.testing.assert_equal(
self._cast(
x, dst_dtype, use_gpu=use_gpu), dst_dtype(expected))
@test_util.run_deprecated_v1
def testIntToFloatBoundary(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(i4.min, np.float32, i4.min, False)
self._compare(i4.max, np.float32, i4.max, False)
self._compare(i8.min, np.float32, i8.min, False)
self._compare(i8.max, np.float32, i8.max, False)
self._compare(i4.min, np.float64, i4.min, False)
self._compare(i4.max, np.float64, i4.max, False)
self._compare(i8.min, np.float64, i8.min, False)
self._compare(i8.max, np.float64, i8.max, False)
# NOTE: GPU does not support int32/int64 for casting.
@test_util.run_deprecated_v1
def testInfNan(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(np.inf, np.float32, np.inf, False)
self._compare(np.inf, np.float64, np.inf, False)
if sys.byteorder == "big":
self._compare(np.inf, np.int32, i4.max, False)
self._compare(np.inf, np.int64, i8.max, False)
else:
# np.float64("np.inf").astype(np.int32) is negative on x86 but positive on ppc64le
# Numpy link to relevant discussion - https://github.com/numpy/numpy/issues/9040
# Tensorflow link to relevant discussion - https://github.com/tensorflow/tensorflow/issues/9360
if platform.machine() == "ppc64le":
self._compare(-np.inf, np.int32, i4.min, False)
self._compare(-np.inf, np.int64, i8.min, False)
else:
self._compare(np.inf, np.int32, i4.min, False)
self._compare(np.inf, np.int64, i8.min, False)
self._compare(-np.inf, np.float32, -np.inf, False)
self._compare(-np.inf, np.float64, -np.inf, False)
self._compare(-np.inf, np.int32, i4.min, False)
self._compare(-np.inf, np.int64, i8.min, False)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, False)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, False)), True)
self._compare(np.nan, np.int32, i4.min, False)
self._compare(np.nan, np.int64, i8.min, False)
self._compare(np.inf, np.float32, np.inf, True)
self._compare(np.inf, np.float64, np.inf, True)
self._compare(-np.inf, np.float32, -np.inf, True)
self._compare(-np.inf, np.float64, -np.inf, True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, True)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, True)), True)
def _OpError(self, x, dtype, err):
with self.cached_session():
with self.assertRaisesOpError(err):
math_ops.cast(x, dtype).eval()
def testNotImplemented(self):
self._OpError(np.arange(0, 10), dtypes.string, "Cast.*int64.*string.*")
@test_util.run_deprecated_v1
def testCastToTypeOfVariable(self):
with self.cached_session() as sess:
x = variables.Variable(5, dtype=dtypes.float32)
y = variables.Variable(True, dtype=dtypes.bool)
cast = math_ops.cast(y, x.dtype)
variables.global_variables_initializer().run()
self.assertEqual(1.0, self.evaluate(cast))
@test_util.run_deprecated_v1
def testGradients(self):
t = [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
for src_t in t:
for dst_t in t:
with self.cached_session():
x = constant_op.constant(1.0, src_t)
z = array_ops.identity(x)
y = math_ops.cast(z, dst_t)
err = gradient_checker.compute_gradient_error(x, [], y, [])
self.assertLess(err, 1e-3)
class SparseTensorCastTest(test.TestCase):
@test_util.run_deprecated_v1
def testCast(self):
indices = constant_op.constant([[0], [1], [2]], dtypes.int64)
values = constant_op.constant(np.array([1, 2, 3], np.int64))
shape = constant_op.constant([3], dtypes.int64)
st = sparse_tensor.SparseTensor(indices, values, shape)
st_cast = math_ops.cast(st, dtypes.float32)
with self.cached_session():
self.assertAllEqual(st_cast.indices.eval(), [[0], [1], [2]])
self.assertAllEqual(st_cast.values.eval(),
np.array([1, 2, 3], np.float32))
self.assertAllEqual(st_cast.dense_shape.eval(), [3])
class SaturateCastTest(test.TestCase):
def testSaturate(self):
in_types = dtypes.float32,
out_types = dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.float32
with self.cached_session() as sess:
for in_type in in_types:
for out_type in out_types:
lo, hi = in_type.min, in_type.max
x = constant_op.constant(
[lo, lo + 1, lo // 2, hi // 2, hi - 1, hi], dtype=in_type)
y = math_ops.saturate_cast(x, dtype=out_type)
self.assertEqual(y.dtype, out_type)
x, y = self.evaluate([x, y])
correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
self.assertAllEqual(correct, y)
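# A numpy-only sketch (assumption, mirroring the reference computation in
# SaturateCastTest.testSaturate above): saturate_cast clamps values to the
# target dtype's representable range before converting, instead of letting
# the conversion overflow or wrap.
def _np_saturate_cast(x, np_dtype):
  info = (np.iinfo(np_dtype) if np.issubdtype(np_dtype, np.integer)
          else np.finfo(np_dtype))
  return np.clip(x, info.min, info.max).astype(np_dtype)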
if __name__ == "__main__":
test.main()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configure build environment for certain Intel platforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import subprocess
BASIC_BUILD_OPTS = ["--cxxopt=-D_GLIBCXX_USE_CXX11_ABI=0", "--copt=-O3"]
SECURE_BUILD_OPTS = [
"--copt=-Wformat", "--copt=-Wformat-security", "--copt=-fstack-protector",
"--copt=-fPIC", "--copt=-fpic", "--linkopt=-znoexecstack",
"--linkopt=-zrelro", "--linkopt=-znow", "--linkopt=-fstack-protector"
]
class IntelPlatform(object):
min_gcc_major_version_ = 0
min_gcc_minor_version_ = 0
host_gcc_major_version_ = 0
host_gcc_minor_version_ = 0
BAZEL_PREFIX_ = "--copt="
ARCH_PREFIX_ = "-march="
FLAG_PREFIX_ = "-m"
def __init__(self, min_gcc_major_version, min_gcc_minor_version):
self.min_gcc_minor_version_ = min_gcc_minor_version
self.min_gcc_major_version_ = min_gcc_major_version
  # Return True or False depending on whether the platform optimization flags
  # can be generated by the gcc version specified in the parameters.
def set_host_gcc_version(self, gcc_major_version, gcc_minor_version):
# True only if the gcc version in the tuple is >=
# min_gcc_major_version_, min_gcc_minor_version_
if gcc_major_version < self.min_gcc_major_version_:
print("Your MAJOR version of GCC is too old: {}; "
"it must be at least {}.{}".format(gcc_major_version,
self.min_gcc_major_version_,
self.min_gcc_minor_version_))
return False
elif gcc_major_version == self.min_gcc_major_version_ and \
gcc_minor_version < self.min_gcc_minor_version_:
print("Your MINOR version of GCC is too old: {}; "
"it must be at least {}.{}".format(gcc_minor_version,
self.min_gcc_major_version_,
self.min_gcc_minor_version_))
return False
print("gcc version OK: {}.{}".format(gcc_major_version, gcc_minor_version))
self.host_gcc_major_version_ = gcc_major_version
self.host_gcc_minor_version_ = gcc_minor_version
return True
# return a string with all the necessary bazel formatted flags for this
# platform in this gcc environment
def get_bazel_gcc_flags(self):
raise NotImplementedError(self)
# Returns True if the host gcc version is older than the gcc version in which
# the new march flag became available.
# Specify the version in which the new name usage began
def use_old_arch_names(self, gcc_new_march_major_version,
gcc_new_march_minor_version):
if self.host_gcc_major_version_ < gcc_new_march_major_version:
return True
elif self.host_gcc_major_version_ == gcc_new_march_major_version and \
self.host_gcc_minor_version_ < gcc_new_march_minor_version:
return True
return False
class NehalemPlatform(IntelPlatform):
def __init__(self):
IntelPlatform.__init__(self, 4, 8)
def get_bazel_gcc_flags(self):
NEHALEM_ARCH_OLD = "corei7"
NEHALEM_ARCH_NEW = "nehalem"
if self.use_old_arch_names(4, 9):
return self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
NEHALEM_ARCH_OLD + " "
else:
return self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
NEHALEM_ARCH_NEW + " "
class SandyBridgePlatform(IntelPlatform):
def __init__(self):
IntelPlatform.__init__(self, 4, 8)
def get_bazel_gcc_flags(self):
SANDYBRIDGE_ARCH_OLD = "corei7-avx"
SANDYBRIDGE_ARCH_NEW = "sandybridge"
if self.use_old_arch_names(4, 9):
return self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
SANDYBRIDGE_ARCH_OLD + " "
else:
return self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
SANDYBRIDGE_ARCH_NEW + " "
class HaswellPlatform(IntelPlatform):
def __init__(self):
IntelPlatform.__init__(self, 4, 8)
def get_bazel_gcc_flags(self):
HASWELL_ARCH_OLD = "core-avx2" # Only missing the POPCNT instruction
HASWELL_ARCH_NEW = "haswell"
POPCNT_FLAG = "popcnt"
if self.use_old_arch_names(4, 9):
ret_val = self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
HASWELL_ARCH_OLD + " "
return ret_val + self.BAZEL_PREFIX_ + self.FLAG_PREFIX_ + \
POPCNT_FLAG + " "
else:
return self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
HASWELL_ARCH_NEW + " "
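# A hedged sanity check (not part of the original script): with a host gcc
# newer than the 4.9 cutoff used by use_old_arch_names(), HaswellPlatform
# should emit the modern -march name, already prefixed for bazel.
def _demo_haswell_flags(gcc_major=9, gcc_minor=2):
  platform_ = HaswellPlatform()
  if platform_.set_host_gcc_version(gcc_major, gcc_minor):
    return platform_.get_bazel_gcc_flags()  # "--copt=-march=haswell "
  return ""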
class SkylakePlatform(IntelPlatform):
def __init__(self):
IntelPlatform.__init__(self, 4, 9)
def get_bazel_gcc_flags(self):
SKYLAKE_ARCH_OLD = "broadwell" # Only missing the POPCNT instruction
SKYLAKE_ARCH_NEW = "skylake-avx512"
# the flags that broadwell is missing: pku, clflushopt, clwb, avx512vl,
# avx512bw, avx512dq. xsavec and xsaves are available in gcc 5.x
# but for now, just exclude them.
AVX512_FLAGS = ["avx512f", "avx512cd"]
if self.use_old_arch_names(6, 1):
ret_val = self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
SKYLAKE_ARCH_OLD + " "
for flag in AVX512_FLAGS:
ret_val += self.BAZEL_PREFIX_ + self.FLAG_PREFIX_ + flag + " "
return ret_val
else:
return self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
SKYLAKE_ARCH_NEW + " "
class CascadelakePlatform(IntelPlatform):
def __init__(self):
IntelPlatform.__init__(self, 8, 3)
def get_bazel_gcc_flags(self):
    CASCADELAKE_ARCH_OLD = "skylake-avx512"  # only missing the AVX512 VNNI instruction
    CASCADELAKE_ARCH_NEW = "cascadelake"
    # AVX512 VNNI is the only flag that skylake-avx512 lacks relative to
    # cascadelake, so add it explicitly when gcc is too old for the new name.
    VNNI_FLAG = "avx512vnni"
    if self.use_old_arch_names(9, 1):
      ret_val = self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
                CASCADELAKE_ARCH_OLD + " "
      return ret_val + self.BAZEL_PREFIX_ + self.FLAG_PREFIX_ + \
VNNI_FLAG + " "
else:
return self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
CASCADELAKE_ARCH_NEW + " "
class BuildEnvSetter(object):
"""Prepares the proper environment settings for various Intel platforms."""
default_platform_ = "haswell"
PLATFORMS_ = {
"nehalem": NehalemPlatform(),
"sandybridge": SandyBridgePlatform(),
"haswell": HaswellPlatform(),
"skylake": SkylakePlatform(),
"cascadelake": CascadelakePlatform()
}
def __init__(self):
self.args = None
self.bazel_flags_ = "build "
self.target_platform_ = None
# Return a tuple of the current gcc version
def get_gcc_version(self):
gcc_major_version = 0
gcc_minor_version = 0
# check to see if gcc is present
gcc_path = ""
gcc_path_cmd = "command -v gcc"
try:
gcc_path = subprocess.check_output(gcc_path_cmd, shell=True,
stderr=subprocess.STDOUT).\
strip()
print("gcc located here: {}".format(gcc_path))
if not os.access(gcc_path, os.F_OK | os.X_OK):
raise ValueError(
"{} does not exist or is not executable.".format(gcc_path))
gcc_output = subprocess.check_output(
[gcc_path, "-dumpfullversion", "-dumpversion"],
stderr=subprocess.STDOUT).strip()
# handle python2 vs 3 (bytes vs str type)
if isinstance(gcc_output, bytes):
gcc_output = gcc_output.decode("utf-8")
print("gcc version: {}".format(gcc_output))
gcc_info = gcc_output.split(".")
gcc_major_version = int(gcc_info[0])
gcc_minor_version = int(gcc_info[1])
    except subprocess.CalledProcessError as e:
print("Problem getting gcc info: {}".format(e))
gcc_major_version = 0
gcc_minor_version = 0
return gcc_major_version, gcc_minor_version
def parse_args(self):
"""Set up argument parser, and parse CLI args."""
arg_parser = argparse.ArgumentParser(
description="Parse the arguments for the "
"TensorFlow build environment "
" setter")
arg_parser.add_argument(
"--disable-mkl",
dest="disable_mkl",
help="Turn off MKL. By default the compiler flag "
"--config=mkl is enabled.",
action="store_true")
arg_parser.add_argument(
"--disable-v2",
dest="disable_v2",
help="Don't build TensorFlow v2. By default the "
" compiler flag --config=v2 is enabled.",
action="store_true")
arg_parser.add_argument(
"--enable-bfloat16",
dest="enable_bfloat16",
help="Enable bfloat16 build. By default it is "
" disabled if no parameter is passed.",
action="store_true")
arg_parser.add_argument(
"-s",
"--secure-build",
dest="secure_build",
help="Enable secure build flags.",
action="store_true")
arg_parser.add_argument(
"-p",
"--platform",
choices=self.PLATFORMS_.keys(),
help="The target platform.",
dest="target_platform",
default=self.default_platform_)
arg_parser.add_argument(
"-f",
"--bazelrc-file",
dest="bazelrc_file",
help="The full path to the bazelrc file into which "
"the build command will be written. The path "
"will be relative to the container "
" environment.",
required=True)
self.args = arg_parser.parse_args()
def validate_args(self):
# Check the bazelrc file
if os.path.exists(self.args.bazelrc_file):
if os.path.isfile(self.args.bazelrc_file):
self._debug("The file {} exists and will be deleted.".format(
self.args.bazelrc_file))
elif os.path.isdir(self.args.bazelrc_file):
print("You can't write bazel config to \"{}\" "
"because it is a directory".format(self.args.bazelrc_file))
return False
# Validate gcc with the requested platform
gcc_major_version, gcc_minor_version = self.get_gcc_version()
if gcc_major_version == 0 or \
not self.target_platform_.set_host_gcc_version(
gcc_major_version, gcc_minor_version):
return False
return True
def set_build_args(self):
"""Generate Bazel build flags."""
for flag in BASIC_BUILD_OPTS:
self.bazel_flags_ += "{} ".format(flag)
if self.args.secure_build:
for flag in SECURE_BUILD_OPTS:
self.bazel_flags_ += "{} ".format(flag)
if not self.args.disable_mkl:
self.bazel_flags_ += "--config=mkl "
if not self.args.disable_v2:
self.bazel_flags_ += "--config=v2 "
if self.args.enable_bfloat16:
self.bazel_flags_ += "--copt=-DENABLE_INTEL_MKL_BFLOAT16 "
self.bazel_flags_ += self.target_platform_.get_bazel_gcc_flags()
def write_build_args(self):
self._debug("Writing build flags: {}".format(self.bazel_flags_))
with open(self.args.bazelrc_file, "w") as f:
f.write(self.bazel_flags_ + "\n")
def _debug(self, msg):
print(msg)
def go(self):
self.parse_args()
self.target_platform_ = self.PLATFORMS_.get(self.args.target_platform)
if self.validate_args():
self.set_build_args()
self.write_build_args()
else:
print("Error.")
env_setter = BuildEnvSetter()
env_setter.go()
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long, too-many-locals, too-many-statements
import json
import time
import sys
from itertools import chain
from knack.log import get_logger
from knack.util import CLIError
from azure.appconfiguration import (ConfigurationSetting,
ResourceReadOnlyError)
from azure.core import MatchConditions
from azure.cli.core.util import user_confirmation
from azure.core.exceptions import (HttpResponseError,
ResourceNotFoundError,
ResourceModifiedError)
from ._constants import (FeatureFlagConstants, KeyVaultConstants,
SearchFilterOptions, StatusCodes, ImportExportProfiles)
from ._models import (convert_configurationsetting_to_keyvalue,
convert_keyvalue_to_configurationsetting)
from ._utils import get_appconfig_data_client, prep_label_filter_for_url_encoding
from ._kv_helpers import (__compare_kvs_for_restore, __read_kv_from_file, __read_features_from_file,
__write_kv_and_features_to_file, __read_kv_from_config_store, __is_json_content_type,
__write_kv_and_features_to_config_store, __discard_features_from_retrieved_kv,
__read_kv_from_app_service, __write_kv_to_app_service, __print_restore_preview,
__serialize_kv_list_to_comparable_json_object, __print_preview,
__serialize_features_from_kv_list_to_comparable_json_object, __export_kvset_to_file,
__serialize_feature_list_to_comparable_json_object, __print_features_preview,
__import_kvset_from_file)
from .feature import list_feature
logger = get_logger(__name__)
def import_config(cmd,
source,
name=None,
connection_string=None,
label=None,
prefix="", # prefix to add
yes=False,
skip_features=False,
content_type=None,
auth_mode="key",
endpoint=None,
# from-file parameters
path=None,
format_=None,
separator=None,
depth=None,
profile=ImportExportProfiles.DEFAULT,
# from-configstore parameters
src_name=None,
src_connection_string=None,
src_key=None,
src_label=None,
preserve_labels=False,
src_auth_mode="key",
src_endpoint=None,
# from-appservice parameters
appservice_account=None):
src_features = []
dest_features = []
dest_kvs = []
source = source.lower()
profile = profile.lower()
format_ = format_.lower() if format_ else None
azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint)
# fetch key values from source
if source == 'file':
if profile == ImportExportProfiles.KVSET:
__import_kvset_from_file(client=azconfig_client, path=path, yes=yes)
return
if format_ and content_type:
# JSON content type is only supported with JSON format.
# Error out if user has provided JSON content type with any other format.
if format_ != 'json' and __is_json_content_type(content_type):
raise CLIError("Failed to import '{}' file format with '{}' content type. Please provide JSON file format to match your content type.".format(format_, content_type))
if separator:
# If separator is provided, use max depth by default unless depth is specified.
depth = sys.maxsize if depth is None else int(depth)
else:
if depth and int(depth) != 1:
logger.warning("Cannot flatten hierarchical data without a separator. --depth argument will be ignored.")
depth = 1
src_kvs = __read_kv_from_file(file_path=path,
format_=format_,
separator=separator,
prefix_to_add=prefix,
depth=depth,
content_type=content_type)
if not skip_features:
# src_features is a list of KeyValue objects
src_features = __read_features_from_file(file_path=path, format_=format_)
elif source == 'appconfig':
src_azconfig_client = get_appconfig_data_client(cmd, src_name, src_connection_string, src_auth_mode, src_endpoint)
if label is not None and preserve_labels:
raise CLIError("Import failed! Please provide only one of these arguments: '--label' or '--preserve-labels'. See 'az appconfig kv import -h' for examples.")
if preserve_labels:
# We need label to be the same as src_label for preview later.
# This will have no effect on label while writing to config store
# as we check preserve_labels again before labelling KVs.
label = src_label
src_kvs = __read_kv_from_config_store(src_azconfig_client,
key=src_key,
label=src_label if src_label else SearchFilterOptions.EMPTY_LABEL,
prefix_to_add=prefix)
# We need to separate KV from feature flags
__discard_features_from_retrieved_kv(src_kvs)
if not skip_features:
# Get all Feature flags with matching label
all_features = __read_kv_from_config_store(src_azconfig_client,
key=FeatureFlagConstants.FEATURE_FLAG_PREFIX + '*',
label=src_label if src_label else SearchFilterOptions.EMPTY_LABEL)
for feature in all_features:
if feature.content_type == FeatureFlagConstants.FEATURE_FLAG_CONTENT_TYPE:
src_features.append(feature)
elif source == 'appservice':
src_kvs = __read_kv_from_app_service(
cmd, appservice_account=appservice_account, prefix_to_add=prefix, content_type=content_type)
# if customer needs preview & confirmation
if not yes:
# fetch key values from user's configstore
dest_kvs = __read_kv_from_config_store(azconfig_client,
key=SearchFilterOptions.ANY_KEY,
label=label if label else SearchFilterOptions.EMPTY_LABEL)
__discard_features_from_retrieved_kv(dest_kvs)
# generate preview and wait for user confirmation
need_kv_change = __print_preview(
old_json=__serialize_kv_list_to_comparable_json_object(keyvalues=dest_kvs, level=source),
new_json=__serialize_kv_list_to_comparable_json_object(keyvalues=src_kvs, level=source))
need_feature_change = False
if src_features and not skip_features:
# Append all features to dest_features list
all_features = __read_kv_from_config_store(azconfig_client,
key=FeatureFlagConstants.FEATURE_FLAG_PREFIX + '*',
label=label if label else SearchFilterOptions.EMPTY_LABEL)
for feature in all_features:
if feature.content_type == FeatureFlagConstants.FEATURE_FLAG_CONTENT_TYPE:
dest_features.append(feature)
need_feature_change = __print_features_preview(
old_json=__serialize_features_from_kv_list_to_comparable_json_object(keyvalues=dest_features),
new_json=__serialize_features_from_kv_list_to_comparable_json_object(keyvalues=src_features))
if not need_kv_change and not need_feature_change:
return
user_confirmation("Do you want to continue? \n")
# append all feature flags to src_kvs list
src_kvs.extend(src_features)
# import into configstore
__write_kv_and_features_to_config_store(azconfig_client,
key_values=src_kvs,
label=label,
preserve_labels=preserve_labels,
content_type=content_type)
def export_config(cmd,
destination,
name=None,
connection_string=None,
label=None,
key=None,
prefix="", # prefix to remove
yes=False,
skip_features=False,
skip_keyvault=False,
auth_mode="key",
endpoint=None,
# to-file parameters
path=None,
format_=None,
separator=None,
naming_convention='pascal',
resolve_keyvault=False,
profile=ImportExportProfiles.DEFAULT,
# to-config-store parameters
dest_name=None,
dest_connection_string=None,
dest_label=None,
preserve_labels=False,
dest_auth_mode="key",
dest_endpoint=None,
# to-app-service parameters
appservice_account=None):
src_features = []
dest_features = []
dest_kvs = []
destination = destination.lower()
profile = profile.lower()
format_ = format_.lower() if format_ else None
naming_convention = naming_convention.lower()
azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint)
dest_azconfig_client = None
if destination == 'appconfig':
if dest_label is not None and preserve_labels:
raise CLIError("Export failed! Please provide only one of these arguments: '--dest-label' or '--preserve-labels'. See 'az appconfig kv export -h' for examples.")
if preserve_labels:
# We need dest_label to be the same as label for preview later.
# This will have no effect on label while writing to config store
# as we check preserve_labels again before labelling KVs.
dest_label = label
dest_azconfig_client = get_appconfig_data_client(cmd, dest_name, dest_connection_string, dest_auth_mode, dest_endpoint)
# fetch key values from user's configstore
src_kvs = __read_kv_from_config_store(azconfig_client,
key=key,
label=label if label else SearchFilterOptions.EMPTY_LABEL,
prefix_to_remove=prefix,
cli_ctx=cmd.cli_ctx if resolve_keyvault else None)
if skip_keyvault:
src_kvs = [keyvalue for keyvalue in src_kvs if keyvalue.content_type != KeyVaultConstants.KEYVAULT_CONTENT_TYPE]
# We need to separate KV from feature flags for the default export profile and only need to discard
# if skip_features is true for the appconfig/kvset export profile.
if profile == ImportExportProfiles.DEFAULT or (profile == ImportExportProfiles.KVSET and skip_features):
__discard_features_from_retrieved_kv(src_kvs)
if profile == ImportExportProfiles.KVSET:
__export_kvset_to_file(file_path=path, keyvalues=src_kvs, yes=yes)
return
if not skip_features:
# Get all Feature flags with matching label
if (destination == 'file' and format_ == 'properties') or destination == 'appservice':
skip_features = True
logger.warning("Exporting feature flags to properties file or appservice is currently not supported.")
else:
# src_features is a list of FeatureFlag objects
src_features = list_feature(cmd,
feature='*',
label=label if label else SearchFilterOptions.EMPTY_LABEL,
name=name,
connection_string=connection_string,
all_=True,
auth_mode=auth_mode,
endpoint=endpoint)
# if customer needs preview & confirmation
if not yes:
if destination == 'appconfig':
# dest_kvs contains features and KV that match the label
dest_kvs = __read_kv_from_config_store(dest_azconfig_client,
key=SearchFilterOptions.ANY_KEY,
label=dest_label if dest_label else SearchFilterOptions.EMPTY_LABEL)
__discard_features_from_retrieved_kv(dest_kvs)
if not skip_features:
# Append all features to dest_features list
dest_features = list_feature(cmd,
feature='*',
label=dest_label if dest_label else SearchFilterOptions.EMPTY_LABEL,
name=dest_name,
connection_string=dest_connection_string,
all_=True,
auth_mode=dest_auth_mode,
endpoint=dest_endpoint)
elif destination == 'appservice':
dest_kvs = __read_kv_from_app_service(cmd, appservice_account=appservice_account)
# generate preview and wait for user confirmation
need_kv_change = __print_preview(
old_json=__serialize_kv_list_to_comparable_json_object(keyvalues=dest_kvs, level=destination),
new_json=__serialize_kv_list_to_comparable_json_object(keyvalues=src_kvs, level=destination))
need_feature_change = False
if src_features:
need_feature_change = __print_features_preview(
old_json=__serialize_feature_list_to_comparable_json_object(features=dest_features),
new_json=__serialize_feature_list_to_comparable_json_object(features=src_features))
if not need_kv_change and not need_feature_change:
return
user_confirmation("Do you want to continue? \n")
# export to destination
if destination == 'file':
__write_kv_and_features_to_file(file_path=path, key_values=src_kvs, features=src_features,
format_=format_, separator=separator, skip_features=skip_features,
naming_convention=naming_convention)
elif destination == 'appconfig':
__write_kv_and_features_to_config_store(dest_azconfig_client, key_values=src_kvs, features=src_features,
label=dest_label, preserve_labels=preserve_labels)
elif destination == 'appservice':
__write_kv_to_app_service(cmd, key_values=src_kvs, appservice_account=appservice_account)
def set_key(cmd,
key,
name=None,
label=None,
content_type=None,
tags=None,
value=None,
yes=False,
connection_string=None,
auth_mode="key",
endpoint=None):
azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint)
if content_type:
if content_type.lower() == KeyVaultConstants.KEYVAULT_CONTENT_TYPE:
logger.warning("There is a dedicated command to set key vault reference. 'appconfig kv set-keyvault -h'")
elif content_type.lower() == FeatureFlagConstants.FEATURE_FLAG_CONTENT_TYPE:
logger.warning("There is a dedicated command to set feature flag. 'appconfig feature set -h'")
retry_times = 3
retry_interval = 1
label = label if label and label != SearchFilterOptions.EMPTY_LABEL else None
for i in range(0, retry_times):
retrieved_kv = None
set_kv = None
new_kv = None
try:
retrieved_kv = azconfig_client.get_configuration_setting(key=key, label=label)
except ResourceNotFoundError:
logger.debug("Key '%s' with label '%s' not found. A new key-value will be created.", key, label)
except HttpResponseError as exception:
raise CLIError("Failed to retrieve key-values from config store. " + str(exception))
if retrieved_kv is None:
if __is_json_content_type(content_type):
try:
# Ensure that provided value is valid JSON. Error out if value is invalid JSON.
value = 'null' if value is None else value
json.loads(value)
except ValueError:
raise CLIError('Value "{}" is not a valid JSON object, which conflicts with the content type "{}".'.format(value, content_type))
set_kv = ConfigurationSetting(key=key,
label=label,
value="" if value is None else value,
content_type="" if content_type is None else content_type,
tags=tags)
else:
value = retrieved_kv.value if value is None else value
content_type = retrieved_kv.content_type if content_type is None else content_type
if __is_json_content_type(content_type):
try:
# Ensure that provided/existing value is valid JSON. Error out if value is invalid JSON.
json.loads(value)
except (TypeError, ValueError):
raise CLIError('Value "{}" is not a valid JSON object, which conflicts with the content type "{}". Set the value again in valid JSON format.'.format(value, content_type))
set_kv = ConfigurationSetting(key=key,
label=label,
value=value,
content_type=content_type,
tags=retrieved_kv.tags if tags is None else tags,
read_only=retrieved_kv.read_only,
etag=retrieved_kv.etag)
verification_kv = {
"key": set_kv.key,
"label": set_kv.label,
"content_type": set_kv.content_type,
"value": set_kv.value,
"tags": set_kv.tags
}
entry = json.dumps(verification_kv, indent=2, sort_keys=True, ensure_ascii=False)
confirmation_message = "Are you sure you want to set the key: \n" + entry + "\n"
user_confirmation(confirmation_message, yes)
try:
if set_kv.etag is None:
new_kv = azconfig_client.add_configuration_setting(set_kv)
else:
new_kv = azconfig_client.set_configuration_setting(set_kv, match_condition=MatchConditions.IfNotModified)
return convert_configurationsetting_to_keyvalue(new_kv)
except ResourceReadOnlyError:
raise CLIError("Failed to update read only key-value. Unlock the key-value before updating it.")
except HttpResponseError as exception:
if exception.status_code == StatusCodes.PRECONDITION_FAILED:
logger.debug('Retrying setting %s times with exception: concurrent setting operations', i + 1)
time.sleep(retry_interval)
else:
raise CLIError("Failed to set the key-value due to an exception: " + str(exception))
except Exception as exception:
raise CLIError("Failed to set the key-value due to an exception: " + str(exception))
raise CLIError("Failed to set the key '{}' due to a conflicting operation.".format(key))
def set_keyvault(cmd,
key,
secret_identifier,
name=None,
label=None,
tags=None,
yes=False,
connection_string=None,
auth_mode="key",
endpoint=None):
azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint)
keyvault_ref_value = json.dumps({"uri": secret_identifier}, ensure_ascii=False, separators=(',', ':'))
retry_times = 3
retry_interval = 1
label = label if label and label != SearchFilterOptions.EMPTY_LABEL else None
for i in range(0, retry_times):
retrieved_kv = None
set_kv = None
new_kv = None
try:
retrieved_kv = azconfig_client.get_configuration_setting(key=key, label=label)
except ResourceNotFoundError:
logger.debug("Key '%s' with label '%s' not found. A new key-vault reference will be created.", key, label)
except HttpResponseError as exception:
raise CLIError("Failed to retrieve key-values from config store. " + str(exception))
if retrieved_kv is None:
set_kv = ConfigurationSetting(key=key,
label=label,
value=keyvault_ref_value,
content_type=KeyVaultConstants.KEYVAULT_CONTENT_TYPE,
tags=tags)
else:
set_kv = ConfigurationSetting(key=key,
label=label,
value=keyvault_ref_value,
content_type=KeyVaultConstants.KEYVAULT_CONTENT_TYPE,
tags=retrieved_kv.tags if tags is None else tags,
read_only=retrieved_kv.read_only,
etag=retrieved_kv.etag)
verification_kv = {
"key": set_kv.key,
"label": set_kv.label,
"content_type": set_kv.content_type,
"value": set_kv.value,
"tags": set_kv.tags
}
entry = json.dumps(verification_kv, indent=2, sort_keys=True, ensure_ascii=False)
confirmation_message = "Are you sure you want to set the keyvault reference: \n" + entry + "\n"
user_confirmation(confirmation_message, yes)
try:
if set_kv.etag is None:
new_kv = azconfig_client.add_configuration_setting(set_kv)
else:
new_kv = azconfig_client.set_configuration_setting(set_kv, match_condition=MatchConditions.IfNotModified)
return convert_configurationsetting_to_keyvalue(new_kv)
except ResourceReadOnlyError:
raise CLIError("Failed to update read only key vault reference. Unlock the key vault reference before updating it.")
except HttpResponseError as exception:
if exception.status_code == StatusCodes.PRECONDITION_FAILED:
logger.debug('Retrying setting %s times with exception: concurrent setting operations', i + 1)
time.sleep(retry_interval)
else:
raise CLIError("Failed to set the keyvault reference due to an exception: " + str(exception))
except Exception as exception:
raise CLIError("Failed to set the keyvault reference due to an exception: " + str(exception))
raise CLIError("Failed to set the keyvault reference '{}' due to a conflicting operation.".format(key))
def delete_key(cmd,
key,
name=None,
label=None,
yes=False,
connection_string=None,
auth_mode="key",
endpoint=None):
azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint)
# list_configuration_settings returns kv with null label when:
# label = ASCII null 0x00, or URL encoded %00
# In delete, import and export commands, we treat missing --label as null label
# In list, restore and revision commands, we treat missing --label as all labels
entries = __read_kv_from_config_store(azconfig_client,
key=key,
label=label if label else SearchFilterOptions.EMPTY_LABEL)
confirmation_message = "Found '{}' key-values matching the specified key and label. Are you sure you want to delete these key-values?".format(len(entries))
user_confirmation(confirmation_message, yes)
deleted_entries = []
exception_messages = []
for entry in entries:
try:
deleted_kv = azconfig_client.delete_configuration_setting(key=entry.key,
label=entry.label,
etag=entry.etag,
match_condition=MatchConditions.IfNotModified)
deleted_entries.append(convert_configurationsetting_to_keyvalue(deleted_kv))
except ResourceReadOnlyError:
exception = "Failed to delete read-only key-value with key '{}' and label '{}'. Unlock the key-value before deleting it.".format(entry.key, entry.label)
exception_messages.append(exception)
except ResourceModifiedError:
exception = "Failed to delete key-value with key '{}' and label '{}' due to a conflicting operation.".format(entry.key, entry.label)
exception_messages.append(exception)
except HttpResponseError as ex:
exception_messages.append(str(ex))
raise CLIError('Delete operation failed. The following error(s) occurred:\n' + json.dumps(exception_messages, indent=2, ensure_ascii=False))
# Log errors if partially succeeded
if exception_messages:
if deleted_entries:
logger.error('Delete operation partially failed. The following error(s) occurred:\n%s\n',
json.dumps(exception_messages, indent=2, ensure_ascii=False))
else:
raise CLIError('Delete operation failed. \n' + json.dumps(exception_messages, indent=2, ensure_ascii=False))
return deleted_entries
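# A hedged illustration (not from the original module) of the label filter
# conventions described in the comment inside delete_key() above: delete,
# import and export treat a missing --label as the null label, while list,
# restore and revision treat it as "any label".
def _effective_label_filter(label, treat_missing_as_null):
    if label:
        return label
    return (SearchFilterOptions.EMPTY_LABEL if treat_missing_as_null
            else SearchFilterOptions.ANY_LABEL)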
def lock_key(cmd,
key,
label=None,
name=None,
connection_string=None,
yes=False,
auth_mode="key",
endpoint=None):
azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint)
retry_times = 3
retry_interval = 1
for i in range(0, retry_times):
try:
retrieved_kv = azconfig_client.get_configuration_setting(key=key, label=label)
except ResourceNotFoundError:
raise CLIError("Key '{}' with label '{}' does not exist.".format(key, label))
except HttpResponseError as exception:
raise CLIError("Failed to retrieve key-values from config store. " + str(exception))
confirmation_message = "Are you sure you want to lock the key '{}' with label '{}'".format(key, label)
user_confirmation(confirmation_message, yes)
try:
new_kv = azconfig_client.set_read_only(retrieved_kv, match_condition=MatchConditions.IfNotModified)
return convert_configurationsetting_to_keyvalue(new_kv)
except HttpResponseError as exception:
if exception.status_code == StatusCodes.PRECONDITION_FAILED:
logger.debug('Retrying lock operation %s times with exception: concurrent setting operations', i + 1)
time.sleep(retry_interval)
else:
raise CLIError("Failed to lock the key-value due to an exception: " + str(exception))
except Exception as exception:
raise CLIError("Failed to lock the key-value due to an exception: " + str(exception))
raise CLIError("Failed to lock the key '{}' with label '{}' due to a conflicting operation.".format(key, label))
def unlock_key(cmd,
key,
label=None,
name=None,
connection_string=None,
yes=False,
auth_mode="key",
endpoint=None):
azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint)
retry_times = 3
retry_interval = 1
for i in range(0, retry_times):
try:
retrieved_kv = azconfig_client.get_configuration_setting(key=key, label=label)
except ResourceNotFoundError:
raise CLIError("Key '{}' with label '{}' does not exist.".format(key, label))
except HttpResponseError as exception:
raise CLIError("Failed to retrieve key-values from config store. " + str(exception))
confirmation_message = "Are you sure you want to unlock the key '{}' with label '{}'".format(key, label)
user_confirmation(confirmation_message, yes)
try:
new_kv = azconfig_client.set_read_only(retrieved_kv, read_only=False, match_condition=MatchConditions.IfNotModified)
return convert_configurationsetting_to_keyvalue(new_kv)
except HttpResponseError as exception:
if exception.status_code == StatusCodes.PRECONDITION_FAILED:
logger.debug('Retrying unlock operation %s times with exception: concurrent setting operations', i + 1)
time.sleep(retry_interval)
else:
raise CLIError("Failed to unlock the key-value due to an exception: " + str(exception))
except Exception as exception:
raise CLIError("Failed to unlock the key-value due to an exception: " + str(exception))
raise CLIError("Failed to unlock the key '{}' with label '{}' due to a conflicting operation.".format(key, label))
def show_key(cmd,
key,
name=None,
label=None,
datetime=None,
connection_string=None,
auth_mode="key",
endpoint=None):
azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint)
try:
key_value = azconfig_client.get_configuration_setting(key=key, label=label, accept_datetime=datetime)
if key_value is None:
raise CLIError("The key-value does not exist.")
return convert_configurationsetting_to_keyvalue(key_value)
except ResourceNotFoundError:
raise CLIError("Key '{}' with label '{}' does not exist.".format(key, label))
except HttpResponseError as exception:
raise CLIError('Failed to retrieve key-values from config store. ' + str(exception))
raise CLIError("Failed to get the key '{}' with label '{}'.".format(key, label))
def list_key(cmd,
key=None,
fields=None,
name=None,
label=None,
datetime=None,
connection_string=None,
top=None,
all_=False,
resolve_keyvault=False,
auth_mode="key",
endpoint=None):
if fields and resolve_keyvault:
raise CLIError("Please provide only one of these arguments: '--fields' or '--resolve-keyvault'. See 'az appconfig kv list -h' for examples.")
azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint)
keyvalues = __read_kv_from_config_store(azconfig_client,
key=key if key else SearchFilterOptions.ANY_KEY,
label=label if label else SearchFilterOptions.ANY_LABEL,
datetime=datetime,
fields=fields,
top=top,
all_=all_,
cli_ctx=cmd.cli_ctx if resolve_keyvault else None)
return keyvalues
def restore_key(cmd,
datetime,
key=None,
name=None,
label=None,
connection_string=None,
yes=False,
auth_mode="key",
endpoint=None):
azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint)
exception_messages = []
restore_keyvalues = __read_kv_from_config_store(azconfig_client,
key=key if key else SearchFilterOptions.ANY_KEY,
label=label if label else SearchFilterOptions.ANY_LABEL,
datetime=datetime)
current_keyvalues = __read_kv_from_config_store(azconfig_client,
key=key if key else SearchFilterOptions.ANY_KEY,
label=label if label else SearchFilterOptions.ANY_LABEL)
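    # Diff the point-in-time snapshot against the current state: key-values
    # present only in the snapshot are added back, changed ones are reverted,
    # and key-values created since the snapshot are deleted.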
try:
kvs_to_restore, kvs_to_modify, kvs_to_delete = __compare_kvs_for_restore(restore_keyvalues, current_keyvalues)
if not yes:
need_change = __print_restore_preview(kvs_to_restore, kvs_to_modify, kvs_to_delete)
if need_change is False:
logger.debug('Canceling the restore operation based on user selection.')
return
keys_to_restore = len(kvs_to_restore) + len(kvs_to_modify) + len(kvs_to_delete)
restored_so_far = 0
for kv in chain(kvs_to_restore, kvs_to_modify):
set_kv = convert_keyvalue_to_configurationsetting(kv)
try:
azconfig_client.set_configuration_setting(set_kv)
restored_so_far += 1
except ResourceReadOnlyError:
exception = "Failed to update read-only key-value with key '{}' and label '{}'. Unlock the key-value before updating it.".format(set_kv.key, set_kv.label)
exception_messages.append(exception)
except ResourceModifiedError:
exception = "Failed to update key-value with key '{}' and label '{}' due to a conflicting operation.".format(set_kv.key, set_kv.label)
exception_messages.append(exception)
for kv in kvs_to_delete:
try:
azconfig_client.delete_configuration_setting(key=kv.key,
label=kv.label,
etag=kv.etag,
match_condition=MatchConditions.IfNotModified)
restored_so_far += 1
except ResourceReadOnlyError:
exception = "Failed to delete read-only key-value with key '{}' and label '{}'. Unlock the key-value before deleting it.".format(kv.key, kv.label)
exception_messages.append(exception)
except ResourceModifiedError:
exception = "Failed to delete key-value with key '{}' and label '{}' due to a conflicting operation.".format(kv.key, kv.label)
exception_messages.append(exception)
if restored_so_far != keys_to_restore:
logger.error('Failed after restoring %d out of %d keys. The following error(s) occurred:\n%s\n',
restored_so_far, keys_to_restore, json.dumps(exception_messages, indent=2, ensure_ascii=False))
else:
logger.debug('Successfully restored %d out of %d keys', restored_so_far, keys_to_restore)
return
except HttpResponseError as ex:
exception_messages.append(str(ex))
raise CLIError('Restore operation failed. The following error(s) occurred:\n' + json.dumps(exception_messages, indent=2, ensure_ascii=False))
def list_revision(cmd,
key=None,
fields=None,
name=None,
label=None,
datetime=None,
connection_string=None,
top=None,
all_=False,
auth_mode="key",
endpoint=None):
azconfig_client = get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint)
key = key if key else SearchFilterOptions.ANY_KEY
label = label if label else SearchFilterOptions.ANY_LABEL
label = prep_label_filter_for_url_encoding(label)
try:
revisions_iterable = azconfig_client.list_revisions(key_filter=key,
label_filter=label,
accept_datetime=datetime,
fields=fields)
retrieved_revisions = []
count = 0
if all_:
top = float('inf')
elif top is None:
top = 100
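        # Client-side cap: all_ returns every revision, an explicit top is
        # honored when given, and otherwise listing stops after 100 revisions.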
for revision in revisions_iterable:
kv_revision = convert_configurationsetting_to_keyvalue(revision)
if fields:
partial_revision = {}
for field in fields:
partial_revision[field.name.lower()] = kv_revision.__dict__[field.name.lower()]
retrieved_revisions.append(partial_revision)
else:
retrieved_revisions.append(kv_revision)
count += 1
if count >= top:
return retrieved_revisions
return retrieved_revisions
except HttpResponseError as ex:
raise CLIError('List revision operation failed.\n' + str(ex))
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly both upgrading and downgrading, and that no data loss occurs
if possible.
"""
import datetime
import fixtures
import os
import uuid
from migrate.versioning import repository
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
import sqlalchemy
import testtools
from heat.db.sqlalchemy import migrate_repo
from heat.db.sqlalchemy import migration
from heat.db.sqlalchemy import models
from heat.tests import common
class DBNotAllowed(Exception):
pass
class BannedDBSchemaOperations(fixtures.Fixture):
"""Ban some operations for migrations"""
def __init__(self, banned_resources=None):
super(BannedDBSchemaOperations, self).__init__()
self._banned_resources = banned_resources or []
@staticmethod
def _explode(resource, op):
print('%s.%s()' % (resource, op))
raise DBNotAllowed(
'Operation %s.%s() is not allowed in a database migration' % (
resource, op))
def setUp(self):
super(BannedDBSchemaOperations, self).setUp()
for thing in self._banned_resources:
self.useFixture(fixtures.MonkeyPatch(
'sqlalchemy.%s.drop' % thing,
lambda *a, **k: self._explode(thing, 'drop')))
self.useFixture(fixtures.MonkeyPatch(
'sqlalchemy.%s.alter' % thing,
lambda *a, **k: self._explode(thing, 'alter')))
class TestBannedDBSchemaOperations(testtools.TestCase):
def test_column(self):
column = sqlalchemy.Column()
with BannedDBSchemaOperations(['Column']):
self.assertRaises(DBNotAllowed, column.drop)
self.assertRaises(DBNotAllowed, column.alter)
def test_table(self):
table = sqlalchemy.Table()
with BannedDBSchemaOperations(['Table']):
self.assertRaises(DBNotAllowed, table.drop)
self.assertRaises(DBNotAllowed, table.alter)
class HeatMigrationsCheckers(test_migrations.WalkVersionsMixin,
common.FakeLogMixin):
"""Test sqlalchemy-migrate migrations."""
snake_walk = False
downgrade = False
@property
def INIT_VERSION(self):
return migration.INIT_VERSION
@property
def REPOSITORY(self):
migrate_file = migrate_repo.__file__
return repository.Repository(
os.path.abspath(os.path.dirname(migrate_file))
)
@property
def migration_api(self):
temp = __import__('oslo_db.sqlalchemy.migration', globals(),
locals(), ['versioning_api'], 0)
return temp.versioning_api
@property
def migrate_engine(self):
return self.engine
def migrate_up(self, version, with_data=False):
"""Check that migrations don't cause downtime.
Schema migrations can be done online, allowing for rolling upgrades.
"""
# NOTE(xek): This is a list of migrations where we allow dropping
# things. The rules for adding exceptions are very very specific.
        # Chances are you don't meet the criteria.
# Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
exceptions = [
64, # drop constraint
]
# Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
# NOTE(xek): We start requiring things be additive in
# liberty, so ignore all migrations before that point.
LIBERTY_START = 63
if version >= LIBERTY_START and version not in exceptions:
banned = ['Table', 'Column']
else:
banned = None
with BannedDBSchemaOperations(banned):
super(HeatMigrationsCheckers, self).migrate_up(version, with_data)
def test_walk_versions(self):
self.walk_versions(self.snake_walk, self.downgrade)
def assertColumnExists(self, engine, table, column):
t = utils.get_table(engine, table)
self.assertIn(column, t.c)
def assertColumnType(self, engine, table, column, sqltype):
t = utils.get_table(engine, table)
col = getattr(t.c, column)
self.assertIsInstance(col.type, sqltype)
def assertColumnNotExists(self, engine, table, column):
t = utils.get_table(engine, table)
self.assertNotIn(column, t.c)
def assertColumnIsNullable(self, engine, table, column):
t = utils.get_table(engine, table)
col = getattr(t.c, column)
self.assertTrue(col.nullable)
def assertColumnIsNotNullable(self, engine, table, column_name):
table = utils.get_table(engine, table)
column = getattr(table.c, column_name)
self.assertFalse(column.nullable)
def assertIndexExists(self, engine, table, index):
t = utils.get_table(engine, table)
index_names = [idx.name for idx in t.indexes]
self.assertIn(index, index_names)
def assertIndexMembers(self, engine, table, index, members):
self.assertIndexExists(engine, table, index)
t = utils.get_table(engine, table)
index_columns = []
for idx in t.indexes:
if idx.name == index:
for ix in idx.columns:
index_columns.append(ix.name)
break
self.assertEqual(sorted(members), sorted(index_columns))
def _pre_upgrade_031(self, engine):
raw_template = utils.get_table(engine, 'raw_template')
templ = []
for i in range(300, 303, 1):
t = dict(id=i, template='{}', files='{}')
engine.execute(raw_template.insert(), [t])
templ.append(t)
user_creds = utils.get_table(engine, 'user_creds')
user = [dict(id=4, username='angus', password='notthis',
tenant='mine', auth_url='bla',
tenant_id=str(uuid.uuid4()),
trust_id='',
trustor_user_id='')]
engine.execute(user_creds.insert(), user)
stack = utils.get_table(engine, 'stack')
stack_ids = [('967aaefb-152e-405d-b13a-35d4c816390c', 0),
('9e9deba9-a303-4f29-84d3-c8165647c47e', 1),
('9a4bd1ec-8b21-46cd-964a-f66cb1cfa2f9', 2)]
data = [dict(id=ll_id, name='fruity',
raw_template_id=templ[templ_id]['id'],
user_creds_id=user[0]['id'],
username='angus', disable_rollback=True)
for ll_id, templ_id in stack_ids]
engine.execute(stack.insert(), data)
return data
def _check_031(self, engine, data):
self.assertColumnExists(engine, 'stack_lock', 'stack_id')
self.assertColumnExists(engine, 'stack_lock', 'engine_id')
self.assertColumnExists(engine, 'stack_lock', 'created_at')
self.assertColumnExists(engine, 'stack_lock', 'updated_at')
def _check_034(self, engine, data):
self.assertColumnExists(engine, 'raw_template', 'files')
def _pre_upgrade_035(self, engine):
        # The stack ids come from the data seeded in _pre_upgrade_031 above.
event_table = utils.get_table(engine, 'event')
data = [{
'id': '22222222-152e-405d-b13a-35d4c816390c',
'stack_id': '967aaefb-152e-405d-b13a-35d4c816390c',
'resource_action': 'Test',
'resource_status': 'TEST IN PROGRESS',
'resource_name': 'Testing Resource',
'physical_resource_id': '3465d1ec-8b21-46cd-9dgf-f66cttrh53f9',
'resource_status_reason': '',
'resource_type': '',
'resource_properties': None,
'created_at': timeutils.utcnow()},
{'id': '11111111-152e-405d-b13a-35d4c816390c',
'stack_id': '967aaefb-152e-405d-b13a-35d4c816390c',
'resource_action': 'Test',
'resource_status': 'TEST COMPLETE',
'resource_name': 'Testing Resource',
'physical_resource_id': '3465d1ec-8b21-46cd-9dgf-f66cttrh53f9',
'resource_status_reason': '',
'resource_type': '',
'resource_properties': None,
'created_at': timeutils.utcnow() +
datetime.timedelta(days=5)}]
engine.execute(event_table.insert(), data)
return data
def _check_035(self, engine, data):
self.assertColumnExists(engine, 'event', 'id')
self.assertColumnExists(engine, 'event', 'uuid')
event_table = utils.get_table(engine, 'event')
events_in_db = list(event_table.select().execute())
last_id = 0
for index, event in enumerate(data):
last_id = index + 1
self.assertEqual(last_id, events_in_db[index].id)
self.assertEqual(event['id'], events_in_db[index].uuid)
# Check that the autoincremental id is ok
data = [{
'uuid': '33333333-152e-405d-b13a-35d4c816390c',
'stack_id': '967aaefb-152e-405d-b13a-35d4c816390c',
'resource_action': 'Test',
            'resource_status': 'TEST COMPLETE AGAIN',
'resource_name': 'Testing Resource',
'physical_resource_id': '3465d1ec-8b21-46cd-9dgf-f66cttrh53f9',
'resource_status_reason': '',
'resource_type': '',
'resource_properties': None,
'created_at': timeutils.utcnow()}]
result = engine.execute(event_table.insert(), data)
self.assertEqual(last_id + 1, result.inserted_primary_key[0])
def _check_036(self, engine, data):
self.assertColumnExists(engine, 'stack', 'stack_user_project_id')
def _pre_upgrade_037(self, engine):
raw_template = utils.get_table(engine, 'raw_template')
templ = '''{"heat_template_version": "2013-05-23",
"parameters": {
"key_name": {
"Type": "string"
}
}
}'''
data = [dict(id=4, template=templ, files='{}')]
engine.execute(raw_template.insert(), data)
return data[0]
def _check_037(self, engine, data):
raw_template = utils.get_table(engine, 'raw_template')
templs = list(raw_template.select().
where(raw_template.c.id == str(data['id'])).
execute())
template = jsonutils.loads(templs[0].template)
data_template = jsonutils.loads(data['template'])
self.assertNotIn('Type', template['parameters']['key_name'])
self.assertIn('type', template['parameters']['key_name'])
self.assertEqual(template['parameters']['key_name']['type'],
data_template['parameters']['key_name']['Type'])
def _check_038(self, engine, data):
self.assertColumnNotExists(engine, 'software_config', 'io')
def _check_039(self, engine, data):
self.assertColumnIsNullable(engine, 'stack', 'user_creds_id')
def _check_040(self, engine, data):
self.assertColumnNotExists(engine, 'software_deployment', 'signal_id')
def _pre_upgrade_041(self, engine):
raw_template = utils.get_table(engine, 'raw_template')
templ = '''{"heat_template_version": "2013-05-23",
"resources": {
"my_instance": {
"Type": "OS::Nova::Server"
}
},
"outputs": {
"instance_ip": {
"Value": { "get_attr": "[my_instance, networks]" }
}
}
}'''
data = [dict(id=7, template=templ, files='{}')]
engine.execute(raw_template.insert(), data)
return data[0]
def _check_041(self, engine, data):
raw_template = utils.get_table(engine, 'raw_template')
templs = list(raw_template.select().
where(raw_template.c.id == str(data['id'])).
execute())
template = jsonutils.loads(templs[0].template)
self.assertIn('type', template['resources']['my_instance'])
self.assertNotIn('Type', template['resources']['my_instance'])
self.assertIn('value', template['outputs']['instance_ip'])
self.assertNotIn('Value', template['outputs']['instance_ip'])
def _pre_upgrade_043(self, engine):
raw_template = utils.get_table(engine, 'raw_template')
templ = '''{"HeatTemplateFormatVersion" : "2012-12-11",
"Parameters" : {
"foo" : { "Type" : "String", "NoEcho": "True" },
"bar" : { "Type" : "String", "NoEcho": "True", "Default": "abc" },
"blarg" : { "Type" : "String", "Default": "quux" }
}
}'''
data = [dict(id=8, template=templ, files='{}')]
engine.execute(raw_template.insert(), data)
return data[0]
def _check_043(self, engine, data):
raw_template = utils.get_table(engine, 'raw_template')
templ = list(raw_template.select().
where(raw_template.c.id == data['id']).execute())
template = jsonutils.loads(templ[0].template)
self.assertEqual(template['HeatTemplateFormatVersion'], '2012-12-12')
def _pre_upgrade_045(self, engine):
raw_template = utils.get_table(engine, 'raw_template')
templ = []
for i in range(200, 203, 1):
t = dict(id=i, template='{}', files='{}')
engine.execute(raw_template.insert(), [t])
templ.append(t)
user_creds = utils.get_table(engine, 'user_creds')
user = [dict(id=6, username='steve', password='notthis',
tenant='mine', auth_url='bla',
tenant_id=str(uuid.uuid4()),
trust_id='',
trustor_user_id='')]
engine.execute(user_creds.insert(), user)
stack = utils.get_table(engine, 'stack')
stack_ids = [('s1', '967aaefb-152e-505d-b13a-35d4c816390c', 0),
('s2', '9e9deba9-a303-5f29-84d3-c8165647c47e', 1),
('s1*', '9a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9', 2)]
data = [dict(id=ll_id, name=name,
raw_template_id=templ[templ_id]['id'],
user_creds_id=user[0]['id'],
username='steve', disable_rollback=True)
for name, ll_id, templ_id in stack_ids]
data[2]['owner_id'] = '967aaefb-152e-505d-b13a-35d4c816390c'
engine.execute(stack.insert(), data)
return data
def _check_045(self, engine, data):
self.assertColumnExists(engine, 'stack', 'backup')
stack_table = utils.get_table(engine, 'stack')
stacks_in_db = list(stack_table.select().execute())
stack_names_in_db = [s.name for s in stacks_in_db]
# Assert the expected stacks are still there
for stack in data:
self.assertIn(stack['name'], stack_names_in_db)
# And that the backup flag is set as expected
for stack in stacks_in_db:
if stack.name.endswith('*'):
self.assertTrue(stack.backup)
else:
self.assertFalse(stack.backup)
def _check_046(self, engine, data):
self.assertColumnExists(engine, 'resource', 'properties_data')
def _pre_upgrade_047(self, engine):
raw_template = utils.get_table(engine, 'raw_template')
templ = []
for i in range(100, 105, 1):
t = dict(id=i, template='{}', files='{}')
engine.execute(raw_template.insert(), [t])
templ.append(t)
user_creds = utils.get_table(engine, 'user_creds')
user = [dict(id=7, username='steve', password='notthis',
tenant='mine', auth_url='bla',
tenant_id=str(uuid.uuid4()),
trust_id='',
trustor_user_id='')]
engine.execute(user_creds.insert(), user)
stack = utils.get_table(engine, 'stack')
stack_ids = [
('s9', '167aaefb-152e-505d-b13a-35d4c816390c', 0),
('n1', '1e9deba9-a303-5f29-84d3-c8165647c47e', 1),
('n2', '1e9deba9-a304-5f29-84d3-c8165647c47e', 2),
('n3', '1e9deba9-a305-5f29-84d3-c8165647c47e', 3),
('s9*', '1a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9', 4)]
data = [dict(id=ll_id, name=name,
raw_template_id=templ[tmpl_id]['id'],
user_creds_id=user[0]['id'],
owner_id=None,
backup=False,
username='steve', disable_rollback=True)
for name, ll_id, tmpl_id in stack_ids]
        # Make a nested tree s9->n1->n2->n3 with an s9 backup (s9*)
data[1]['owner_id'] = '167aaefb-152e-505d-b13a-35d4c816390c'
data[2]['owner_id'] = '1e9deba9-a303-5f29-84d3-c8165647c47e'
data[3]['owner_id'] = '1e9deba9-a304-5f29-84d3-c8165647c47e'
data[4]['owner_id'] = '167aaefb-152e-505d-b13a-35d4c816390c'
data[4]['backup'] = True
engine.execute(stack.insert(), data)
return data
def _check_047(self, engine, data):
self.assertColumnExists(engine, 'stack', 'nested_depth')
stack_table = utils.get_table(engine, 'stack')
stacks_in_db = list(stack_table.select().execute())
stack_ids_in_db = [s.id for s in stacks_in_db]
# Assert the expected stacks are still there
for stack in data:
self.assertIn(stack['id'], stack_ids_in_db)
# And that the depth is set as expected
def n_depth(sid):
s = [s for s in stacks_in_db if s.id == sid][0]
return s.nested_depth
self.assertEqual(0, n_depth('167aaefb-152e-505d-b13a-35d4c816390c'))
self.assertEqual(1, n_depth('1e9deba9-a303-5f29-84d3-c8165647c47e'))
self.assertEqual(2, n_depth('1e9deba9-a304-5f29-84d3-c8165647c47e'))
self.assertEqual(3, n_depth('1e9deba9-a305-5f29-84d3-c8165647c47e'))
self.assertEqual(0, n_depth('1a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9'))
def _check_049(self, engine, data):
self.assertColumnExists(engine, 'user_creds', 'region_name')
def _check_051(self, engine, data):
column_list = [('id', False),
('host', False),
('topic', False),
('binary', False),
('hostname', False),
('engine_id', False),
('report_interval', False),
('updated_at', True),
('created_at', True),
('deleted_at', True)]
for column in column_list:
self.assertColumnExists(engine, 'service', column[0])
if not column[1]:
self.assertColumnIsNotNullable(engine, 'service', column[0])
else:
self.assertColumnIsNullable(engine, 'service', column[0])
def _check_052(self, engine, data):
self.assertColumnExists(engine, 'stack', 'convergence')
def _check_055(self, engine, data):
self.assertColumnExists(engine, 'stack', 'prev_raw_template_id')
self.assertColumnExists(engine, 'stack', 'current_traversal')
self.assertColumnExists(engine, 'stack', 'current_deps')
def _pre_upgrade_056(self, engine):
raw_template = utils.get_table(engine, 'raw_template')
templ = []
for i in range(900, 903, 1):
t = dict(id=i, template='{}', files='{}')
engine.execute(raw_template.insert(), [t])
templ.append(t)
user_creds = utils.get_table(engine, 'user_creds')
user = [dict(id=uid, username='test_user', password='password',
tenant='test_project', auth_url='bla',
tenant_id=str(uuid.uuid4()),
trust_id='',
trustor_user_id='') for uid in range(900, 903)]
engine.execute(user_creds.insert(), user)
stack = utils.get_table(engine, 'stack')
stack_ids = [('967aaefa-152e-405d-b13a-35d4c816390c', 0),
('9e9debab-a303-4f29-84d3-c8165647c47e', 1),
('9a4bd1e9-8b21-46cd-964a-f66cb1cfa2f9', 2)]
data = [dict(id=ll_id, name=ll_id,
raw_template_id=templ[templ_id]['id'],
user_creds_id=user[templ_id]['id'],
username='test_user',
disable_rollback=True,
parameters='test_params',
created_at=timeutils.utcnow(),
deleted_at=None)
for ll_id, templ_id in stack_ids]
data[-1]['deleted_at'] = timeutils.utcnow()
engine.execute(stack.insert(), data)
return data
def _check_056(self, engine, data):
self.assertColumnNotExists(engine, 'stack', 'parameters')
self.assertColumnExists(engine, 'raw_template', 'environment')
self.assertColumnExists(engine, 'raw_template', 'predecessor')
# Get the parameters in stack table
stack_parameters = {}
for stack in data:
templ_id = stack['raw_template_id']
stack_parameters[templ_id] = (stack['parameters'],
stack.get('deleted_at'))
        # Validate that the parameters were moved to raw_template.environment.
raw_template_table = utils.get_table(engine, 'raw_template')
raw_templates = raw_template_table.select().execute()
for raw_template in raw_templates:
if raw_template.id in stack_parameters:
stack_param, deleted_at = stack_parameters[raw_template.id]
tmpl_env = raw_template.environment
if engine.name == 'sqlite' and deleted_at is None:
stack_param = '"%s"' % stack_param
if deleted_at is None:
self.assertEqual(stack_param,
tmpl_env,
'parameters migration from stack to '
'raw_template failed')
else:
self.assertIsNone(tmpl_env,
'parameters migration did not skip '
'deleted stack')
def _pre_upgrade_057(self, engine):
# template
raw_template = utils.get_table(engine, 'raw_template')
templ = [dict(id=11, template='{}', files='{}')]
engine.execute(raw_template.insert(), templ)
# credentials
user_creds = utils.get_table(engine, 'user_creds')
user = [dict(id=11, username='steve', password='notthis',
tenant='mine', auth_url='bla',
tenant_id=str(uuid.uuid4()),
trust_id='',
trustor_user_id='')]
engine.execute(user_creds.insert(), user)
# stack
stack = utils.get_table(engine, 'stack')
stack_data = [dict(id='867aaefb-152e-505d-b13a-35d4c816390c',
name='s1',
raw_template_id=templ[0]['id'],
user_creds_id=user[0]['id'],
username='steve', disable_rollback=True)]
engine.execute(stack.insert(), stack_data)
# resource
resource = utils.get_table(engine, 'resource')
res_data = [dict(id='167aaefb-152e-505d-b13a-35d4c816390c',
name='res-4',
stack_id=stack_data[0]['id'],
user_creds_id=user[0]['id']),
dict(id='177aaefb-152e-505d-b13a-35d4c816390c',
name='res-5',
stack_id=stack_data[0]['id'],
user_creds_id=user[0]['id'])]
engine.execute(resource.insert(), res_data)
# resource_data
resource_data = utils.get_table(engine, 'resource_data')
rd_data = [dict(key='fruit',
value='blueberries',
reduct=False,
resource_id=res_data[0]['id']),
dict(key='fruit',
value='apples',
reduct=False,
resource_id=res_data[1]['id'])]
engine.execute(resource_data.insert(), rd_data)
return {'resource': res_data, 'resource_data': rd_data}
def _check_057(self, engine, data):
def uuid_in_res_data(res_uuid):
for rd in data['resource']:
if rd['id'] == res_uuid:
return True
return False
def rd_matches_old_data(key, value, res_uuid):
for rd in data['resource_data']:
if (rd['resource_id'] == res_uuid and rd['key'] == key
and rd['value'] == value):
return True
return False
self.assertColumnIsNotNullable(engine, 'resource', 'id')
res_table = utils.get_table(engine, 'resource')
res_in_db = list(res_table.select().execute())
# confirm the resource.id is an int and the uuid field has been
# copied from the old id.
for r in res_in_db:
self.assertIsInstance(r.id, six.integer_types)
self.assertTrue(uuid_in_res_data(r.uuid))
# confirm that the new resource_id points to the correct resource.
rd_table = utils.get_table(engine, 'resource_data')
rd_in_db = list(rd_table.select().execute())
for rd in rd_in_db:
for r in res_in_db:
if rd.resource_id == r.id:
self.assertTrue(rd_matches_old_data(rd.key, rd.value,
r.uuid))
def _check_058(self, engine, data):
self.assertColumnExists(engine, 'resource', 'engine_id')
self.assertColumnExists(engine, 'resource', 'atomic_key')
def _check_059(self, engine, data):
column_list = [('entity_id', False),
('traversal_id', False),
('is_update', False),
('atomic_key', False),
('stack_id', False),
('input_data', True),
('updated_at', True),
('created_at', True)]
for column in column_list:
self.assertColumnExists(engine, 'sync_point', column[0])
if not column[1]:
self.assertColumnIsNotNullable(engine, 'sync_point',
column[0])
else:
self.assertColumnIsNullable(engine, 'sync_point', column[0])
def _check_060(self, engine, data):
column_list = ['needed_by', 'requires', 'replaces', 'replaced_by',
'current_template_id']
for column in column_list:
self.assertColumnExists(engine, 'resource', column)
def _check_061(self, engine, data):
for tab_name in ['stack', 'resource', 'software_deployment']:
self.assertColumnType(engine, tab_name, 'status_reason',
sqlalchemy.Text)
def _check_062(self, engine, data):
self.assertColumnExists(engine, 'stack', 'parent_resource_name')
def _check_063(self, engine, data):
self.assertColumnExists(engine, 'resource',
'properties_data_encrypted')
def _check_064(self, engine, data):
self.assertColumnNotExists(engine, 'raw_template',
'predecessor')
def _pre_upgrade_065(self, engine):
raw_template = utils.get_table(engine, 'raw_template')
templ = []
for i in range(960, 963, 1):
t = dict(id=i, template='{}', files='{}')
engine.execute(raw_template.insert(), [t])
templ.append(t)
user_creds = utils.get_table(engine, 'user_creds')
user = [dict(id=uid, username='test_user', password='password',
tenant='test_project', auth_url='bla',
tenant_id=str(uuid.uuid4()),
trust_id='',
trustor_user_id='') for uid in range(960, 963)]
engine.execute(user_creds.insert(), user)
stack = utils.get_table(engine, 'stack')
root_sid = '9a6a3ddb-2219-452c-8fec-a4977f8fe474'
stack_ids = [(root_sid, 0, None),
('b6a23bc2-cd4e-496f-be2e-c11d06124ea2', 1, root_sid),
('7a927947-e004-4afa-8d11-62c1e049ecbd', 2, root_sid)]
data = [dict(id=ll_id, name=ll_id,
owner_id=owner_id,
raw_template_id=templ[templ_id]['id'],
user_creds_id=user[templ_id]['id'],
username='test_user',
disable_rollback=True,
parameters='test_params',
created_at=timeutils.utcnow(),
deleted_at=None)
for ll_id, templ_id, owner_id in stack_ids]
engine.execute(stack.insert(), data)
res_table = utils.get_table(engine, 'resource')
resource_ids = [(960, root_sid),
(961, 'b6a23bc2-cd4e-496f-be2e-c11d06124ea2'),
(962, '7a927947-e004-4afa-8d11-62c1e049ecbd')]
resources = [dict(id=rid, stack_id=sid)
for rid, sid in resource_ids]
engine.execute(res_table.insert(), resources)
def _check_065(self, engine, data):
self.assertColumnExists(engine, 'resource', 'root_stack_id')
res_table = utils.get_table(engine, 'resource')
res_in_db = list(res_table.select().execute())
self.assertTrue(len(res_in_db) >= 3)
# confirm the resource.root_stack_id is set for all resources
for r in res_in_db:
self.assertTrue(r.root_stack_id is not None)
if r.id >= 960 and r.id <= 962:
root_stack_id = '9a6a3ddb-2219-452c-8fec-a4977f8fe474'
self.assertEqual(root_stack_id, r.root_stack_id)
def _check_071(self, engine, data):
self.assertIndexExists(engine, 'stack', 'ix_stack_owner_id')
self.assertIndexMembers(engine, 'stack', 'ix_stack_owner_id',
['owner_id'])
def _check_073(self, engine, data):
# check if column still exists and is not nullable.
self.assertColumnIsNotNullable(engine, 'resource_data', 'resource_id')
# Ensure that only one foreign key exists and is created as expected.
inspector = sqlalchemy.engine.reflection.Inspector.from_engine(engine)
resource_data_fkeys = inspector.get_foreign_keys('resource_data')
self.assertEqual(1, len(resource_data_fkeys))
fk = resource_data_fkeys[0]
self.assertEqual('fk_resource_id', fk['name'])
self.assertEqual(['resource_id'], fk['constrained_columns'])
self.assertEqual('resource', fk['referred_table'])
self.assertEqual(['id'], fk['referred_columns'])
class TestHeatMigrationsMySQL(HeatMigrationsCheckers,
test_base.MySQLOpportunisticTestCase):
pass
class TestHeatMigrationsPostgreSQL(HeatMigrationsCheckers,
test_base.PostgreSQLOpportunisticTestCase):
pass
class TestHeatMigrationsSQLite(HeatMigrationsCheckers,
test_base.DbTestCase):
pass
class ModelsMigrationSyncMixin(object):
def get_metadata(self):
return models.BASE.metadata
def get_engine(self):
return self.engine
def db_sync(self, engine):
migration.db_sync(engine=engine)
def include_object(self, object_, name, type_, reflected, compare_to):
if name in ['migrate_version'] and type_ == 'table':
return False
return True
class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_base.MySQLOpportunisticTestCase):
pass
class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_base.PostgreSQLOpportunisticTestCase):
pass
class ModelsMigrationsSyncSQLite(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_base.DbTestCase):
pass
|
|
from numpy import matrix, array, abs, ravel, zeros_like, dot
from scipy import rand, linalg, mat, random
from scipy.sparse import csr_matrix
from scipy.linalg import svd, eigvals
from pyamg.util.linalg import approximate_spectral_radius,\
infinity_norm, norm, condest, cond,\
ishermitian, pinv_array
from pyamg import gallery
from numpy.testing import TestCase, assert_almost_equal, assert_equal,\
assert_array_almost_equal
class TestLinalg(TestCase):
def test_norm(self):
cases = []
cases.append(4)
cases.append(-1)
cases.append(2.5)
cases.append(3 + 5j)
cases.append(7 - 2j)
cases.append([1 + 3j, 6])
cases.append([1 + 3j, 6 - 2j])
for A in cases:
assert_almost_equal(norm(A), linalg.norm(A))
def test_approximate_spectral_radius(self):
cases = []
cases.append(matrix([[-4]]))
cases.append(array([[-4]]))
cases.append(array([[2, 0], [0, 1]]))
cases.append(array([[-2, 0], [0, 1]]))
cases.append(array([[100, 0, 0], [0, 101, 0], [0, 0, 99]]))
for i in range(1, 5):
cases.append(rand(i, i))
# method should be almost exact for small matrices
for A in cases:
A = A.astype(float)
Asp = csr_matrix(A)
[E, V] = linalg.eig(A)
E = abs(E)
largest_eig = (E == E.max()).nonzero()[0]
expected_eig = E[largest_eig]
expected_vec = V[:, largest_eig]
assert_almost_equal(approximate_spectral_radius(A), expected_eig)
assert_almost_equal(approximate_spectral_radius(Asp), expected_eig)
vec = approximate_spectral_radius(A, return_vector=True)[1]
minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
diff = minnorm / norm(expected_vec)
assert_almost_equal(diff, 0.0, decimal=4)
vec = approximate_spectral_radius(Asp, return_vector=True)[1]
minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
diff = minnorm / norm(expected_vec)
assert_almost_equal(diff, 0.0, decimal=4)
# try symmetric matrices
for A in cases:
A = A + A.transpose()
A = A.astype(float)
Asp = csr_matrix(A)
[E, V] = linalg.eig(A)
E = abs(E)
largest_eig = (E == E.max()).nonzero()[0]
expected_eig = E[largest_eig]
expected_vec = V[:, largest_eig]
assert_almost_equal(approximate_spectral_radius(A), expected_eig)
assert_almost_equal(approximate_spectral_radius(Asp), expected_eig)
vec = approximate_spectral_radius(A, return_vector=True)[1]
minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
diff = minnorm / norm(expected_vec)
assert_almost_equal(diff, 0.0, decimal=4)
vec = approximate_spectral_radius(Asp, return_vector=True)[1]
minnorm = min(norm(expected_vec + vec), norm(expected_vec - vec))
diff = minnorm / norm(expected_vec)
assert_almost_equal(diff, 0.0, decimal=4)
# test a larger matrix, and various parameter choices
cases = []
A1 = gallery.poisson((50, 50), format='csr')
cases.append((A1, 7.99241331495))
A2 = gallery.elasticity.linear_elasticity((32, 32), format='bsr')[0]
cases.append((A2, 536549.922189))
for A, expected in cases:
# test that increasing maxiter increases accuracy
ans1 = approximate_spectral_radius(A, tol=1e-16, maxiter=5,
restart=0)
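            # approximate_spectral_radius caches its estimate on the matrix
            # as an A.rho attribute; drop the cache so the next call
            # recomputes the value instead of returning the stored one.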
del A.rho
ans2 = approximate_spectral_radius(A, tol=1e-16, maxiter=15,
restart=0)
del A.rho
assert_equal(abs(ans2 - expected) < 0.5*abs(ans1 - expected), True)
# test that increasing restart increases accuracy
ans1 = approximate_spectral_radius(A, tol=1e-16, maxiter=10,
restart=0)
del A.rho
ans2 = approximate_spectral_radius(A, tol=1e-16, maxiter=10,
restart=1)
del A.rho
assert_equal(abs(ans2 - expected) < 0.8*abs(ans1 - expected), True)
# test tol
ans1 = approximate_spectral_radius(A, tol=0.1, maxiter=15,
restart=5)
del A.rho
assert_equal(abs(ans1 - expected)/abs(expected) < 0.1, True)
ans2 = approximate_spectral_radius(A, tol=0.001, maxiter=15,
restart=5)
del A.rho
assert_equal(abs(ans2 - expected)/abs(expected) < 0.001, True)
assert_equal(abs(ans2 - expected) < 0.1*abs(ans1 - expected), True)
def test_infinity_norm(self):
A = matrix([[-4]])
assert_equal(infinity_norm(csr_matrix(A)), 4)
A = matrix([[1, 0, -5], [-2, 5, 0]])
assert_equal(infinity_norm(csr_matrix(A)), 7)
A = matrix([[0, 1], [0, -5]])
assert_equal(infinity_norm(csr_matrix(A)), 5)
A = matrix([[1.3, -4.7, 0], [-2.23, 5.5, 0], [9, 0, -2]])
assert_equal(infinity_norm(csr_matrix(A)), 11)
class TestComplexLinalg(TestCase):
def test_approximate_spectral_radius(self):
cases = []
cases.append(matrix([[-4-4.0j]]))
cases.append(matrix([[-4+8.2j]]))
cases.append(matrix([[2.0-2.9j, 0], [0, 1.5]]))
cases.append(matrix([[-2.0-2.4j, 0], [0, 1.21]]))
cases.append(matrix([[100+1.0j, 0, 0],
[0, 101-1.0j, 0],
[0, 0, 99+9.9j]]))
for i in range(1, 6):
cases.append(matrix(rand(i, i)+1.0j*rand(i, i)))
# method should be almost exact for small matrices
for A in cases:
Asp = csr_matrix(A)
[E, V] = linalg.eig(A)
E = abs(E)
largest_eig = (E == E.max()).nonzero()[0]
expected_eig = E[largest_eig]
# expected_vec = V[:, largest_eig]
assert_almost_equal(approximate_spectral_radius(A), expected_eig)
assert_almost_equal(approximate_spectral_radius(Asp), expected_eig)
vec = approximate_spectral_radius(A, return_vector=True)[1]
Avec = A * vec
Avec = ravel(Avec)
vec = ravel(vec)
rayleigh = abs(dot(Avec, vec) / dot(vec, vec))
assert_almost_equal(rayleigh, expected_eig, decimal=4)
vec = approximate_spectral_radius(Asp, return_vector=True)[1]
Aspvec = Asp * vec
Aspvec = ravel(Aspvec)
vec = ravel(vec)
rayleigh = abs(dot(Aspvec, vec) / dot(vec, vec))
assert_almost_equal(rayleigh, expected_eig, decimal=4)
AA = mat(A).H*mat(A)
AAsp = csr_matrix(AA)
[E, V] = linalg.eig(AA)
E = abs(E)
largest_eig = (E == E.max()).nonzero()[0]
expected_eig = E[largest_eig]
# expected_vec = V[:, largest_eig]
assert_almost_equal(approximate_spectral_radius(AA),
expected_eig)
assert_almost_equal(approximate_spectral_radius(AAsp),
expected_eig)
vec = approximate_spectral_radius(AA, return_vector=True)[1]
AAvec = AA * vec
AAvec = ravel(AAvec)
vec = ravel(vec)
rayleigh = abs(dot(AAvec, vec) / dot(vec, vec))
assert_almost_equal(rayleigh, expected_eig, decimal=4)
vec = approximate_spectral_radius(AAsp, return_vector=True)[1]
AAspvec = AAsp * vec
AAspvec = ravel(AAspvec)
vec = ravel(vec)
rayleigh = abs(dot(AAspvec, vec) / dot(vec, vec))
assert_almost_equal(rayleigh, expected_eig, decimal=4)
def test_infinity_norm(self):
A = matrix([[-4-3.0j]])
assert_equal(infinity_norm(csr_matrix(A)), 5.0)
A = matrix([[1, 0, 4.0-3.0j], [-2, 5, 0]])
assert_equal(infinity_norm(csr_matrix(A)), 7)
A = matrix([[0, 1], [0, -4.0+3.0j]])
assert_equal(infinity_norm(csr_matrix(A)), 5.0)
def test_cond(self):
# make tests repeatable
random.seed(0)
# Should be exact
cases = []
A = mat(array([2.14]))
cases.append(A)
A = mat(array([2.14j]))
cases.append(A)
A = mat(array([-1.2 + 2.14j]))
cases.append(A)
for i in range(1, 6):
A = mat(rand(i, i))
cases.append(A)
cases.append(1.0j*A)
A = mat(A + 1.0j*rand(i, i))
cases.append(A)
for A in cases:
U, Sigma, Vh = svd(A)
exact = max(Sigma)/min(Sigma)
c = cond(A)
assert_almost_equal(exact, c)
def test_condest(self):
# make tests repeatable
random.seed(0)
# Should be exact for small matrices
cases = []
A = mat(array([2.14]))
cases.append(A)
A = mat(array([2.14j]))
cases.append(A)
A = mat(array([-1.2 + 2.14j]))
cases.append(A)
for i in range(1, 6):
A = mat(rand(i, i))
cases.append(A)
cases.append(1.0j*A)
A = mat(A + 1.0j*rand(i, i))
cases.append(A)
for A in cases:
eigs = eigvals(A)
exact = max(abs(eigs))/min(abs(eigs))
c = condest(A)
assert_almost_equal(exact, c)
def test_ishermitian(self):
# make tests repeatable
random.seed(0)
casesT = []
casesF = []
# 1x1
casesT.append(mat(rand(1, 1)))
casesF.append(mat(1.0j*rand(1, 1)))
# 2x2
A = array([[1.0, 0.0], [2.0, 1.0]])
Ai = 1.0j*A
casesF.append(A)
casesF.append(Ai)
A = A + Ai
casesF.append(A)
casesT.append(A + A.conjugate().T)
# 3x3
A = mat(rand(3, 3))
Ai = 1.0j*rand(3, 3)
casesF.append(A)
casesF.append(Ai)
A = A + Ai
casesF.append(A)
casesT.append(A + A.H)
for A in casesT:
# dense arrays
assert_equal(ishermitian(A, fast_check=False), True)
assert_equal(ishermitian(A, fast_check=True), True)
# csr arrays
A = csr_matrix(A)
assert_equal(ishermitian(A, fast_check=False), True)
assert_equal(ishermitian(A, fast_check=True), True)
for A in casesF:
# dense arrays
assert_equal(ishermitian(A, fast_check=False), False)
assert_equal(ishermitian(A, fast_check=True), False)
# csr arrays
A = csr_matrix(A)
assert_equal(ishermitian(A, fast_check=False), False)
assert_equal(ishermitian(A, fast_check=True), False)
def test_pinv_array(self):
from scipy.linalg import pinv2
tests = []
tests.append(rand(1, 1, 1))
tests.append(rand(3, 1, 1))
tests.append(rand(1, 2, 2))
tests.append(rand(3, 2, 2))
tests.append(rand(1, 3, 3))
tests.append(rand(3, 3, 3))
A = rand(1, 3, 3)
A[0, 0, :] = A[0, 1, :]
tests.append(A)
tests.append(rand(1, 1, 1) + 1.0j*rand(1, 1, 1))
tests.append(rand(3, 1, 1) + 1.0j*rand(3, 1, 1))
tests.append(rand(1, 2, 2) + 1.0j*rand(1, 2, 2))
tests.append(rand(3, 2, 2) + 1.0j*rand(3, 2, 2))
tests.append(rand(1, 3, 3) + 1.0j*rand(1, 3, 3))
tests.append(rand(3, 3, 3) + 1.0j*rand(3, 3, 3))
A = rand(1, 3, 3) + 1.0j*rand(1, 3, 3)
A[0, 0, :] = A[0, 1, :]
tests.append(A)
for test in tests:
pinv_test = zeros_like(test)
for i in range(pinv_test.shape[0]):
pinv_test[i] = pinv2(test[i])
pinv_array(test)
assert_array_almost_equal(test, pinv_test, decimal=4)
|
|
""" $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
from collections import namedtuple, OrderedDict, Counter
import itertools
from sympy import symbols
from sympy import Basic as symbasic
from sympy import Eq as symeq
from sympy.core.containers import Tuple as symtuple
from sympy.functions.elementary.piecewise import Piecewise as sympiecewise
from .. import util
from .layer import ConvLayer
from .network import Network
from .resource import Resource
from .scheduling_constraint import SchedulingConstraintLayerPipeline as Cstr
class PipelineSegment():
'''
Inter-layer pipeline segment.
Segment is a two-level layer hierarchy, where the first level is spatially
scheduled and the second level is temporally scheduled.
'''
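    # Illustrative layout (layer names are hypothetical): a segment such as
    #     (('conv1',), ('conv2', 'pool2'))
    # runs 'conv1' on one spatial subregion, while 'conv2' and 'pool2' share
    # another subregion and execute on it one after the other in time.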
# pylint: disable=too-many-instance-attributes
# Scheduling index in the segment, as a tuple of spatial and temporal
# scheduling indices.
SchedIndex = namedtuple('SchedIndex', ['sp_idx', 'tm_idx'])
def __init__(self, seg, network, batch_size, resource, max_util_drop=0.05,
with_opt=True):
if not isinstance(seg, tuple):
raise TypeError('PipelineSegment: seg must be a tuple.')
for ltpl in seg:
if not isinstance(ltpl, tuple):
raise TypeError('PipelineSegment: seg must be a tuple '
'of sub-tuples.')
if not isinstance(network, Network):
raise TypeError('PipelineSegment: network must be '
'a Network instance.')
if not isinstance(resource, Resource):
raise TypeError('PipelineSegment: resource must be '
'a Resource instance.')
self.seg = seg
self.network = network
self.batch_size = batch_size
self.resource = resource
self.max_util_drop = max_util_drop
self.with_opt = with_opt
self.valid = self._init_deps()
if not self.valid:
return
# Resource allocation.
self.valid = self._alloc_resource(max_util_drop=max_util_drop)
if not self.valid:
return
# Scheduling constraints.
self.valid = self._init_sym_cstrs()
if not self.valid:
return
def allocation(self):
'''
Get resource allocation, as a tuple of sub-tuples corresponding to the
layers in the segment.
'''
if not self.valid:
return None
return self.alloc
def gen_constraint(self, max_time_overhead=float('inf')):
'''
Generate scheduling constraint for the segment, as a tuple of
sub-tuples of SchedulingConstraint instances, corresponding to the
layers in the segment.
Yield the segment constraint tuple, and hints for pruning.
Pruning hints are the top-level loop blocking factors. Smaller hints
indicate better (lower) cost, and larger hints indicate better segment
timing (with lower time overhead). Constraints with smaller hints are
        generated before those with larger hints. So if a constraint results
        in a valid scheduling, later constraints whose hints are all larger
        than its hints can be pruned.
'''
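        # Sketch of the intended caller-side use (illustrative only):
        #     best_hints = None
        #     for constraint, hints in segment.gen_constraint():
        #         if best_hints and all(h > b for h, b in zip(hints, best_hints)):
        #             continue  # prunable: all hints exceed a known-good set
        #         if try_schedule(constraint):
        #             best_hints = hints
        # where try_schedule() stands in for whatever evaluates a constraint.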
syms = self.cstr_symvals.keys()
vals = self.cstr_symvals.values()
assert syms and vals
# Sort from small to large.
# This is not a strict ordering, but we guarantee that if all values in
# hint A are larger than the corresponding values in hint B, A will be
# generated after B.
vals = [sorted(v) for v in vals]
syms = list(syms)
if self.cstr_topbat_idx is not None:
# Tovhd = (1 + 1/to + 1 + 1/to + ...) / tb
# >= (1 + 1 + ...) / tb = num_sp_fbs / tb
min_topbat = 1. * self.cstr_num_sp_fbs / max_time_overhead
pos = self.cstr_topbat_idx
vals[pos] = [t for t in vals[pos] if t >= min_topbat]
for valp in itertools.product(*vals):
constraint = tuple()
for atpl in self._subs_symargs(self.cstr_symargs,
tuple(zip(syms, valp))):
ctpl = tuple()
for a in atpl:
# Construct kwargs, adjust the types of the values.
kwargs = {}
kwargs['topbat'] = int(a.get('topbat', 0))
kwargs['fbifm'] = bool(a.get('fbifm', False))
if not kwargs['fbifm']:
kwargs['topifm'] = int(a.get('topifm', 0))
kwargs['fbofm'] = bool(a.get('fbofm', False))
if not kwargs['fbofm']:
kwargs['topofm'] = int(a.get('topofm', 0))
kwargs['update_dict'] = a.get('update_dict')
c = Cstr(**kwargs)
ctpl += (c,)
constraint += (ctpl,)
if None in valp:
assert len(valp) == 1
hints = (1,)
else:
hints = tuple(valp)
yield constraint, hints
def __getitem__(self, index):
return self.seg[index]
def __iter__(self):
return self.seg.__iter__()
def __len__(self):
return len(self.seg)
def __eq__(self, other):
if isinstance(other, self.__class__):
# pylint: disable=protected-access
return self._key_attrs() == other._key_attrs()
return NotImplemented
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(tuple(self._key_attrs()))
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join([
'seg={}'.format(repr(self.seg)),
'network={}'.format(repr(self.network)),
'batch_size={}'.format(repr(self.batch_size)),
'resource={}'.format(repr(self.resource)),
'max_util_drop={}'.format(repr(self.max_util_drop)),
'with_opt={}'.format(repr(self.with_opt))]))
def _key_attrs(self):
''' Used for comparison. '''
return (self.seg, self.network, self.batch_size, self.resource,
self.max_util_drop, self.with_opt)
def _init_deps(self):
'''
Initialize the dependency relationship of the layers in the segment as
a mapping of the scheduling indices, and check validation. Return
whether the segment is valid to schedule.
        We categorize dependencies into 3 categories:
- local: with the same spatial index but different temporal indices;
- neighbor: with different spatial indices but in the same segment;
- memory: in different segments, from/to memory.
The values of the src/dst dicts are tuples of indices of the neighbor
dependencies. A layer can have at most one neighbor source (must be a
last temporal scheduled layer), but may have multiple neighbor
destinations (could be temporal scheduled in the middle). Also, all
layers with the same spatial index can have at most one neighbor
source.
Special index `None` means memory dependency, i.e., from/to memory.
        Memory sources and neighbor sources must be mutually exclusive, in order
to correctly set the src data regions; memory destinations and neighbor
destinations can co-exist.
Local dependencies are omitted, as by default each layer has its
immediately previous layer as local source and immediately next layer
as local destination.
Construct an ifmap forwarding dict for shared memory source data. It
maps previous layer name tuples, to a list of scheduling indices of all
layers in this segment that share these exact previous layers. The
        first in the list is responsible for fetching the previous layer data
        and forwarding them to others. We allow shared memory source data between
two layers only when both layers have memory dependency only (so their
temporal indices must be 0), and their previous layers are exactly the
same.
Construct an ofmap forwarding dict for multiple destinations of both
on-chip and off-chip. It maps the scheduling index of a layer in this
segment that has both memory and neighbor/local destinations (so needs
to store its ofmaps back to memory), to a list of scheduling indices of
        all layers in this segment that accept its ofmaps as ifmaps. Neighbor
dependencies are only between the last temporal one and the first
temporal ones; local dependencies are only between adjacent temporal
ones.
'''
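        # Resulting structures (shapes only): src_dict and dst_dict mirror
        # self.seg as nested lists indexed by [sp_idx][tm_idx] holding tuples
        # of SchedIndex and/or None; ifm_fwd_dict maps a previous-layer tuple
        # to a list of SchedIndex; ofm_fwd_dict maps a SchedIndex to a list
        # of SchedIndex.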
self.src_dict = [[None for _ in ltpl] for ltpl in self.seg]
self.dst_dict = [[None for _ in ltpl] for ltpl in self.seg]
self.ifm_fwd_dict = {}
self.ofm_fwd_dict = {}
# Mapping from layer to spatial/temporal indices in the segment.
layer2idx = {l: PipelineSegment.SchedIndex(sp_idx, tm_idx)
for sp_idx, ltpl in enumerate(self.seg)
for tm_idx, l in enumerate(ltpl)}
# Mapping from previous layer tuple to layer.
prevs2layer = {}
for sp_idx, ltpl in enumerate(self.seg):
single_nbr_src = None
for tm_idx, l in enumerate(ltpl):
assert layer2idx[l] == (sp_idx, tm_idx)
# Sources.
src = tuple()
prevs = self.network.prevs(l)
assert all(p not in layer2idx or layer2idx[p] < layer2idx[l]
for p in prevs)
mem_src = [p for p in prevs if p not in layer2idx]
lcl_src = [p for p in prevs if p not in mem_src
and layer2idx[p].sp_idx == sp_idx]
nbr_src = [p for p in prevs if p not in mem_src + lcl_src]
# Ensure single local source to be the immediately previous.
# Check at the destination so here are assertions.
if not lcl_src:
assert tm_idx == 0
else:
assert len(lcl_src) == 1 \
and layer2idx[lcl_src[0]].tm_idx == tm_idx - 1
                # Mutually exclusive.
if mem_src and nbr_src:
# We now allow each spatial scheduling (vertex) to have
# both memory source and neighbor source when generating
# segments. But each single layer cannot have both;
# otherwise there would be multiple source data regions.
return False
if mem_src:
# Memory source.
src += (None,)
if nbr_src:
# Neighbor source.
# Single neighbor source to be the last temporal scheduled.
assert len(nbr_src) == 1
prev_idx = layer2idx[nbr_src[0]]
assert prev_idx.tm_idx == len(self.seg[prev_idx.sp_idx]) - 1
# Single neighbor source across this spatial scheduling.
if single_nbr_src is not None:
return False
single_nbr_src = prev_idx
src += (prev_idx,)
# Shared memory source.
if mem_src and not lcl_src:
assert not nbr_src
assert tm_idx == 0
if prevs in prevs2layer:
fet_idx = layer2idx[prevs2layer[prevs]]
self.ifm_fwd_dict.setdefault(prevs, [fet_idx]).append(
layer2idx[l])
else:
prevs2layer[prevs] = l
# Destinations.
dst = tuple()
nexts = self.network.nexts(l)
assert all(n not in layer2idx or layer2idx[n] > layer2idx[l]
for n in nexts)
mem_dst = [n for n in nexts if n not in layer2idx]
lcl_dst = [n for n in nexts if n not in mem_dst
and layer2idx[n].sp_idx == sp_idx]
nbr_dst = [n for n in nexts if n not in mem_dst + lcl_dst]
# Ensure single local destination to be the immediate next.
if not lcl_dst:
if tm_idx != len(ltpl) - 1:
# Not utilize local data, sub-optimal.
return False
else:
if len(lcl_dst) != 1 \
or layer2idx[lcl_dst[0]].tm_idx != tm_idx + 1:
# Local data will not be available if not adjacent.
return False
                # Memory and neighbor destinations used to be mutually
                # exclusive, but they can now co-exist, so the old assertion
                # is retired:
                # assert not mem_dst or not nbr_dst
if mem_dst and nbr_dst:
assert tm_idx == len(ltpl) - 1
self.ofm_fwd_dict[layer2idx[l]] = [layer2idx[n]
for n in nbr_dst]
if mem_dst and lcl_dst:
assert not nbr_dst
self.ofm_fwd_dict[layer2idx[l]] = [layer2idx[lcl_dst[0]]]
if mem_dst:
# Memory destination.
dst += (None,)
if nbr_dst:
# Neighbor destinations.
# This layer is the last temporal scheduled.
assert tm_idx == len(ltpl) - 1
dst += tuple(layer2idx[n] for n in nbr_dst)
# Basic pipelining requires a linear structure (on-chip).
if not self.with_opt:
if len(nbr_src) + len(lcl_src) > 1 \
or len(nbr_dst) + len(lcl_dst) > 1 \
or ((sp_idx, tm_idx) != (0, 0)
and not nbr_src and not lcl_src):
return False
self.src_dict[sp_idx][tm_idx] = src
self.dst_dict[sp_idx][tm_idx] = dst
return True
def _alloc_resource(self, max_util_drop=0.05):
'''
Decide the resource allocation. Return whether the allocation succeeds.
`max_util_drop` specifies the maximum utilization drop due to mismatch
throughput between layers.
'''
self.alloc = tuple()
# Allocate processing subregions.
subregions = self._alloc_proc(max_util_drop=max_util_drop)
if not subregions:
return False
no_time_mux = len(self.network) == sum(len(ltpl) for ltpl in self.seg)
# All layers that have model filters must be spatially scheduled.
if no_time_mux:
for ltpl in self.seg:
if len([l for l in ltpl
if isinstance(self.network[l], ConvLayer)]) > 1:
no_time_mux = False
break
for sp_idx, ltpl in enumerate(self.seg):
# Resource for the subregion.
rtpl = tuple()
for tm_idx, _ in enumerate(ltpl):
# Processing region.
proc_region = subregions[sp_idx]
# Data source.
src = self.src_dict[sp_idx][tm_idx]
if None in src:
# Data source is memory.
assert src == (None,)
src_data_region = self.resource.src_data_region
for sh_idx_list in self.ifm_fwd_dict.values():
# Find shared memory source to use forwarding.
if (sp_idx, tm_idx) in sh_idx_list[1:]:
src_data_region = subregions[sh_idx_list[0].sp_idx]
break
elif src:
# Data source is neighbor.
assert len(src) == 1
src_data_region = subregions[src[0].sp_idx]
else:
# Data source is all local.
src_data_region = proc_region
# Data destination.
dst = self.dst_dict[sp_idx][tm_idx]
if None in dst:
# Data destination is memory.
# assert dst == (None,)
# Now we can have both memory and neighbor destinations. If
# they co-exist, we need to store them locally and also
# store back to memory. In this case the dst data region is
# set to memory.
dst_data_region = self.resource.dst_data_region
elif dst:
# Data destinations are neighbors.
# Put data in local. The next layers will fetch.
dst_data_region = proc_region
else:
# Data destination is all local.
dst_data_region = proc_region
# Make resource.
# Note that DRAM bandwidth is not split here. We optimistically
# assume each layer can use the full DRAM bandwidth at
# different time. We adjust this assumption when calculating
# the segment timing.
rtpl += (self.resource._replace(
proc_region=proc_region,
src_data_region=src_data_region,
dst_data_region=dst_data_region,
no_time_mux=no_time_mux),)
assert len(rtpl) == len(ltpl)
self.alloc += (rtpl,)
assert len(self.alloc) == len(self.seg)
return True
def _alloc_proc(self, max_util_drop=0.05):
'''
Allocate processing subregions for the segment.
Return a list of processing subregions corresponding to the first-level
(spatial scheduled) layers in the segment. Return None if allocation
failed.
`max_util_drop` specifies the maximum utilization drop due to mismatch
throughput between layers.
'''
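        # Worked example (numbers are hypothetical): with 16 nodes and
        # per-vertex ops [300, 100], nodes_raw is [12, 4]; trying the common
        # factor 4 keeps [12, 4], giving time = max(300/12, 100/4) = 25 and
        # utilization = 400 / 25 / 16 = 1.0, so that factor is accepted.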
# Spatial allocation.
proc_region = self.resource.proc_region
dim_nodes = proc_region.dim
total_nodes = dim_nodes.size()
# Number of operations of each spatial allocation.
ops = [sum(self.network[l].total_ops() for l in ltpl)
for ltpl in self.seg]
# Enforce a common factor among the numbers of nodes allocated to all
        # vertices in the segment. Such a common factor is likely to be the
# common height of the vertex node regions.
common_factor_list = [cf for cf, _ in util.factorize(dim_nodes.h, 2)]
for cf in sorted(common_factor_list, reverse=True):
# Pick the largest common factor within the utilization constraint.
            # The number of nodes of each vertex should be roughly
            # proportional to the number of ops of the vertex.
nodes_raw = [o * 1. / sum(ops) * total_nodes for o in ops]
# Round to the common factor multiples.
assert total_nodes % cf == 0
nodes = [max(1, int(round(nr / cf))) * cf for nr in nodes_raw]
# Fix margin.
while sum(nodes) != total_nodes:
diff = [n - nr for n, nr in zip(nodes, nodes_raw)]
if sum(nodes) > total_nodes:
# Decrease the nodes for the vertex with the maximum
# positive difference.
idx, _ = max(enumerate(diff), key=lambda tpl: tpl[1])
nodes[idx] -= cf
else:
# Increase the nodes for the vertex with the minimum
# negative difference.
idx, _ = min(enumerate(diff), key=lambda tpl: tpl[1])
nodes[idx] += cf
if 0 in nodes:
continue
# Utilization.
time = max(o * 1. / n for o, n in zip(ops, nodes))
utilization = sum(ops) / time / sum(nodes)
assert utilization < 1 + 1e-6
if utilization >= 1 - max_util_drop:
# Found
break
else:
# Not found.
return None
# Allocate in the processing region according to the number of nodes.
subregions = proc_region.allocate(nodes)
assert subregions
assert len(subregions) == len(self.seg)
if len(subregions) == 1:
assert subregions[0] == proc_region
return subregions
def _init_sym_cstrs(self):
'''
Initialize the symbolic scheduling constraints for the layers in the
segment, by constructing a nested lists of dicts `cstr_symargs` whose
values can be symbolic expressions for the keyword arguments of layers
in the segment, and a dict `cstr_symvals` mapping each symbol to its
possible numerical values.
Rules for constraints.
- Top BAT loop factor.
With a single layer, there is no constraint on the top BAT loop factor.
Otherwise all layers must share the same factor, namely `topbat_shr`.
- Fmap forwarding and fully buffering.
        Only CONV layers need to fully buffer fmaps. Local-region layers
process data in a streaming manner.
Each CONV layer, and all local-region layers immediately following it
within the same spatial scheduling, are made into a group G.
(initial) if G is both the first spatial and the first temporal
scheduling with a CONV layer, it can choose whether to fully buffer
ofmaps or not. This is a configuration to explore, namely `fbofm_init`.
We decide its value by choosing the one that gives the fewer fully
buffered inter-spatial pairs on the critical forwarding path, and the
smaller maximum fully buffered data size.
(within-group) within G, the CONV layer and all local-region layers
should use the same top OFM factors (IFM factors are automatically
determined by OFM factors in local-region layers), unless CONV ofmaps
need to be fully buffered, in which case the CONV layer and the last
layer in G fully buffer ofmaps (top OFM factor is 1), while the other
layers still share the same top OFM factor, which may differ from 1.
(inter-temporal) if G has a source from G' in the same spatial
scheduling (which must be immediately before G), G should fully buffer
ifmaps, and G' should fully buffer ofmaps.
(inter-spatial) if G has a source from G' in another spatial scheduling
(where the source must be the last temporal scheduling in G' and that
spatial scheduling),
(a) if G' already fully buffers ofmaps, make G fully buffer ifmaps.
(b) otherwise, make G fully buffer ofmaps (do not require G' to fully
buffer ifmaps; leave it to other rules, e.g. inter-temporal, to
decide); forward data between G' and G, by matching their top O/IFM
factors (biasing this case for smaller pipeline filling delay).
Notice the destination can be: (1) the leading CONV layer, whose top
IFM factor is constrained; (2) a local-region layer, where we constrain
the top OFM factors of this group (unless otherwise constrained by
fully buffering ofmaps).
'''
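# A hypothetical example of how the rules combine (layer names are made
# up): for seg = (('conv1', 'pool1'), ('conv2',)), conv1 and pool1 form
# group G1 in spatial scheduling 0, and conv2 forms G2 in spatial
# scheduling 1 with its source in G1. Rule (initial) lets G1 choose
# `fbofm_init`; rule (within-group) ties pool1's top OFM factor to
# conv1's; rule (inter-spatial) then either makes G2 fully buffer ifmaps
# (case (a), if G1 fully buffers ofmaps) or makes G2 fully buffer ofmaps
# and match its top IFM factor to G1's top OFM factor (case (b)).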
# pylint: disable=too-many-branches
# Symbolic variables mapping to numerical values.
symvals = dict()
# Top BAT loop factor.
topbat = symbols('topbat_shr', integer=True)
symvals[topbat] = [t for t, _ in util.factorize(self.batch_size, 2)]
# Whether the initial CONV layer fully buffers ofmaps.
fbofm_init = symbols('fbofm_init')
symvals[fbofm_init] = [False, True]
def _layer_topofm_vals(layer_name):
layer = self.network[layer_name]
# Require that each blocked chunk of ofmaps (total ofmap size divided
# by the top factor) takes at least 5% of the gbuf capacity of a
# single node, to avoid too fine blocking.
tmax = layer.total_ofmap_size(self.batch_size) \
/ (0.05 * self.resource.size_gbuf)
vals = [t for t, _ in util.factorize(layer.nofm, 2)
if t <= tmax or t == 1]
assert vals
return vals
def _layer_topifm_vals(layer_name):
layer = self.network[layer_name]
# Require that each blocked chunk of ifmaps (total ifmap size divided
# by the top factor) takes at least 5% of the gbuf capacity of a
# single node, to avoid too fine blocking.
tmax = layer.total_ifmap_size(self.batch_size) \
/ (0.05 * self.resource.size_gbuf)
vals = [t for t, _ in util.factorize(layer.nifm, 2)
if t <= tmax or t == 1]
assert vals
return vals
# Layer constraint kwargs.
symargs = [[{'topbat': topbat} for _ in ltpl] for ltpl in self.seg]
# Candidates for critical forwarding path between spatial scheduling.
sp_crit_path_cands = set()
sp_crit_path_cands.add((0,)) # init with the first spatial.
# The last CONV layer index.
last_conv = PipelineSegment.SchedIndex(-1, 0)
# Whether the current group needs to fully buffer ofmaps. The decision
# is applied, with a delay, to the last layer in the group.
curr_fbofm = False
for sp_idx, ltpl in enumerate(self.seg):
# Initial topofm, in case of a non-CONV starting layer.
curr_topofm = symbols('topofm_{}_s'.format(sp_idx), integer=True)
symvals[curr_topofm] = _layer_topofm_vals(ltpl[0])
for tm_idx, l in enumerate(ltpl):
layer = self.network[l]
curr_sa = symargs[sp_idx][tm_idx]
# Neighbor source dependency.
nsrc_sa = None
src_deps = self.src_dict[sp_idx][tm_idx]
if any(s is not None for s in src_deps):
assert len(src_deps) == 1
nbr_src = src_deps[0]
assert nbr_src.sp_idx < sp_idx
nsrc_sa = symargs[nbr_src.sp_idx][nbr_src.tm_idx]
assert nsrc_sa # not empty, used to test nbr src exists.
# Set critical path candidates.
new_cands = set()
for cand in sp_crit_path_cands:
if cand[-1] == nbr_src.sp_idx:
new_cands.add(cand + (sp_idx,))
sp_crit_path_cands |= new_cands
if isinstance(layer, ConvLayer):
# Conv layer.
# The last group may need to fully buffer ofmaps.
# Apply the delayed decision to the immediately preceding layer.
if curr_fbofm is not False:
assert last_conv >= (0, 0)
if last_conv.sp_idx == sp_idx:
assert tm_idx > 0
lsrc_sa = symargs[sp_idx][tm_idx - 1]
else:
lsrc_sa = symargs[last_conv.sp_idx][-1]
lsrc_sa['fbofm'] = curr_fbofm
# Reset.
curr_fbofm = False
# New topofm for a new group.
curr_topofm = symbols('topofm_{}_{}'.format(sp_idx, tm_idx),
integer=True)
symvals[curr_topofm] = _layer_topofm_vals(l)
# Set topofm.
curr_sa['topofm'] = curr_topofm
if sp_idx == last_conv.sp_idx:
# Rule inter-temporal.
assert tm_idx > 0
# Make this group fully buffer ifmaps.
curr_sa['fbifm'] = True
# Make the last group fully buffer ofmaps.
last_sa = symargs[sp_idx][last_conv.tm_idx]
lsrc_sa = symargs[sp_idx][tm_idx - 1]
last_sa['fbofm'] = True
lsrc_sa['fbofm'] = True
elif nsrc_sa:
# Rule inter-spatial.
# We only look at this rule when the inter-temporal rule
# does not apply and the ifmaps of this group are not yet
# required to be fully buffered.
if not self.with_opt:
# Basic pipelining requires fully buffering all
# pairs of neighbor src/dst.
nsrc_sa['fbofm'] = True
nsrc_fbofm = nsrc_sa.get('fbofm', False)
# (a): if the source already fully buffers ofmaps.
# Make this group fully buffer ifmaps.
curr_sa['fbifm'] = symeq(nsrc_fbofm, True)
# (b)-(1): otherwise.
# Make this group fully buffer ofmaps.
curr_sa['fbofm'] = symeq(nsrc_fbofm, False)
curr_fbofm = symeq(nsrc_fbofm, False) # delayed apply.
# Match top OFM/IFM factors.
curr_sa['topifm'] = sympiecewise(
(nsrc_sa['topofm'], symeq(nsrc_fbofm, False)),
(curr_sa.get('topifm', 0), True))
elif last_conv < (0, 0):
# The first CONV layer.
# Rule initial.
curr_sa['fbofm'] = fbofm_init
curr_fbofm = fbofm_init
last_conv = PipelineSegment.SchedIndex(sp_idx, tm_idx)
else:
# Non-Conv layer.
if nsrc_sa:
# Rule inter-spatial, (b)-(2).
nsrc_fbofm = nsrc_sa.get('fbofm', False)
curr_topofm = sympiecewise(
(nsrc_sa['topofm'], symeq(nsrc_fbofm, False)),
(curr_topofm, True))
# Also backtrace this group.
for bt_idx in range(last_conv.tm_idx, tm_idx):
symargs[sp_idx][bt_idx]['topofm'] = curr_topofm
# Rule within-group.
curr_sa['topofm'] = curr_topofm
# If this layer has no on-chip destinations, cancel the
# requirement to fully buffer ofmaps.
if all(d is None for d in self.dst_dict[sp_idx][tm_idx]) \
and tm_idx == len(ltpl) - 1:
curr_sa.pop('fbofm', False)
# Simplify.
self._simplify_symargs(symargs, symvals)
# Get critical forwarding path between spatial scheduling.
# The critical path has the longest forwarding chain.
sp_crit_path = max(sp_crit_path_cands, key=len)
# Check maximum fully-buffering size, and decide fbofm_init.
opt_val = None
opt_key = (float('inf'),) * 2 # (num of fb pairs, max fb size)
num_sp_fbs = 0
for val in symvals.get(fbofm_init, [False]):
subs_symargs = self._subs_symargs(symargs, fbofm_init, val)
maxsz = 0
numfb = 0
for sp_idx, (ltpl, atpl) in enumerate(zip(self.seg, subs_symargs)):
ms = max(itertools.chain(
((self.network[l].total_ofmap_size() if a.get('fbofm')
else 0)
+ (self.network[l].total_ifmap_size() if a.get('fbifm')
else 0)
for l, a in zip(ltpl, atpl)),
[0])) # safe max with default.
if ms > self.alloc[sp_idx][0].proc_region.dim.size() \
* self.alloc[sp_idx][0].size_gbuf:
break
maxsz = max(maxsz, ms)
if sp_idx in sp_crit_path and atpl[-1].get('fbofm', False):
numfb += 1
else:
key = (numfb, maxsz)
if key < opt_key:
opt_val, opt_key = val, key
num_sp_fbs = numfb
if opt_val is None:
return False
# Use the optimal value.
symvals[fbofm_init] = [opt_val]
self._simplify_symargs(symargs, symvals)
# Shared memory source must have the same topifm.
for sh_idx_list in self.ifm_fwd_dict.values():
assert len(sh_idx_list) > 1
fet_sp_idx = sh_idx_list[0].sp_idx
sh_symarg_list = [symargs[idx.sp_idx][0] for idx in sh_idx_list]
# Must have no constraint on ifmaps access from memory.
assert all(not sa.get('fbifm', False) and not sa.get('topifm', 0)
for sa in sh_symarg_list)
# Cannot constrain both topifm and topofm.
if any(sa.get('fbofm', False) or sa.get('topofm', 0)
for sa in sh_symarg_list):
sh_kwargs = {'fbifm': True}
else:
topifm = symbols('topifm_{}'.format(fet_sp_idx), integer=True)
symvals[topifm] = _layer_topifm_vals(self.seg[fet_sp_idx][0])
sh_kwargs = {'topifm': topifm}
# Set constraints.
for sa in sh_symarg_list:
sa.update(sh_kwargs)
# Simplify.
self._simplify_symargs(symargs, symvals)
# Turn constraints into lazily updated rules.
self._lazify_topofm_symargs(symargs, symvals)
# Cannot simplify any more as update_dict is not sympifi-able.
# Sort symbol dict.
symvals = OrderedDict(sorted(((s, symvals[s]) for s in symvals),
key=lambda item: str(item[0])))
if not symvals:
# Must add a dummy symbol so iterative substitution can happen.
symvals[symbols('_dummy')] = [None]
self.cstr_symargs = symargs
self.cstr_symvals = symvals
self.cstr_num_sp_fbs = num_sp_fbs
try:
self.cstr_topbat_idx = list(symvals.keys()).index(topbat)
except ValueError:
self.cstr_topbat_idx = None
return True
@staticmethod
def _simplify_symargs_one_pass(symargs, symvals):
'''
Simplify symargs and symvals in-place:
- If fbi/ofm is False, then remove it.
- If fbi/ofm is True, then remove topi/ofm.
- If a symbol can take only one value, then substitute it.
- If a symbol only occurs once, then remove its constraint.
Return whether the symargs and symvals are already simplified.
'''
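# A made-up example of one pass: given
#   symargs = [[{'fbifm': True, 'topifm': t, 'topofm': s}, {'topofm': s}]]
#   symvals = {t: [1, 2, 4], s: [2]}
# the pass drops 'topifm' (as 'fbifm' is True), substitutes s = 2 (its
# only possible value, shared by both layers), removes the now-unused t,
# and empties symvals, so the next pass has nothing left to do.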
for a in itertools.chain.from_iterable(symargs):
is_fbifm = a.get('fbifm')
is_fbofm = a.get('fbofm')
# pylint: disable=singleton-comparison
# lhs may be symbolic, see
# docs.sympy.org/latest/modules/logic.html#sympy.logic.boolalg.BooleanTrue
if is_fbifm == True:
a.pop('topifm', 0)
if is_fbifm == False:
a.pop('fbifm', False)
if is_fbofm == True:
a.pop('topofm', 0)
if is_fbofm == False:
a.pop('fbofm', False)
subs_dict = {}
# Possible values for symbols.
subs_dict.update(
(s, symvals[s][0]) for s in symvals if len(symvals[s]) == 1)
# Count the occurrence of symbols in all args (values).
symcnts = Counter(
s for a in itertools.chain.from_iterable(symargs)
for val in a.values() for s in symtuple(val).free_symbols)
assert set(symcnts.keys()).issubset(symvals.keys())
subs_dict.update((s, None)
for s in set(symvals.keys()) - set(symcnts.keys()))
subs_dict.update((s, 0 if str(s).startswith('top') else False)
for s in symcnts if symcnts[s] <= 1)
# Substitute symbols and remove from symbol dict.
for a in itertools.chain.from_iterable(symargs):
for k in a:
a[k] = symtuple(a[k]).subs(subs_dict)[0]
for s in subs_dict:
del symvals[s]
return not subs_dict
def _simplify_symargs(self, symargs, symvals):
''' Simplify symargs and symvals in-place iteratively. '''
while not self._simplify_symargs_one_pass(symargs, symvals):
pass
used_syms = symtuple(
*[symtuple(*a.values())
for a in itertools.chain.from_iterable(symargs)]).free_symbols
assert set(used_syms) == set(symvals.keys())
assert all(val for val in symvals.values())
@staticmethod
def _subs_symargs(symargs, *subs_args):
'''
Substitute symbols. The additional arguments are passed to subs().
Return a new substituted copy without modifying the original one.
'''
# sympify=False is necessary because there may be str in the values.
return [[dict((k, symtuple(a[k], sympify=False).subs(*subs_args)[0])
for k in a) for a in atpl] for atpl in symargs]
class TopOfmUpdateLambda(symbasic):
''' A sympifi-able lambda function to lazily update topofm. '''
# pylint: disable=no-init
def __new__(cls, *args):
return super(PipelineSegment.TopOfmUpdateLambda, cls).__new__(cls)
def __call__(self, arg_s, arg_r):
setattr(arg_s, 'topofm', arg_r.scheme['to'][0])
def _lazify_topofm_symargs(self, symargs, symvals):
'''
Turn qualified topofm constraints into lazily updated rules.
If a symbol is only used as the topofm constraint by a single CONV
layer and some local-region layers, we can turn it into a lazily
updated rule.
'''
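# A hypothetical example: if symbol s constrains the topofm of CONV layer
# 'c' and of the local-region layers 'p1' and 'p2' that follow it (and of
# no other CONV layer), the 'topofm' entries are removed, and p1/p2
# instead get an update_dict entry {'c': TopOfmUpdateLambda()}, so their
# topofm is filled in lazily from c's chosen scheme during scheduling.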
sym2conv = {} # symbol --> the only CONV layer using it.
sym2lrs = {} # symbol --> list of local-region layer using it.
unqual_syms = set() # symbols used by two or more CONV layers.
for l, a in zip(itertools.chain.from_iterable(self.seg),
itertools.chain.from_iterable(symargs)):
layer = self.network[l]
if isinstance(layer, ConvLayer):
topofm = a.get('topofm', 0)
topifm = a.get('topifm', 0)
for s in symtuple(topofm, topifm).free_symbols:
if s not in unqual_syms:
if s in sym2conv:
# If a symbol is used in two CONV layers, it cannot
# be lazily updated.
del sym2conv[s]
sym2lrs.pop(s, [])
unqual_syms.add(s)
elif topofm == s:
assert s not in sym2lrs
sym2conv[s] = l
else:
topofm = a.get('topofm', 0)
if topofm in sym2conv:
sym2lrs.setdefault(topofm, []).append(l)
assert 0 not in sym2conv and 0 not in sym2lrs
syms = sym2conv.keys() # symbols to be lazily updated.
lr2conv = {} # local-region layer to the CONV layer constraining it.
for s in syms:
for lr in sym2lrs.get(s, []):
lr2conv[lr] = sym2conv[s]
lconvs = set(lr2conv.values()) # CONV layers whose topofm is to be removed.
for l, a in zip(itertools.chain.from_iterable(self.seg),
itertools.chain.from_iterable(symargs)):
if l in lconvs:
# Remove CONV topofm.
assert sym2conv[a['topofm']] == l
del a['topofm']
elif l in lr2conv:
# Link local-region layer to the CONV layer.
lconv = lr2conv[l]
assert sym2conv[a['topofm']] == lconv
del a['topofm']
a['update_dict'] = {
lconv: PipelineSegment.TopOfmUpdateLambda()}
for s in syms:
del symvals[s]
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mock
from oslo_config import cfg
from oslo_utils import timeutils
import requests
from six.moves import range
import webob
import webob.dec
import webob.exc
from nova.api import ec2
from nova import context
from nova import exception
from nova import test
from nova import wsgi
CONF = cfg.CONF
@webob.dec.wsgify
def conditional_forbid(req):
"""Helper wsgi app returns 403 if param 'die' is 1."""
if 'die' in req.params and req.params['die'] == '1':
raise webob.exc.HTTPForbidden()
return 'OK'
class LockoutTestCase(test.NoDBTestCase):
"""Test case for the Lockout middleware."""
def setUp(self):
super(LockoutTestCase, self).setUp()
timeutils.set_time_override()
self.lockout = ec2.Lockout(conditional_forbid)
def tearDown(self):
timeutils.clear_time_override()
super(LockoutTestCase, self).tearDown()
def _send_bad_attempts(self, access_key, num_attempts=1):
"""Fail x."""
for i in range(num_attempts):
req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key)
self.assertEqual(req.get_response(self.lockout).status_int, 403)
def _is_locked_out(self, access_key):
"""Sends a test request to see if key is locked out."""
req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key)
return (req.get_response(self.lockout).status_int == 403)
def test_lockout(self):
self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
def test_timeout(self):
self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test'))
def test_multiple_keys(self):
self._send_bad_attempts('test1', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
def test_window_timeout(self):
self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
timeutils.advance_time_seconds(CONF.lockout_window * 60)
self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
class ExecutorTestCase(test.NoDBTestCase):
def setUp(self):
super(ExecutorTestCase, self).setUp()
self.executor = ec2.Executor()
def _execute(self, invoke):
class Fake(object):
pass
fake_ec2_request = Fake()
fake_ec2_request.invoke = invoke
fake_wsgi_request = Fake()
fake_wsgi_request.environ = {
'nova.context': context.get_admin_context(),
'ec2.request': fake_ec2_request,
}
return self.executor(fake_wsgi_request)
def _extract_message(self, result):
tree = etree.fromstring(result.body)
return tree.findall('./Errors')[0].find('Error/Message').text
def _extract_code(self, result):
tree = etree.fromstring(result.body)
return tree.findall('./Errors')[0].find('Error/Code').text
def test_instance_not_found(self):
def not_found(context):
raise exception.InstanceNotFound(instance_id=5)
result = self._execute(not_found)
self.assertIn('i-00000005', self._extract_message(result))
self.assertEqual('InvalidInstanceID.NotFound',
self._extract_code(result))
def test_instance_not_found_none(self):
def not_found(context):
raise exception.InstanceNotFound(instance_id=None)
# NOTE(mikal): we want no exception to be raised here, which was what
# was happening in bug/1080406
result = self._execute(not_found)
self.assertIn('None', self._extract_message(result))
self.assertEqual('InvalidInstanceID.NotFound',
self._extract_code(result))
def test_snapshot_not_found(self):
def not_found(context):
raise exception.SnapshotNotFound(snapshot_id=5)
result = self._execute(not_found)
self.assertIn('snap-00000005', self._extract_message(result))
self.assertEqual('InvalidSnapshot.NotFound',
self._extract_code(result))
def test_volume_not_found(self):
def not_found(context):
raise exception.VolumeNotFound(volume_id=5)
result = self._execute(not_found)
self.assertIn('vol-00000005', self._extract_message(result))
self.assertEqual('InvalidVolume.NotFound', self._extract_code(result))
def test_floating_ip_bad_create_request(self):
def bad_request(context):
raise exception.FloatingIpBadRequest()
result = self._execute(bad_request)
self.assertIn('BadRequest', self._extract_message(result))
self.assertEqual('UnsupportedOperation', self._extract_code(result))
class FakeResponse(object):
reason = "Test Reason"
def __init__(self, status_code=400):
self.status_code = status_code
def json(self):
return {}
class KeystoneAuthTestCase(test.NoDBTestCase):
def setUp(self):
super(KeystoneAuthTestCase, self).setUp()
self.kauth = ec2.EC2KeystoneAuth(conditional_forbid)
def _validate_ec2_error(self, response, http_status, ec2_code):
self.assertEqual(response.status_code, http_status,
'Expected HTTP status %s' % http_status)
root_e = etree.XML(response.body)
self.assertEqual(root_e.tag, 'Response',
"Top element must be Response.")
errors_e = root_e.find('Errors')
error_e = errors_e[0]
code_e = error_e.find('Code')
self.assertIsNotNone(code_e, "Code element must be present.")
self.assertEqual(code_e.text, ec2_code)
def test_no_signature(self):
req = wsgi.Request.blank('/test')
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
def test_no_key_id(self):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
@mock.patch.object(requests, 'request', return_value=FakeResponse())
def test_communication_failure(self, mock_request):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
req.GET['AWSAccessKeyId'] = 'test-key-id'
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
mock_request.assert_called_with('POST', CONF.keystone_ec2_url,
data=mock.ANY, headers=mock.ANY,
verify=mock.ANY, cert=mock.ANY)
@mock.patch.object(requests, 'request', return_value=FakeResponse(200))
def test_no_result_data(self, mock_request):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
req.GET['AWSAccessKeyId'] = 'test-key-id'
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
mock_request.assert_called_with('POST', CONF.keystone_ec2_url,
data=mock.ANY, headers=mock.ANY,
verify=mock.ANY, cert=mock.ANY)
|
|
#!/usr/bin/env python3
import pytest
import aiospamc
from aiospamc.frontend import (
check,
headers,
process,
ping,
report,
report_if_spam,
symbols,
tell,
)
from aiospamc.exceptions import (
BadResponse,
UsageException,
DataErrorException,
NoInputException,
NoUserException,
NoHostException,
UnavailableException,
InternalSoftwareException,
OSErrorException,
OSFileException,
CantCreateException,
IOErrorException,
TemporaryFailureException,
ProtocolException,
NoPermissionException,
ConfigException,
ServerTimeoutException,
ResponseException,
)
from aiospamc.client import Client
from aiospamc.incremental_parser import ResponseParser
from aiospamc.options import ActionOption, MessageClassOption
from aiospamc.responses import Response
@pytest.mark.parametrize(
"func,expected_verb",
[
(check, "CHECK"),
(headers, "HEADERS"),
(process, "PROCESS"),
(report, "REPORT"),
(report_if_spam, "REPORT_IFSPAM"),
(symbols, "SYMBOLS"),
],
)
async def test_functions_with_default_parameters(
func, expected_verb, mock_client_dependency, spam, mocker
):
req_spy = mocker.spy(mock_client_dependency, "request")
await func(spam, client=mock_client_dependency)
req = req_spy.await_args[0][0]
assert expected_verb == req.verb
assert "User" not in req.headers
assert "Compress" not in req.headers
assert spam == req.body
@pytest.mark.parametrize(
"func,expected_verb",
[
(check, "CHECK"),
(headers, "HEADERS"),
(process, "PROCESS"),
(report, "REPORT"),
(report_if_spam, "REPORT_IFSPAM"),
(symbols, "SYMBOLS"),
],
)
async def test_functions_with_optional_parameters(
func, expected_verb, mock_client_dependency, spam, mocker
):
req_spy = mocker.spy(mock_client_dependency, "request")
await func(spam, user="testuser", compress=True, client=mock_client_dependency)
req = req_spy.await_args[0][0]
assert expected_verb == req.verb
assert "testuser" == req.headers["User"].name
assert "zlib" == req.headers["Compress"].algorithm
assert spam == req.body
@pytest.mark.parametrize(
"func",
[
check,
headers,
process,
report,
report_if_spam,
symbols,
],
)
async def test_functions_returns_response(func, mock_client_dependency, spam, mocker):
req_spy = mocker.spy(mock_client_dependency, "request")
result = await func(spam, client=mock_client_dependency)
assert req_spy.spy_return is result
async def test_ping_request_with_parameters(mock_client_dependency, mocker):
req_spy = mocker.spy(mock_client_dependency, "request")
await ping(client=mock_client_dependency)
req = req_spy.await_args[0][0]
assert "PING" == req.verb
assert "User" not in req.headers
async def test_ping_returns_response(mock_client_dependency, mocker):
req_spy = mocker.spy(mock_client_dependency, "request")
result = await ping(client=mock_client_dependency)
assert req_spy.spy_return is result
async def test_tell_request_with_default_parameters(
mock_client_dependency, spam, mocker
):
req_spy = mocker.spy(mock_client_dependency, "request")
await tell(spam, MessageClassOption.spam, client=mock_client_dependency)
req = req_spy.await_args[0][0]
assert "TELL" == req.verb
assert "User" not in req.headers
assert "Compress" not in req.headers
assert MessageClassOption.spam == req.headers["Message-class"].value
assert spam == req.body
async def test_tell_request_with_optional_parameters(
mock_client_dependency, spam, mocker
):
req_spy = mocker.spy(mock_client_dependency, "request")
await tell(
spam,
MessageClassOption.spam,
set_action=ActionOption(local=True, remote=True),
remove_action=ActionOption(local=True, remote=True),
user="testuser",
compress=True,
client=mock_client_dependency,
)
req = req_spy.await_args[0][0]
assert "TELL" == req.verb
assert "testuser" == req.headers["User"].name
assert "zlib" == req.headers["Compress"].algorithm
assert MessageClassOption.spam == req.headers["Message-class"].value
assert ActionOption(local=True, remote=True) == req.headers["Set"].action
assert ActionOption(local=True, remote=True) == req.headers["Remove"].action
assert spam == req.body
async def test_tell_returns_response(mock_client_dependency, spam, mocker):
req_spy = mocker.spy(mock_client_dependency, "request")
result = await tell(spam, MessageClassOption.spam, client=mock_client_dependency)
assert req_spy.spy_return is result
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_bad_response(
func, mock_client_response, response_invalid, mocker
):
mock_client = mock_client_response(response_invalid)
with pytest.raises(BadResponse):
await func(mocker.MagicMock(), client=mock_client)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_usage(func, mock_client_response, mocker, ex_usage):
mock_client = mock_client_response(ex_usage)
with pytest.raises(UsageException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_data_err(func, mock_client_response, mocker, ex_data_err):
mock_client = mock_client_response(ex_data_err)
with pytest.raises(DataErrorException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_no_input(func, mock_client_response, mocker, ex_no_input):
mock_client = mock_client_response(ex_no_input)
with pytest.raises(NoInputException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_no_user(func, mock_client_response, mocker, ex_no_user):
mock_client = mock_client_response(ex_no_user)
with pytest.raises(NoUserException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_no_host(func, mock_client_response, mocker, ex_no_host):
mock_client = mock_client_response(ex_no_host)
with pytest.raises(NoHostException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_unavailable(func, mock_client_response, mocker, ex_unavailable):
mock_client = mock_client_response(ex_unavailable)
with pytest.raises(UnavailableException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_software(func, mock_client_response, mocker, ex_software):
mock_client = mock_client_response(ex_software)
with pytest.raises(InternalSoftwareException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_os_error(func, mock_client_response, mocker, ex_os_err):
mock_client = mock_client_response(ex_os_err)
with pytest.raises(OSErrorException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_os_file(func, mock_client_response, mocker, ex_os_file):
mock_client = mock_client_response(ex_os_file)
with pytest.raises(OSFileException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_cant_create(func, mock_client_response, mocker, ex_cant_create):
mock_client = mock_client_response(ex_cant_create)
with pytest.raises(CantCreateException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_io_error(func, mock_client_response, mocker, ex_io_err):
mock_client = mock_client_response(ex_io_err)
with pytest.raises(IOErrorException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_temporary_failure(
func, mock_client_response, mocker, ex_temp_fail
):
mock_client = mock_client_response(ex_temp_fail)
with pytest.raises(TemporaryFailureException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_protocol(func, mock_client_response, mocker, ex_protocol):
mock_client = mock_client_response(ex_protocol)
with pytest.raises(ProtocolException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_no_permission(func, mock_client_response, mocker, ex_no_perm):
mock_client = mock_client_response(ex_no_perm)
with pytest.raises(NoPermissionException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_config(func, mock_client_response, mocker, ex_config):
mock_client = mock_client_response(ex_config)
with pytest.raises(ConfigException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_timeout(func, mock_client_response, mocker, ex_timeout):
mock_client = mock_client_response(ex_timeout)
with pytest.raises(ServerTimeoutException):
await func(
mocker.MagicMock(),
client=mock_client,
)
@pytest.mark.parametrize(
"func", [check, headers, process, report, report_if_spam, symbols]
)
async def test_raises_undefined(func, mock_client_response, mocker, ex_undefined):
mock_client = mock_client_response(ex_undefined)
with pytest.raises(ResponseException):
await func(
mocker.MagicMock(),
client=mock_client,
)
async def test_ping_raises_usage(mock_client_response, ex_usage):
mock_client = mock_client_response(ex_usage)
with pytest.raises(UsageException):
await ping(client=mock_client)
async def test_ping_raises_data_err(mock_client_response, ex_data_err):
mock_client = mock_client_response(ex_data_err)
with pytest.raises(DataErrorException):
await ping(client=mock_client)
async def test_ping_raises_no_input(mock_client_response, ex_no_input):
mock_client = mock_client_response(ex_no_input)
with pytest.raises(NoInputException):
await ping(client=mock_client)
async def test_ping_raises_no_user(mock_client_response, ex_no_user):
mock_client = mock_client_response(ex_no_user)
with pytest.raises(NoUserException):
await ping(client=mock_client)
async def test_ping_raises_no_host(mock_client_response, ex_no_host):
mock_client = mock_client_response(ex_no_host)
with pytest.raises(NoHostException):
await ping(client=mock_client)
async def test_ping_raises_unavailable(mock_client_response, ex_unavailable):
mock_client = mock_client_response(ex_unavailable)
with pytest.raises(UnavailableException):
await ping(client=mock_client)
async def test_ping_raises_software(mock_client_response, ex_software):
mock_client = mock_client_response(ex_software)
with pytest.raises(InternalSoftwareException):
await ping(client=mock_client)
async def test_ping_raises_os_error(mock_client_response, ex_os_err):
mock_client = mock_client_response(ex_os_err)
with pytest.raises(OSErrorException):
await ping(client=mock_client)
async def test_ping_raises_os_file(mock_client_response, ex_os_file):
mock_client = mock_client_response(ex_os_file)
with pytest.raises(OSFileException):
await ping(client=mock_client)
async def test_ping_raises_cant_create(mock_client_response, ex_cant_create):
mock_client = mock_client_response(ex_cant_create)
with pytest.raises(CantCreateException):
await ping(client=mock_client)
async def test_ping_raises_io_error(mock_client_response, ex_io_err):
mock_client = mock_client_response(ex_io_err)
with pytest.raises(IOErrorException):
await ping(client=mock_client)
async def test_ping_raises_temporary_failure(mock_client_response, ex_temp_fail):
mock_client = mock_client_response(ex_temp_fail)
with pytest.raises(TemporaryFailureException):
await ping(client=mock_client)
async def test_ping_raises_protocol(mock_client_response, ex_protocol):
mock_client = mock_client_response(ex_protocol)
with pytest.raises(ProtocolException):
await ping(client=mock_client)
async def test_ping_raises_no_permission(mock_client_response, ex_no_perm):
mock_client = mock_client_response(ex_no_perm)
with pytest.raises(NoPermissionException):
await ping(client=mock_client)
async def test_ping_raises_config(mock_client_response, ex_config):
mock_client = mock_client_response(ex_config)
with pytest.raises(ConfigException):
await ping(client=mock_client)
async def test_ping_raises_timeout(mock_client_response, ex_timeout):
mock_client = mock_client_response(ex_timeout)
with pytest.raises(ServerTimeoutException):
await ping(client=mock_client)
async def test_ping_raises_undefined(mock_client_response, ex_undefined):
mock_client = mock_client_response(ex_undefined)
with pytest.raises(ResponseException):
await ping(client=mock_client)
async def test_tell_raises_usage(mock_client_response, mocker, ex_usage):
mock_client = mock_client_response(ex_usage)
with pytest.raises(UsageException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_data_err(mock_client_response, mocker, ex_data_err):
mock_client = mock_client_response(ex_data_err)
with pytest.raises(DataErrorException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_no_input(mock_client_response, mocker, ex_no_input):
mock_client = mock_client_response(ex_no_input)
with pytest.raises(NoInputException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_no_user(mock_client_response, mocker, ex_no_user):
mock_client = mock_client_response(ex_no_user)
with pytest.raises(NoUserException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_no_host(mock_client_response, mocker, ex_no_host):
mock_client = mock_client_response(ex_no_host)
with pytest.raises(NoHostException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_unavailable(mock_client_response, mocker, ex_unavailable):
mock_client = mock_client_response(ex_unavailable)
with pytest.raises(UnavailableException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_software(mock_client_response, mocker, ex_software):
mock_client = mock_client_response(ex_software)
with pytest.raises(InternalSoftwareException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_os_error(mock_client_response, mocker, ex_os_err):
mock_client = mock_client_response(ex_os_err)
with pytest.raises(OSErrorException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_os_file(mock_client_response, mocker, ex_os_file):
mock_client = mock_client_response(ex_os_file)
with pytest.raises(OSFileException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_cant_create(mock_client_response, mocker, ex_cant_create):
mock_client = mock_client_response(ex_cant_create)
with pytest.raises(CantCreateException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_io_error(mock_client_response, mocker, ex_io_err):
mock_client = mock_client_response(ex_io_err)
with pytest.raises(IOErrorException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_temporary_failure(
mock_client_response, mocker, ex_temp_fail
):
mock_client = mock_client_response(ex_temp_fail)
with pytest.raises(TemporaryFailureException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_protocol(mock_client_response, mocker, ex_protocol):
mock_client = mock_client_response(ex_protocol)
with pytest.raises(ProtocolException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_no_permission(mock_client_response, mocker, ex_no_perm):
mock_client = mock_client_response(ex_no_perm)
with pytest.raises(NoPermissionException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_config(mock_client_response, mocker, ex_config):
mock_client = mock_client_response(ex_config)
with pytest.raises(ConfigException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_timeout(mock_client_response, mocker, ex_timeout):
mock_client = mock_client_response(ex_timeout)
with pytest.raises(ServerTimeoutException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
async def test_tell_raises_undefined(mock_client_response, mocker, ex_undefined):
mock_client = mock_client_response(ex_undefined)
with pytest.raises(ResponseException):
await tell(
mocker.MagicMock(),
MessageClassOption.spam,
client=mock_client,
)
|
|
#!python3
import io
import re
from enum import Enum
import editor
import console
from blackmamba.ide.annotation import Annotation, Style
from itertools import groupby
from blackmamba.config import get_config_value
import blackmamba.ide.tab as tab
import os
import blackmamba.log as log
def _hud_alert_delay():
return get_config_value('analyzer.hud_alert_delay', 1.0)
def _ignore_codes():
return get_config_value('analyzer.ignore_codes', ['W391', 'W293'])
def _max_line_length():
return get_config_value('analyzer.max_line_length', 79)
def _remove_whitespaces():
return get_config_value('analyzer.remove_whitespaces', True)
_REMOVE_TRAILING_WHITESPACES_REGEX = re.compile(r'[ \t]+$', re.MULTILINE)
_REMOVE_TRAILING_LINES_REGEX = re.compile(r'\s+\Z', re.MULTILINE)
#
# Common for pep8 & pyflakes
#
class _Source(Enum):
pep8 = 'PEP8'
pyflakes = 'pyflakes'
flake8 = 'flake8'
class _AnalyzerAnnotation(Annotation):
def __init__(self, line, text, source, style):
super().__init__(line, text, style)
self.source = source
def __lt__(self, other):
if self.source is _Source.pep8 and other.source is _Source.pyflakes:
return True
if self.source is _Source.flake8 and other.source is not _Source.flake8:
return True
if self.style is Style.warning and other.style is Style.error:
return True
return False
#
# pep8
#
def _pep8_annotations(text, ignore=None, max_line_length=None):
import pep8
class _Pep8AnnotationReport(pep8.BaseReport):
def __init__(self, options):
super().__init__(options)
self.annotations = []
def error(self, line_number, offset, text, check):
# If super() doesn't return a code, this error is ignored
if not super().error(line_number, offset, text, check):
return
annotation = _AnalyzerAnnotation(self.line_offset + line_number, text, _Source.pep8, Style.warning)
self.annotations.append(annotation)
# pep8 requires you to include \n at the end of lines
lines = text.splitlines(True)
style_guide = pep8.StyleGuide(reporter=_Pep8AnnotationReport, )
options = style_guide.options
if ignore:
options.ignore = tuple(ignore)
else:
options.ignore = tuple()
if max_line_length:
options.max_line_length = max_line_length
checker = pep8.Checker(None, lines, options, None)
checker.check_all()
return checker.report.annotations
#
# pyflakes
#
_LINE_COL_MESSAGE_REGEX = re.compile(r'^(\d+):(\d+): (.*)$')
_LINE_MESSAGE_REGEX = re.compile(r'^(\d+): (.*)$')
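# Example lines matched by the regexes above, after the leading
# 'filename:' prefix is stripped (the messages are illustrative, not
# taken from a specific pyflakes run): '42:5: undefined name "foo"'
# matches _LINE_COL_MESSAGE_REGEX, while '42: invalid syntax' matches
# _LINE_MESSAGE_REGEX.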
def _get_annotations(path, stream, style):
path_len = len(path)
annotations = []
for line in stream.getvalue().splitlines():
if not line.startswith(path):
continue
line = line[(path_len + 1):] # Strip 'filename:'
match = _LINE_COL_MESSAGE_REGEX.fullmatch(line)
if not match:
match = _LINE_MESSAGE_REGEX.fullmatch(line)
if not match:
continue
line = int(match.group(1))
if match.lastindex == 3:
annotation = _AnalyzerAnnotation(
line, 'Col {}: {}'.format(match.group(2), match.group(3)),
_Source.pyflakes, style
)
else:
annotation = _AnalyzerAnnotation(
line, match.group(2),
_Source.pyflakes, style
)
annotations.append(annotation)
return annotations
def _pyflakes_annotations(path, text):
import pyflakes.api as pyflakes
warning_stream = io.StringIO()
error_stream = io.StringIO()
reporter = pyflakes.modReporter.Reporter(warning_stream, error_stream)
pyflakes.check(text, path, reporter)
warnings = _get_annotations(path, warning_stream, Style.warning)
errors = _get_annotations(path, error_stream, Style.error)
return warnings + errors
#
# flake8
#
def _parse_flake8_output(path, output_path):
path_len = len(path)
annotations = []
with open(output_path, 'r') as output:
report = output.read()
for line in report.splitlines():
if not line.startswith(path):
continue
line = line[(path_len + 1):] # Strip 'filename:'
match = _LINE_COL_MESSAGE_REGEX.fullmatch(line)
if not match:
match = _LINE_MESSAGE_REGEX.fullmatch(line)
if not match:
continue
line = int(match.group(1))
def get_style(message):
return Style.warning if message.startswith('W') else Style.error
if match.lastindex == 3:
annotation = _AnalyzerAnnotation(
line, 'Col {}: {}'.format(match.group(2), match.group(3)),
_Source.flake8, get_style(match.group(3))
)
else:
annotation = _AnalyzerAnnotation(
line, match.group(2),
_Source.flake8, get_style(match.group(2))
)
annotations.append(annotation)
return annotations
def _flake8_annotations(path, options):
import os
_tmp = os.environ.get('TMPDIR', os.environ.get('TMP'))
_output_file = os.path.join(_tmp, 'blackmamba.flake8.txt')
annotations = []
for o in options:
try:
from flake8.main import application
if os.path.exists(_output_file):
os.remove(_output_file)
o = list(o)
o.insert(0, path)
o.extend([
'-j', '0', # Disable subprocess
'--output-file={}'.format(_output_file)
])
app = application.Application()
app.run(o)
del app
annotations.extend(_parse_flake8_output(path, _output_file))
except Exception as e:
log.error('flake8 failed: {}'.format(str(e)))
if os.path.exists(_output_file):
os.remove(_output_file)
return annotations
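# Hypothetical shape of the 'analyzer.flake8' config value consumed above:
# an iterable of option lists, each run as a separate flake8 pass, e.g.
# [['--select=E9,F'], ['--max-line-length=127']]. The flags are standard
# flake8 options; the concrete values here are made up for illustration.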
#
# main
#
def _annotate(line, annotations, scroll):
by_priority = sorted(annotations, reverse=True)
style = by_priority[0].style.value
text = ',\n'.join([a.text for a in by_priority])
editor.annotate_line(line, text, style, True, scroll=scroll)
def _remove_trailing_whitespaces(text):
return _REMOVE_TRAILING_WHITESPACES_REGEX.sub('', text)
def _remove_trailing_lines(text):
return _REMOVE_TRAILING_LINES_REGEX.sub('', text)
def _editor_text():
text = editor.get_text()
range_end = len(text)
if _remove_whitespaces():
text = _remove_trailing_whitespaces(text)
text = _remove_trailing_lines(text)
editor.replace_text(0, range_end, text)
tab.save()
# Pythonista adds a trailing '\n' automatically, so if we removed it,
# we have to simulate Pythonista's behavior by adding '\n' back
# for the pyflakes & pep8 analysis.
return text + '\n'
tab.save()
return text
def main():
path = editor.get_path()
if not path:
return
if not path.endswith('.py'):
return
editor.clear_annotations()
flake8_options = get_config_value('analyzer.flake8', None)
selection = editor.get_selection()
text = _editor_text()
if flake8_options:
annotations = _flake8_annotations(
os.path.abspath(path),
flake8_options
)
else:
annotations = _pep8_annotations(
text,
ignore=_ignore_codes(),
max_line_length=_max_line_length()
)
annotations += _pyflakes_annotations(path, text)
if not annotations:
if selection:
editor.set_selection(selection[0], scroll=True)
console.hud_alert('No Issues Found', 'iob:checkmark_32', _hud_alert_delay())
return None
scroll = True
by_line = sorted(annotations, key=lambda x: x.line)
for l, a in groupby(by_line, lambda x: x.line):
_annotate(l, a, scroll)
scroll = False
if __name__ == '__main__':
from blackmamba.bundle import bundle
with bundle('analyze'):
main()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and creates session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from tensorflow.python.client import session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_mod
class SessionManager(object):
"""Training helper that restores from checkpoint and creates session.
This class is a small wrapper that takes care of session creation and
checkpoint recovery. It also provides functions to facilitate
coordination among multiple training threads or processes:
* Checkpointing trained variables as the training progresses.
* Initializing variables on startup, restoring them from the most recent
checkpoint after a crash, or waiting for checkpoints to become available.
### Usage:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will checkpoint the model in '/tmp/mydir'.
sm = SessionManager()
sess = sm.prepare_session(master, init_op, saver, checkpoint_dir)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
`prepare_session()` initializes or restores a model. It requires `init_op`
and `saver` as arguments.
A second process could wait for the model to be ready by doing the following:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will wait for the model to become ready.
sm = SessionManager()
sess = sm.wait_for_session(master)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
`wait_for_session()` waits for a model to be initialized by other processes.
"""
# Protects _TENSORFLOW_LAUNCHED
_launch_lock = threading.Lock()
# True if we have already launched the tensorflow in-process server.
_TENSORFLOW_LAUNCHED = False
def __init__(self, local_init_op=None, ready_op=None,
graph=None, recovery_wait_secs=30):
"""Creates a SessionManager.
The `local_init_op` is an `Operation` that is always run after a new session
is created. If `None`, this step is skipped.
The `ready_op` is an `Operation`. The model is considered ready
if that operation succeeds. If `None`, the model is not checked
for readiness.
`recovery_wait_secs` is the number of seconds between checks that
the model is ready. It is used by processes to wait for a model to
be initialized or restored. Defaults to 30 seconds.
Args:
local_init_op: An `Operation` run immediately after session creation.
Usually used to initialize tables and local variables.
ready_op: An `Operation` to check if the model is initialized.
graph: The `Graph` that the model will use.
recovery_wait_secs: Seconds between checks for the model to be ready.
"""
# Sets default values of arguments.
if graph is None:
graph = ops.get_default_graph()
self._local_init_op = local_init_op
self._ready_op = ready_op
self._graph = graph
self._recovery_wait_secs = recovery_wait_secs
self._target = None
def prepare_session(self, master, init_op=None, saver=None,
checkpoint_dir=None, wait_for_checkpoint=False,
max_wait_secs=7200, config=None, init_feed_dict=None,
init_fn=None):
"""Creates a `Session`. Makes sure the model is ready to be used.
Creates a `Session` on 'master'. If a `saver` object is passed in, and
`checkpoint_dir` points to a directory containing valid checkpoint
files, then it will try to recover the model from checkpoint. If
no checkpoint files are available, and `wait_for_checkpoint` is
`True`, then the process would check every `recovery_wait_secs`,
up to `max_wait_secs`, for recovery to succeed.
If the model cannot be recovered successfully then it is initialized by
either running the provided `init_op`, or calling the provided `init_fn`.
It is an error if the model cannot be recovered and neither an `init_op`
nor an `init_fn` is passed.
This is a convenient function for the following, with a few error checks
added:
```python
sess, initialized = self.recover_session(master)
if not initialized:
if init_op:
sess.run(init_op, feed_dict=init_feed_dict)
if init_fn:
init_fn(sess)
return sess
```
Args:
master: `String` representation of the TensorFlow master to use.
init_op: Optional `Operation` used to initialize the model.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
init_feed_dict: Optional dictionary that maps `Tensor` objects to feed
values. This feed dictionary is passed to the session `run()` call when
running the init op.
init_fn: Optional callable used to initialize the model. Called after the
optional `init_op` is called. The callable must accept one argument,
the session being initialized.
Returns:
A `Session` object that can be used to drive the model.
Raises:
RuntimeError: If the model cannot be initialized or recovered.
"""
sess, initialized = self.recover_session(
master, saver, checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs, config=config)
if not initialized:
if not init_op and not init_fn:
raise RuntimeError("Model is not initialized and no init_op or "
"init_fn was given")
if init_op:
sess.run(init_op, feed_dict=init_feed_dict)
if init_fn:
init_fn(sess)
not_ready = self._model_not_ready(sess)
if not_ready:
raise RuntimeError("Init operations did not make model ready. "
"Init op: %s, init fn: %s, error: %s"
% (init_op.name, init_fn, not_ready))
return sess
def recover_session(self, master, saver=None, checkpoint_dir=None,
wait_for_checkpoint=False, max_wait_secs=7200,
config=None):
"""Creates a `Session`, recovering if possible.
Creates a new session on 'master'. If the session is not initialized
and can be recovered from a checkpoint, recover it.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, initialized) where 'initialized' is `True` if
the session could be recovered, `False` otherwise.
"""
self._target = master
sess = session.Session(self._target, graph=self._graph, config=config)
if self._local_init_op:
sess.run([self._local_init_op])
# If either saver or checkpoint_dir is not specified, cannot restore. Just
# return.
if not saver or not checkpoint_dir:
not_ready = self._model_not_ready(sess)
return sess, not_ready is None
# Waits up to max_wait_secs for the checkpoint to become available.
wait_time = 0
ckpt = saver_mod.get_checkpoint_state(checkpoint_dir)
while not ckpt or not ckpt.model_checkpoint_path:
if wait_for_checkpoint and wait_time < max_wait_secs:
logging.info("Waiting for checkpoint to be available.")
time.sleep(self._recovery_wait_secs)
wait_time += self._recovery_wait_secs
ckpt = saver_mod.get_checkpoint_state(checkpoint_dir)
else:
return sess, False
# Loads the checkpoint and verifies that it makes the model ready.
saver.restore(sess, ckpt.model_checkpoint_path)
last_checkpoints = []
for fname in ckpt.all_model_checkpoint_paths:
fnames = gfile.Glob(fname)
if fnames:
mtime = gfile.Stat(fnames[0]).mtime
last_checkpoints.append((fname, mtime))
saver.set_last_checkpoints_with_time(last_checkpoints)
not_ready = self._model_not_ready(sess)
if not_ready:
logging.info("Restoring model from %s did not make model ready: %s",
ckpt.model_checkpoint_path, not_ready)
return sess, False
else:
logging.info("Restored model from %s", ckpt.model_checkpoint_path)
return sess, True
def wait_for_session(self, master, config=None, max_wait_secs=float("Inf")):
"""Creates a new `Session` and waits for model to be ready.
Creates a new `Session` on 'master'. Waits for the model to be
initialized or recovered from a checkpoint. It's expected that another
thread or process will make the model ready. This method is intended to
be used by threads/processes that participate in a distributed training
configuration where a different thread/process is responsible for
initializing or recovering the model being trained.
NB: The amount of time this method waits for the session is bounded
by max_wait_secs. By default, this function will wait indefinitely.
Args:
master: `String` representation of the TensorFlow master to use.
config: Optional ConfigProto proto used to configure the session.
max_wait_secs: Maximum time to wait for the session to become available.
Returns:
A `Session`. May be None if the operation exceeds the timeout
specified by config.operation_timeout_in_ms.
Raises:
tf.DeadlineExceededError: if the session is not available after
max_wait_secs.
"""
self._target = master
if max_wait_secs is None:
max_wait_secs = float("Inf")
timer = _CountDownTimer(max_wait_secs)
while True:
sess = session.Session(self._target, graph=self._graph, config=config)
if self._local_init_op:
sess.run([self._local_init_op])
not_ready = self._model_not_ready(sess)
if not not_ready:
return sess
self._safe_close(sess)
# Do we have enough time left to try again?
remaining_secs_after_wait = (
timer.secs_remaining() - self._recovery_wait_secs)
if remaining_secs_after_wait < 0:
raise errors.DeadlineExceededError(
None, None,
"Session was not ready after waiting %d secs." % (max_wait_secs,))
logging.info("Waiting for model to be ready: %s", not_ready)
time.sleep(self._recovery_wait_secs)
def _safe_close(self, sess):
"""Closes a session without raising an exception.
Just like sess.close() but ignores exceptions.
Args:
sess: A `Session`.
"""
# pylint: disable=broad-except
try:
sess.close()
except Exception:
# Intentionally not logging to avoid user complaints that
# they get cryptic errors. We really do not care that Close
# fails.
pass
# pylint: enable=broad-except
def _model_not_ready(self, sess):
"""Checks if the model is ready or not.
Args:
sess: A `Session`.
Returns:
`None` if the model is ready, a `String` with the reason why it is not
ready otherwise.
"""
if self._ready_op is None:
return None
else:
try:
sess.run(self._ready_op)
return None
except errors.FailedPreconditionError as e:
if "uninitialized" not in str(e):
logging.warning("Model not ready raised: %s", str(e))
raise e
return str(e)
class _CountDownTimer(object):
def __init__(self, duration_secs):
self._start_time_secs = time.time()
self._duration_secs = duration_secs
def secs_remaining(self):
diff = self._duration_secs - (time.time() - self._start_time_secs)
return max(0, diff)
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from pyqtgraph.flowchart.library.common import CtrlNode, PlottingCtrlNode
from pyqtgraph.graphicsItems.InfiniteLine import InfiniteLine
import numpy as np
import acq4.util.functions as functions
from six.moves import range
class Threshold(CtrlNode):
"""Absolute threshold detection filter. Returns indexes where data crosses threshold."""
nodeName = 'ThresholdDetect'
uiTemplate = [
('direction', 'combo', {'values': ['rising', 'falling'], 'index': 0}),
('threshold', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-12, 'dec': True, 'bounds': [None, None], 'siPrefix': True}),
]
def __init__(self, name, **opts):
CtrlNode.__init__(self, name, self.uiTemplate)
def processData(self, data):
s = self.stateGroup.state()
if s['direction'] == 'rising':
d = 1
else:
d = -1
return functions.threshold(data, s['threshold'], d)
class StdevThreshold(CtrlNode):
"""Relative threshold event detection. Finds regions in data greater than threshold*stdev.
Returns a record array with columns: index, length, sum, peak.
This function is only useful for data with its baseline removed."""
nodeName = 'StdevThreshold'
uiTemplate = [
('threshold', 'spin', {'value': 0, 'step': 1, 'minStep': 0.1, 'dec': True, 'bounds': [None, None], 'siPrefix': True}),
]
def __init__(self, name, **opts):
CtrlNode.__init__(self, name, self.uiTemplate)
def processData(self, data):
s = self.stateGroup.state()
return functions.stdevThresholdEvents(data, s['threshold'])
class ZeroCrossingEvents(CtrlNode):
"""Detects events in a waveform by splitting the data up into chunks separated by zero-crossings,
then keeping only the ones that meet certain criteria."""
nodeName = 'ZeroCrossing'
uiTemplate = [
('minLength', 'intSpin', {'value': 0, 'min': 0, 'max': 100000}),
('minSum', 'spin', {'value': 0, 'step': 1, 'minStep': 0.1, 'dec': True, 'bounds': [None, None], 'siPrefix': True}),
('minPeak', 'spin', {'value': 0, 'step': 1, 'minStep': 0.1, 'dec': True, 'bounds': [None, None], 'siPrefix': True}),
('eventLimit', 'intSpin', {'value': 400, 'min': 1, 'max': 1e9}),
]
def __init__(self, name, **opts):
CtrlNode.__init__(self, name, self.uiTemplate)
def processData(self, data):
s = self.stateGroup.state()
events = functions.zeroCrossingEvents(data, minLength=s['minLength'], minPeak=s['minPeak'], minSum=s['minSum'])
events = events[:s['eventLimit']]
return events
class ThresholdEvents(PlottingCtrlNode):
"""Detects regions of a waveform that cross a threshold (positive or negative) and returns the time, length, sum, and peak of each event."""
nodeName = 'ThresholdEvents'
uiTemplate = [
('baseline', 'spin', {'value':0, 'step':1, 'minStep': 0.1, 'dec': True, 'bounds': [None, None], 'siPrefix': True, 'tip': 'Blue line -- Set the baseline to measure the minPeak and threshold from'}),
('minPeak', 'spin', {'value': 0, 'step': 1, 'minStep': 0.1, 'dec': True, 'bounds': [None, None], 'siPrefix': True, 'tip': 'Yellow line -- Events must reach this far from baseline to be detected.'}),
('threshold', 'spin', {'value': 1e-12, 'step': 1, 'minStep': 0.1, 'dec': True, 'bounds': [None, None], 'siPrefix': True, 'tip': 'Green line -- Events are detected only if they cross this threshold (distance from baseline).'}),
('display', 'check', {'value':True, 'tip':'If checked display dragable lines for baseline, minPeak and threshold'}),
#('index', 'combo', {'values':['start','peak'], 'index':0}),
('minLength', 'intSpin', {'value': 0, 'min': 0, 'max': 1e9, 'tip': 'Events must contain this many samples to be detected.'}),
('minSum', 'spin', {'value': 0, 'step': 1, 'minStep': 0.1, 'dec': True, 'bounds': [None, None], 'siPrefix': True}),
('eventLimit', 'intSpin', {'value': 100, 'min': 1, 'max': 1e9, 'tip': 'Limits the number of events that may be detected in a single trace. This prevents runaway processes due to over-sensitive detection criteria.'}),
('deadTime', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-4, 'bounds': [0,None], 'siPrefix': True, 'suffix': 's', 'tip': 'Ignore events that occur too quickly following another event.'}),
('adjustTimes', 'check', {'value': True, 'tip': 'If False, then event times are reported where the trace crosses threshold. If True, the event time is adjusted to estimate when the trace would have crossed 0.'}),
('reverseTime', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-4, 'bounds': [0,None], 'siPrefix': True, 'suffix': 's', 'tip': 'Ignore events that 1) have the opposite sign of the event immediately prior and 2) occur within the given time window after the prior event. This is useful for ignoring rebound signals.'}),
]
def __init__(self, name, **opts):
PlottingCtrlNode.__init__(self, name, self.uiTemplate)
#self.plotTerminal = self.addOutput('plot', optional=True)
self.baseLine = InfiniteLine(angle=0, movable=True, pen='b')
self.minPeakLine = InfiniteLine(angle=0, movable=True, pen='y')
self.thresholdLine = InfiniteLine(angle=0, movable=True, pen='g')
self.lines = [self.baseLine, self.minPeakLine, self.thresholdLine]
self.ctrls['display'].toggled.connect(self.displayToggled)
self.ctrls['baseline'].sigValueChanged.connect(self.adjustBaseLine)
self.ctrls['threshold'].sigValueChanged.connect(self.adjustThresholdLine)
self.ctrls['minPeak'].sigValueChanged.connect(self.adjustPeakLine)
for line in self.lines:
line.sigPositionChangeFinished.connect(self.updateCtrlValues)
#self.remotePlot = None
def restoreState(self, state):
CtrlNode.restoreState(self, state)
for c in self.plotTerminal.connections():
#print c
p = c.node().getPlot()
for l in self.lines:
p.addItem(l)
self.baseLine.setPos(self.ctrls['baseline'].value())
self.minPeakLine.setPos(self.ctrls['minPeak'].value())
self.thresholdLine.setPos(self.ctrls['threshold'].value())
def displayToggled(self):
b = self.ctrls['display'].isChecked()
for item in self.lines:
item.setVisible(b)
def adjustBaseLine(self, sb):
#print "vlaue:", value
self.baseLine.setValue(sb.value())
def adjustThresholdLine(self, sb):
self.thresholdLine.setValue(sb.value()+self.baseLine.value())
def adjustPeakLine(self, sb):
self.minPeakLine.setValue(sb.value()+self.baseLine.value())
def updateCtrlValues(self, line):
self.ctrls['baseline'].setValue(self.baseLine.value())
self.ctrls['minPeak'].setValue(self.minPeakLine.value()-self.baseLine.value())
self.ctrls['threshold'].setValue(self.thresholdLine.value()-self.baseLine.value())
#def connected(self, term, remote):
#CtrlNode.connected(self, term, remote)
#if term is not self.plot:
#return
#node = remote.node()
#node.sigPlotChanged.connect(self.connectToPlot)
#self.connectToPlot(node)
def connectToPlot(self, node):
#if self.remotePlot is not None:
# self.remotePlot = None
if node.plot is None:
return
for l in self.lines:
node.getPlot().addItem(l)
#def disconnected(self, term, remote):
#CtrlNode.disconnected(self, term, remote)
#if term is not self.plot:
#return
#remote.node().sigPlotChanged.disconnect(self.connectToPlot)
#self.disconnectFromPlot(remote.node().getPlot())
def disconnectFromPlot(self, plot):
#if self.remotePlot is None:
# return
for l in self.lines:
plot.removeItem(l)
def processData(self, data):
s = self.stateGroup.state()
#print "==== Threshold Events ====="
#print " baseline:", s['baseline']
events = functions.thresholdEvents(data, s['threshold'], s['adjustTimes'], baseline=s['baseline'])
## apply first round of filters
mask = events['len'] >= s['minLength']
mask *= abs(events['sum']) >= s['minSum']
mask *= abs(events['peak']) >= abs(s['minPeak'])
events = events[mask]
## apply deadtime filter
mask = np.ones(len(events), dtype=bool)
last = 0
dt = s['deadTime']
rt = s['reverseTime']
for i in range(1, len(events)):
tdiff = events[i]['time'] - events[last]['time']
if tdiff < dt: ## check dead time
mask[i] = False
elif tdiff < rt and (events[i]['peak'] * events[last]['peak'] < 0): ## check reverse time
mask[i] = False
else:
last = i
#mask[1:] *= (events[1:]['time']-events[:-1]['time']) >= s['deadTime']
events = events[mask]
## limit number of events
events = events[:s['eventLimit']]
return events
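# Illustrative sketch (not part of the original module): the dead-time /
# reverse-time filter used in ThresholdEvents.processData, applied to a
# standalone record array. Field names ('time', 'peak') follow the event arrays
# produced above; any input data would be supplied by the caller.
def _deadtime_filter_example(events, dead_time, reverse_time):
    """Drop events that follow the previous kept event too closely, or that flip
    sign within `reverse_time` of it (rebound suppression)."""
    mask = np.ones(len(events), dtype=bool)
    last = 0
    for i in range(1, len(events)):
        tdiff = events[i]['time'] - events[last]['time']
        if tdiff < dead_time:
            mask[i] = False
        elif tdiff < reverse_time and (events[i]['peak'] * events[last]['peak'] < 0):
            mask[i] = False
        else:
            last = i
    return events[mask]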
class SpikeDetector(CtrlNode):
"""Very simple spike detector. Returns the indexes of sharp spikes by comparing each sample to its neighbors."""
nodeName = "SpikeDetect"
uiTemplate = [
('radius', 'intSpin', {'value': 1, 'min': 1, 'max': 100000}),
('minDiff', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-12, 'dec': True, 'siPrefix': True}),
]
def __init__(self, name, **opts):
CtrlNode.__init__(self, name, self.uiTemplate)
def processData(self, data):
s = self.stateGroup.state()
radius = s['radius']
d1 = data.view(np.ndarray)
d2 = d1[radius:] - d1[:-radius] #a derivative
mask1 = d2 > s['minDiff'] #where derivative is large and positive
mask2 = d2 < -s['minDiff'] #where derivative is large and negative
maskpos = mask1[:-radius] * mask2[radius:] #both need to be true
maskneg = mask1[radius:] * mask2[:-radius]
mask = maskpos + maskneg ## All regions that match criteria
## now reduce consecutive hits to a single hit.
        hits = mask[1:] & ~mask[:-1]  ## boolean rising-edge; '-' on bool arrays is unsupported in newer numpy
sHits = np.argwhere(hits)[:,0]+(radius+2)
## convert to record array with 'index' column
ret = np.empty(len(sHits), dtype=[('index', int), ('time', float)])
ret['index'] = sHits
ret['time'] = data.xvals('Time')[sHits]
return ret
def processBypassed(self, args):
return {'Out': np.empty(0, dtype=[('index', int), ('time', float)])}
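# Illustrative sketch (not part of the original module): the spike-detection
# steps from SpikeDetector.processData applied to a plain numpy array, without
# the MetaArray time axis. Purely an explanatory example; the function name and
# defaults are made up.
def _spike_indexes_example(trace, radius=1, min_diff=0.0):
    d1 = np.asarray(trace)
    d2 = d1[radius:] - d1[:-radius]              # difference over `radius` samples (discrete derivative)
    mask1 = d2 > min_diff                        # large positive slope
    mask2 = d2 < -min_diff                       # large negative slope
    maskpos = mask1[:-radius] * mask2[radius:]   # rise followed by a fall -> positive spike
    maskneg = mask1[radius:] * mask2[:-radius]   # fall followed by a rise -> negative spike
    mask = maskpos + maskneg                     # all candidate regions
    hits = mask[1:] & ~mask[:-1]                 # keep only the first sample of each run
    return np.argwhere(hits)[:, 0] + (radius + 2)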
class EventFilter(CtrlNode):
"""Selects events from a list based on various criteria"""
nodeName = "EventFilter"
uiTemplate = [
#('minLength', 'intSpin', {'value': 0, 'min': 0, 'max': 1e9}),
#('minSum', 'spin', {'value': 0, 'step': 1, 'minStep': 0.1, 'dec': True, 'range': [None, None], 'siPrefix': True}),
#('minPeak', 'spin', {'value': 0, 'step': 1, 'minStep': 0.1, 'dec': True, 'range': [None, None], 'siPrefix': True}),
#('eventLimit', 'intSpin', {'value': 100, 'min': 1, 'max': 1e9}),
#('deadTime', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-4, 'range': [0,None], 'siPrefix': True, 'suffix': 's'}),
('fitAmplitude', 'check', {'value': False}),
('minFitAmp', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-12, 'bounds': [None,None], 'siPrefix': True, 'hidden': True}),
('maxFitAmp', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-12, 'bounds': [None,None], 'siPrefix': True, 'hidden': True}),
('fitDecayTau', 'check', {'value': False}),
('minFitDecayTau', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-4, 'bounds': [None,None], 'siPrefix': True, 'suffix': 's', 'hidden': True}),
('maxFitDecayTau', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-4, 'bounds': [None,None], 'siPrefix': True, 'suffix': 's', 'hidden': True}),
('fitRiseTau', 'check', {'value': False}),
('minFitRiseTau', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-4, 'bounds': [None,None], 'siPrefix': True, 'suffix': 's', 'hidden': True}),
('maxFitRiseTau', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-4, 'bounds': [None,None], 'siPrefix': True, 'suffix': 's', 'hidden': True}),
('fitFractionalError', 'check', {'value': False}),
('minFitFractionalError', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-12, 'bounds': [None,None], 'siPrefix': True, 'hidden': True}),
('maxFitFractionalError', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-12, 'bounds': [None,None], 'siPrefix': True, 'hidden': True}),
('fitLengthOverDecay', 'check', {'value': False}),
('minFitLengthOverDecay', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-12, 'bounds': [None,None], 'siPrefix': True, 'hidden': True}),
('maxFitLengthOverDecay', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-12, 'bounds': [None,None], 'siPrefix': True, 'hidden': True}),
('fitTime', 'check', {'value': False}),
('minFitTime', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-4, 'bounds': [None,None], 'siPrefix': True, 'suffix': 's', 'hidden': True}),
('maxFitTime', 'spin', {'value': 0, 'step': 1, 'minStep': 1e-4, 'bounds': [None,None], 'siPrefix': True, 'suffix': 's', 'hidden': True}),
('region', 'combo', {'values': ['all']}),
]
ranges = [
('fitAmplitude', 'minFitAmp', 'maxFitAmp'),
('fitDecayTau', 'minFitDecayTau', 'maxFitDecayTau'),
('fitRiseTau', 'minFitRiseTau', 'maxFitRiseTau'),
('fitFractionalError', 'minFitFractionalError', 'maxFitFractionalError'),
('fitLengthOverDecay', 'minFitLengthOverDecay', 'maxFitLengthOverDecay'),
('fitTime', 'minFitTime', 'maxFitTime'),
]
def __init__(self, name):
CtrlNode.__init__(self, name, terminals={
'events': {'io': 'in'},
'regions': {'io': 'in'},
'output': {'io': 'out', 'bypass': 'events'}})
for check, spin1, spin2 in self.ranges:
self.ctrls[check].toggled.connect(self.checkToggled)
#self.updateRegions()
def updateRegions(self, regions):
regCombo = self.ctrls['region']
### first check length of comboLists and update if they do not match -- avoids index errors in check of individual items below
if regCombo.count() != len(regions):
regCombo.clear()
regCombo.addItems(regions)
return
### check individual items in the list
test = []
for i in range(regCombo.count()):
test.append(regCombo.itemText(i) == regions[i])
if False not in test:
return
else:
regCombo.clear()
regCombo.addItems(regions)
return
def updateUi(self):
pass
def checkToggled(self):
#s = self.stateGroup.state()
for name, a, b in self.ranges:
if self.ctrls[name].isChecked():
self.showRow(a)
self.showRow(b)
else:
self.hideRow(a)
self.hideRow(b)
def process(self, events, regions=None, display=True):
s = self.stateGroup.state()
data=events
mask = np.ones(len(data), dtype=bool)
if display:
newReg = ['all']
if regions is None:
regions = {}
for r in regions.keys():
newReg.append(r.node().name())
self.updateRegions(newReg)
for b, mn, mx in self.ranges:
if s[b]:
try:
mask *= data[b] < s[mx]
mask *= data[b] > s[mn]
except ValueError: ## If the data doesn't have this key, don't fret; just ignore it.
pass
#print " filter 1:", mask.sum()
region = s['region']
if region != 'all':
mask *= data['region'] == region
#print " filter 2:", mask.sum(), region
#print " filter 3:", len(data[mask])
return {'output':data[mask]}
|
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
    # Coerce all keys and values to strings; iterate over a copy since the dict is mutated.
    for key, value in list(envDict.items()):
        del envDict[key]
        envDict[str(key)] = str(value)
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
    ctx.logger.info('Checking if a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute with the mapped attribute name.
    # If it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
    # No mapping exists; try to get the attribute directly from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
    # Attribute retrieval failed, fall back to the node property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
    # Property retrieval failed, fall back to the host instance
host = get_host(entity)
if host is not None:
        ctx.logger.info('Attribute {0} not found, looking up the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
        ctx.logger.info('Property {0} not found, looking up the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
    ctx.logger.info('Checking if a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute with the mapped attribute name.
    # If it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
    pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
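# Example of parse_output behaviour (illustrative comment only, not executed):
#   parse_output("starting\nEXPECTED_OUTPUT_PORT=8080\ndone")
#   -> {'last_output': 'done', 'outputs': {'PORT': '8080'}}
# Lines matching EXPECTED_OUTPUT_<NAME>=<value> are collected into 'outputs';
# the last non-matching line is reported as 'last_output'.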
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
            ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
new_script_process = {'env': env_map}
ctx.logger.info('Operation is executed with inputs {0}'.format(inputs))
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
new_script_process['env'].update(inputs['process']['env'])
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('artifacts/php-type/scripts/install_php.sh'), new_script_process, operationOutputNames)
for k,v in parsed_output['outputs'].items():
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:create:{0}'.format(k)] = v
ctx.instance.update()
|
|
# -*- coding: utf-8 -*-
"""Constants for PyBEL.
This module maintains the strings used throughout the PyBEL codebase to promote consistency.
"""
from .config import connection
def get_cache_connection() -> str:
"""Get the preferred RFC-1738 database connection string.
1. Check the environment variable ``PYBEL_CONNECTION``
2. Check the ``PYBEL_CONNECTION`` key in the config file ``~/.config/pybel/config.json``. Optionally, this config
file might be in a different place if the environment variable ``PYBEL_CONFIG_DIRECTORY`` has been set.
    3. Return a default connection string using a SQLite database in the ``~/.pybel`` directory. Optionally, this
       directory might be in a different place if the environment variable ``PYBEL_RESOURCE_DIRECTORY`` has been set.
"""
return connection
PYBEL_CONTEXT_TAG = "pybel_context"
PYBEL_AUTOEVIDENCE = "Automatically added by PyBEL"
CITATION_TYPE_BOOK = "book"
CITATION_TYPE_PUBMED = "pubmed"
CITATION_TYPE_PMC = "pmc"
CITATION_TYPE_URL = "url"
CITATION_TYPE_DOI = "doi"
CITATION_TYPE_OTHER = "other"
CITATION_TYPES = {
CITATION_TYPE_BOOK,
CITATION_TYPE_PUBMED,
CITATION_TYPE_PMC,
CITATION_TYPE_URL,
CITATION_TYPE_DOI,
CITATION_TYPE_OTHER,
}
CITATION_NORMALIZER = {
"pubmed central": "pmc",
"pmid": "pubmed",
"online resource": "url",
}
NAMESPACE_DOMAIN_BIOPROCESS = "BiologicalProcess"
NAMESPACE_DOMAIN_CHEMICAL = "Chemical"
NAMESPACE_DOMAIN_GENE = "Gene and Gene Products"
NAMESPACE_DOMAIN_OTHER = "Other"
#: The valid namespace types
#: .. seealso:: https://wiki.openbel.org/display/BELNA/Custom+Namespaces
NAMESPACE_DOMAIN_TYPES = {
NAMESPACE_DOMAIN_BIOPROCESS,
NAMESPACE_DOMAIN_CHEMICAL,
NAMESPACE_DOMAIN_GENE,
NAMESPACE_DOMAIN_OTHER,
}
#: Represents the key for the citation date in a citation dictionary
CITATION_DATE = "date"
#: Represents the key for the citation authors in a citation dictionary
CITATION_AUTHORS = "authors"
#: Represents the key for the citation journal in a citation dictionary
CITATION_JOURNAL = "journal"
#: Represents the key for the optional PyBEL citation volume entry in a citation dictionary
CITATION_VOLUME = "volume"
#: Represents the key for the optional PyBEL citation issue entry in a citation dictionary
CITATION_ISSUE = "issue"
#: Represents the key for the optional PyBEL citation pages entry in a citation dictionary
CITATION_PAGES = "pages"
#: Represents the key for the optional PyBEL citation first author entry in a citation dictionary
CITATION_FIRST_AUTHOR = "first"
#: Represents the key for the optional PyBEL citation last author entry in a citation dictionary
CITATION_LAST_AUTHOR = "last"
#: Represents the type of article (Journal Article, Review, etc.)
CITATION_ARTICLE_TYPE = "article_type"
# Used during BEL parsing
MODIFIER = "modifier"
EFFECT = "effect"
FROM_LOC = "fromLoc"
TO_LOC = "toLoc"
LOCATION = "location"
ACTIVITY = "Activity"
DEGRADATION = "Degradation"
TRANSLOCATION = "Translocation"
CELL_SECRETION = "CellSecretion"
CELL_SURFACE_EXPRESSION = "CellSurfaceExpression"
INTRACELLULAR = "intracellular"
EXTRACELLULAR = "extracellular space"
CELL_SURFACE = "cell surface"
# Internal node data format keys
#: The node data key specifying the node's function (e.g. :data:`GENE`, :data:`MIRNA`, :data:`BIOPROCESS`, etc.)
FUNCTION = "function"
#: The key specifying a concept
CONCEPT = "concept"
#: The key specifying an identifier dictionary's namespace. Used for nodes, activities, and transformations.
NAMESPACE = "namespace"
#: The key specifying an identifier dictionary's name. Used for nodes, activities, and transformations.
NAME = "name"
#: The key specifying an identifier dictionary
IDENTIFIER = "identifier"
#: The key specifying an optional label for the node
LABEL = "label"
#: The key specifying an optional description for the node
DESCRIPTION = "description"
#: The key specifying xrefs
XREFS = "xref"
#: The key representing the nodes that are a member of a composite or complex
MEMBERS = "members"
#: The key representing the nodes appearing in the reactant side of a biochemical reaction
REACTANTS = "reactants"
#: The key representing the nodes appearing in the product side of a biochemical reaction
PRODUCTS = "products"
#: The node data key specifying a fusion dictionary, containing :data:`PARTNER_3P`, :data:`PARTNER_5P`,
# :data:`RANGE_3P`, and :data:`RANGE_5P`
FUSION = "fusion"
#: The key specifying the identifier dictionary of the fusion's 3-Prime partner
PARTNER_3P = "partner_3p"
#: The key specifying the identifier dictionary of the fusion's 5-Prime partner
PARTNER_5P = "partner_5p"
#: The key specifying the range dictionary of the fusion's 3-Prime partner
RANGE_3P = "range_3p"
#: The key specifying the range dictionary of the fusion's 5-Prime partner
RANGE_5P = "range_5p"
FUSION_REFERENCE = "reference"
FUSION_START = "left"
FUSION_STOP = "right"
FUSION_MISSING = "missing"
#: The key specifying the node has a list of associated variants
VARIANTS = "variants"
#: The key representing what kind of variation is being represented
KIND = "kind"
#: The value for :data:`KIND` for an HGVS variant
HGVS = "hgvs"
#: The value for :data:`KIND` for a protein modification
PMOD = "pmod"
#: The value for :data:`KIND` for a gene modification
GMOD = "gmod"
#: The value for :data:`KIND` for a fragment
FRAGMENT = "frag"
#: The allowed values for :data:`KIND`
PYBEL_VARIANT_KINDS = {
HGVS,
PMOD,
GMOD,
FRAGMENT,
}
#: The group of all BEL-provided keys for node data dictionaries, used for hashing.
PYBEL_NODE_DATA_KEYS = {
FUNCTION,
NAMESPACE,
NAME,
IDENTIFIER,
VARIANTS,
FUSION,
MEMBERS,
REACTANTS,
PRODUCTS,
}
#: Used as a namespace when none is given while lenient parsing mode is turned on. Not recommended!
DIRTY = "dirty"
#: Represents the BEL abundance, abundance()
ABUNDANCE = "Abundance"
#: Represents the BEL abundance, geneAbundance()
#: .. seealso:: http://openbel.org/language/version_2.0/bel_specification_version_2.0.html#Xabundancea
GENE = "Gene"
#: Represents the BEL abundance, rnaAbundance()
RNA = "RNA"
#: Represents the BEL abundance, microRNAAbundance()
MIRNA = "miRNA"
#: Represents the BEL abundance, proteinAbundance()
PROTEIN = "Protein"
#: Represents the BEL function, biologicalProcess()
BIOPROCESS = "BiologicalProcess"
#: Represents the BEL function, pathology()
PATHOLOGY = "Pathology"
#: Represents the BEL function, populationAbundance()
POPULATION = "Population"
#: Represents the BEL abundance, compositeAbundance()
COMPOSITE = "Composite"
#: Represents the BEL abundance, complexAbundance()
COMPLEX = "Complex"
#: Represents the BEL transformation, reaction()
REACTION = "Reaction"
#: A set of all of the valid PyBEL node functions
PYBEL_NODE_FUNCTIONS = {
ABUNDANCE,
GENE,
RNA,
MIRNA,
PROTEIN,
BIOPROCESS,
PATHOLOGY,
COMPOSITE,
COMPLEX,
REACTION,
POPULATION,
}
#: The mapping from PyBEL node functions to BEL strings
rev_abundance_labels = {
ABUNDANCE: "a",
GENE: "g",
MIRNA: "m",
PROTEIN: "p",
RNA: "r",
BIOPROCESS: "bp",
PATHOLOGY: "path",
COMPLEX: "complex",
COMPOSITE: "composite",
POPULATION: "pop",
}
# Internal edge data keys
#: The key for an internal edge data dictionary for the relation string
RELATION = "relation"
#: The key for an internal edge data dictionary for the citation dictionary
CITATION = "citation"
CITATION_DB = NAMESPACE # for backwards compatibility
CITATION_IDENTIFIER = IDENTIFIER # for backwards compatibility
#: The key for an internal edge data dictionary for the evidence string
EVIDENCE = "evidence"
#: The key for an internal edge data dictionary for the annotations dictionary
ANNOTATIONS = "annotations"
SOURCE = "source"
SUBJECT = SOURCE # for backwards compatibility
TARGET = "target"
OBJECT = TARGET # for backwards compatibility
#: The key for an internal edge data dictionary for the source modifier dictionary
SOURCE_MODIFIER = "source_modifier"
#: The key for an internal edge data dictionary for the target modifier dictionary
TARGET_MODIFIER = "target_modifier"
#: The key for an internal edge data dictionary for the line number
LINE = "line"
#: The key representing the hash of the edge
HASH = "hash"
#: The group of all BEL-provided keys for edge data dictionaries, used for hashing.
PYBEL_EDGE_DATA_KEYS = {
RELATION,
CITATION,
EVIDENCE,
ANNOTATIONS,
SOURCE_MODIFIER,
TARGET_MODIFIER,
}
#: The group of all PyBEL-specific keys for edge data dictionaries, not used for hashing.
PYBEL_EDGE_METADATA_KEYS = {
LINE,
HASH,
}
#: The group of all PyBEL annotated keys for edge data dictionaries
PYBEL_EDGE_ALL_KEYS = PYBEL_EDGE_DATA_KEYS | PYBEL_EDGE_METADATA_KEYS
#: A BEL relationship
HAS_REACTANT = "hasReactant"
#: A BEL relationship
HAS_PRODUCT = "hasProduct"
#: A BEL relationship
HAS_VARIANT = "hasVariant"
#: A BEL relationship
#: :data:`GENE` to :data:`RNA` is called transcription
TRANSCRIBED_TO = "transcribedTo"
#: A BEL relationship
#: :data:`RNA` to :data:`PROTEIN` is called translation
TRANSLATED_TO = "translatedTo"
#: A BEL relationship
INCREASES = "increases"
#: A BEL relationship
DIRECTLY_INCREASES = "directlyIncreases"
#: A BEL relationship
DECREASES = "decreases"
#: A BEL relationship
DIRECTLY_DECREASES = "directlyDecreases"
#: A BEL relationship
CAUSES_NO_CHANGE = "causesNoChange"
#: A BEL relationship
REGULATES = "regulates"
#: A BEL relationship
DIRECTLY_REGULATES = "directlyRegulates"
#: A BEL relationship
BINDS = "binds"
#: A BEL relationship
CORRELATION = "correlation"
#: A BEL relationship
NO_CORRELATION = "noCorrelation"
#: A BEL relationship
NEGATIVE_CORRELATION = "negativeCorrelation"
#: A BEL relationship
POSITIVE_CORRELATION = "positiveCorrelation"
#: A BEL relationship
ASSOCIATION = "association"
#: A BEL relationship
ORTHOLOGOUS = "orthologous"
#: A BEL relationship
ANALOGOUS_TO = "analogousTo"
#: A BEL relationship
IS_A = "isA"
#: A BEL relationship
RATE_LIMITING_STEP_OF = "rateLimitingStepOf"
#: A BEL relationship
SUBPROCESS_OF = "subProcessOf"
#: A BEL relationship
BIOMARKER_FOR = "biomarkerFor"
#: A BEL relationship
PROGONSTIC_BIOMARKER_FOR = "prognosticBiomarkerFor"
#: A BEL relationship, added by PyBEL
EQUIVALENT_TO = "equivalentTo"
#: A BEL relationship, added by PyBEL
PART_OF = "partOf"
#: A set of all causal relationships that have an increasing effect
CAUSAL_INCREASE_RELATIONS = {INCREASES, DIRECTLY_INCREASES}
#: A set of all causal relationships that have a decreasing effect
CAUSAL_DECREASE_RELATIONS = {DECREASES, DIRECTLY_DECREASES}
#: A set of all causal relationships that have an indeterminate polarity
CAUSAL_APOLAR_RELATIONS = {REGULATES, DIRECTLY_REGULATES}
#: A set of direct causal relations
DIRECT_CAUSAL_RELATIONS = {DIRECTLY_DECREASES, DIRECTLY_INCREASES, DIRECTLY_REGULATES}
#: A set of indirect causal relations
INDIRECT_CAUSAL_RELATIONS = {DECREASES, INCREASES, REGULATES}
#: A set of causal relationships that are polar
CAUSAL_POLAR_RELATIONS = CAUSAL_INCREASE_RELATIONS | CAUSAL_DECREASE_RELATIONS
#: A set of all causal relationships
CAUSAL_RELATIONS = CAUSAL_INCREASE_RELATIONS | CAUSAL_DECREASE_RELATIONS | CAUSAL_APOLAR_RELATIONS
APOLAR_CORRELATIVE_RELATIONS = {
CORRELATION,
NO_CORRELATION,
}
POLAR_CORRELATIVE_RELATIONS = {
POSITIVE_CORRELATION,
NEGATIVE_CORRELATION,
}
#: A set of all correlative relationships
CORRELATIVE_RELATIONS = APOLAR_CORRELATIVE_RELATIONS | POLAR_CORRELATIVE_RELATIONS
#: A set of polar relations
POLAR_RELATIONS = CAUSAL_POLAR_RELATIONS | POLAR_CORRELATIVE_RELATIONS
#: A set of all relationships that are inherently directionless, and are therefore added to the graph twice
TWO_WAY_RELATIONS = CORRELATIVE_RELATIONS | {
ASSOCIATION,
ORTHOLOGOUS,
ANALOGOUS_TO,
EQUIVALENT_TO,
BINDS,
}
#: A set of relationship types that don't require annotations or evidence
UNQUALIFIED_EDGES = {
HAS_REACTANT,
HAS_PRODUCT,
HAS_VARIANT,
TRANSCRIBED_TO,
TRANSLATED_TO,
IS_A,
EQUIVALENT_TO,
PART_OF,
ORTHOLOGOUS,
}
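# Illustrative helper (not part of the original module): one way the relation
# sets defined above can be combined to classify a BEL relation string. The
# function name is hypothetical.
def _classify_relation_example(relation: str) -> dict:
    """Return a few boolean facets of a relation using the constant sets above."""
    return {
        "causal": relation in CAUSAL_RELATIONS,
        "correlative": relation in CORRELATIVE_RELATIONS,
        "two_way": relation in TWO_WAY_RELATIONS,
        "unqualified": relation in UNQUALIFIED_EDGES,
    }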
# BEL Keywords
BEL_KEYWORD_SET = "SET"
BEL_KEYWORD_DOCUMENT = "DOCUMENT"
BEL_KEYWORD_DEFINE = "DEFINE"
BEL_KEYWORD_NAMESPACE = "NAMESPACE"
BEL_KEYWORD_ANNOTATION = "ANNOTATION"
BEL_KEYWORD_AS = "AS"
BEL_KEYWORD_URL = "URL"
BEL_KEYWORD_LIST = "LIST"
BEL_KEYWORD_OWL = "OWL"
BEL_KEYWORD_PATTERN = "PATTERN"
BEL_KEYWORD_UNSET = "UNSET"
BEL_KEYWORD_STATEMENT_GROUP = "STATEMENT_GROUP"
BEL_KEYWORD_CITATION = "Citation"
BEL_KEYWORD_EVIDENCE = "Evidence"
BEL_KEYWORD_SUPPORT = "SupportingText"
BEL_KEYWORD_ALL = "ALL"
BEL_KEYWORD_METADATA_NAME = "Name"
BEL_KEYWORD_METADATA_VERSION = "Version"
BEL_KEYWORD_METADATA_DESCRIPTION = "Description"
BEL_KEYWORD_METADATA_AUTHORS = "Authors"
BEL_KEYWORD_METADATA_CONTACT = "ContactInfo"
BEL_KEYWORD_METADATA_LICENSES = "Licenses"
BEL_KEYWORD_METADATA_COPYRIGHT = "Copyright"
BEL_KEYWORD_METADATA_DISCLAIMER = "Disclaimer"
BEL_KEYWORD_METADATA_PROJECT = "Project"
# Internal metadata representation. See BELGraph documentation, since these are shielded from the user by properties.
#: The key for the document metadata dictionary. Can be accessed by :code:`graph.graph[GRAPH_METADATA]`, or by using
#: the property built in to the :class:`pybel.BELGraph`, :func:`pybel.BELGraph.document`
GRAPH_METADATA = "document_metadata"
GRAPH_NAMESPACE_URL = "namespace_url"
GRAPH_NAMESPACE_PATTERN = "namespace_pattern"
GRAPH_ANNOTATION_URL = "annotation_url"
GRAPH_ANNOTATION_MIRIAM = "annotation_miriam"
GRAPH_ANNOTATION_CURIE = "annotation_curie"
GRAPH_ANNOTATION_PATTERN = "annotation_pattern"
GRAPH_ANNOTATION_LIST = "annotation_list"
GRAPH_WARNINGS = "warnings"
GRAPH_PYBEL_VERSION = "pybel_version"
GRAPH_PATH = "path"
#: The key for the document name. Can be accessed by :code:`graph.document[METADATA_NAME]` or by using the property
#: built into the :class:`pybel.BELGraph` class, :func:`pybel.BELGraph.name`
METADATA_NAME = "name"
#: The key for the document version. Can be accessed by :code:`graph.document[METADATA_VERSION]`
METADATA_VERSION = "version"
#: The key for the document description. Can be accessed by :code:`graph.document[METADATA_DESCRIPTION]`
METADATA_DESCRIPTION = "description"
#: The key for the document authors. Can be accessed by :code:`graph.document[METADATA_NAME]`
METADATA_AUTHORS = "authors"
#: The key for the document contact email. Can be accessed by :code:`graph.document[METADATA_CONTACT]`
METADATA_CONTACT = "contact"
#: The key for the document licenses. Can be accessed by :code:`graph.document[METADATA_LICENSES]`
METADATA_LICENSES = "licenses"
#: The key for the document copyright information. Can be accessed by :code:`graph.document[METADATA_COPYRIGHT]`
METADATA_COPYRIGHT = "copyright"
#: The key for the document disclaimer. Can be accessed by :code:`graph.document[METADATA_DISCLAIMER]`
METADATA_DISCLAIMER = "disclaimer"
#: The key for the document project. Can be accessed by :code:`graph.document[METADATA_PROJECT]`
METADATA_PROJECT = "project"
#: Provides a mapping from BEL language keywords to internal PyBEL strings
DOCUMENT_KEYS = {
BEL_KEYWORD_METADATA_AUTHORS: METADATA_AUTHORS,
BEL_KEYWORD_METADATA_CONTACT: METADATA_CONTACT,
BEL_KEYWORD_METADATA_COPYRIGHT: METADATA_COPYRIGHT,
BEL_KEYWORD_METADATA_DESCRIPTION: METADATA_DESCRIPTION,
BEL_KEYWORD_METADATA_DISCLAIMER: METADATA_DISCLAIMER,
BEL_KEYWORD_METADATA_LICENSES: METADATA_LICENSES,
BEL_KEYWORD_METADATA_NAME: METADATA_NAME,
BEL_KEYWORD_METADATA_VERSION: METADATA_VERSION,
BEL_KEYWORD_METADATA_PROJECT: METADATA_PROJECT,
}
#: The keys to use when inserting a graph to the cache
METADATA_INSERT_KEYS = {
METADATA_NAME,
METADATA_VERSION,
METADATA_DESCRIPTION,
METADATA_AUTHORS,
METADATA_CONTACT,
METADATA_LICENSES,
METADATA_COPYRIGHT,
METADATA_DISCLAIMER,
}
#: Provides a mapping from internal PyBEL strings to BEL language keywords. Is the inverse of :data:`DOCUMENT_KEYS`
INVERSE_DOCUMENT_KEYS = {v: k for k, v in DOCUMENT_KEYS.items()}
#: A set representing the required metadata during BEL document parsing
REQUIRED_METADATA = {
METADATA_NAME,
METADATA_VERSION,
METADATA_DESCRIPTION,
METADATA_AUTHORS,
METADATA_CONTACT,
}
# Modifier parser constants
#: The key for the starting position of a fragment range
FRAGMENT_START = "start"
#: The key for the stopping position of a fragment range
FRAGMENT_STOP = "stop"
#: The key signifying that there is neither a start nor stop position defined
FRAGMENT_MISSING = "missing"
#: The key for any additional descriptive data about a fragment
FRAGMENT_DESCRIPTION = "description"
#: The order for serializing gene modification data
GMOD_ORDER = [KIND, IDENTIFIER]
#: The key for the reference nucleotide in a gene substitution.
#: Only used during parsing since this is converted to HGVS.
GSUB_REFERENCE = "reference"
#: The key for the position of a gene substitution.
#: Only used during parsing since this is converted to HGVS
GSUB_POSITION = "position"
#: The key for the effect of a gene substitution.
#: Only used during parsing since this is converted to HGVS
GSUB_VARIANT = "variant"
#: The key for the protein modification code.
PMOD_CODE = "code"
#: The key for the protein modification position.
PMOD_POSITION = "pos"
#: The order for serializing information about a protein modification
PMOD_ORDER = [KIND, IDENTIFIER, PMOD_CODE, PMOD_POSITION]
#: The key for the reference amino acid in a protein substitution.
#: Only used during parsing since this is converted to HGVS
PSUB_REFERENCE = "reference"
#: The key for the position of a protein substitution. Only used during parsing since this is converted to HGVS.
PSUB_POSITION = "position"
#: The key for the variant of a protein substitution. Only used during parsing since this is converted to HGVS.
PSUB_VARIANT = "variant"
#: The key for the position at which a protein is truncated
TRUNCATION_POSITION = "position"
#: The mapping from BEL namespace codes to PyBEL internal abundance constants
#: .. seealso:: https://wiki.openbel.org/display/BELNA/Assignment+of+Encoding+%28Allowed+Functions%29+for+BEL+Namespaces
belns_encodings = {
"G": {GENE},
"R": {RNA, MIRNA},
"P": {PROTEIN},
"M": {MIRNA},
"A": {ABUNDANCE, RNA, MIRNA, PROTEIN, GENE, COMPLEX},
"B": {PATHOLOGY, BIOPROCESS},
"O": {PATHOLOGY},
"C": {COMPLEX},
}
BELNS_ENCODING_STR = "".join(sorted(belns_encodings))
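# Illustrative helper (not part of the original module): expand a BEL namespace
# encoding string (e.g. "GR") into the set of PyBEL functions it permits, using
# the belns_encodings mapping above. The function name is hypothetical.
def _allowed_functions_example(encoding: str) -> set:
    allowed = set()
    for character in encoding:
        allowed.update(belns_encodings.get(character, set()))
    return allowed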
PYBEL_PUBMED = "29048466"
SET_CITATION_FMT = 'SET Citation = {{"{}", "{}"}}'
|
|
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
from neutron_lib.api import validators
from neutron_lib import constants as const
from neutron_lib import exceptions as nexception
from oslo_utils import uuidutils
import six
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.common import exceptions
from neutron.conf import quota
from neutron import manager
from neutron.quota import resource_registry
# Security group Exceptions
class SecurityGroupInvalidPortRange(nexception.InvalidInput):
message = _("For TCP/UDP protocols, port_range_min must be "
"<= port_range_max")
class SecurityGroupInvalidPortValue(nexception.InvalidInput):
message = _("Invalid value for port %(port)s")
class SecurityGroupInvalidIcmpValue(nexception.InvalidInput):
message = _("Invalid value for ICMP %(field)s (%(attr)s) "
"%(value)s. It must be 0 to 255.")
class SecurityGroupEthertypeConflictWithProtocol(nexception.InvalidInput):
message = _("Invalid ethertype %(ethertype)s for protocol "
"%(protocol)s.")
class SecurityGroupMissingIcmpType(nexception.InvalidInput):
message = _("ICMP code (port-range-max) %(value)s is provided"
" but ICMP type (port-range-min) is missing.")
class SecurityGroupInUse(nexception.InUse):
message = _("Security Group %(id)s %(reason)s.")
def __init__(self, **kwargs):
if 'reason' not in kwargs:
kwargs['reason'] = _("in use")
super(SecurityGroupInUse, self).__init__(**kwargs)
class SecurityGroupCannotRemoveDefault(nexception.InUse):
message = _("Insufficient rights for removing default security group.")
class SecurityGroupCannotUpdateDefault(nexception.InUse):
message = _("Updating default security group not allowed.")
class SecurityGroupDefaultAlreadyExists(nexception.InUse):
message = _("Default security group already exists.")
class SecurityGroupRuleInvalidProtocol(nexception.InvalidInput):
message = _("Security group rule protocol %(protocol)s not supported. "
"Only protocol values %(values)s and integer representations "
"[0 to 255] are supported.")
class SecurityGroupRulesNotSingleTenant(nexception.InvalidInput):
message = _("Multiple tenant_ids in bulk security group rule create"
" not allowed")
class SecurityGroupRemoteGroupAndRemoteIpPrefix(nexception.InvalidInput):
message = _("Only remote_ip_prefix or remote_group_id may "
"be provided.")
class SecurityGroupProtocolRequiredWithPorts(nexception.InvalidInput):
message = _("Must also specify protocol if port range is given.")
class SecurityGroupNotSingleGroupRules(nexception.InvalidInput):
message = _("Only allowed to update rules for "
"one security profile at a time")
class SecurityGroupNotFound(nexception.NotFound):
message = _("Security group %(id)s does not exist")
class SecurityGroupRuleNotFound(nexception.NotFound):
message = _("Security group rule %(id)s does not exist")
class DuplicateSecurityGroupRuleInPost(nexception.InUse):
message = _("Duplicate Security Group Rule in POST.")
class SecurityGroupRuleExists(nexception.InUse):
message = _("Security group rule already exists. Rule id is %(rule_id)s.")
class SecurityGroupRuleInUse(nexception.InUse):
message = _("Security Group Rule %(id)s %(reason)s.")
def __init__(self, **kwargs):
if 'reason' not in kwargs:
kwargs['reason'] = _("in use")
super(SecurityGroupRuleInUse, self).__init__(**kwargs)
class SecurityGroupRuleParameterConflict(nexception.InvalidInput):
message = _("Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s")
class SecurityGroupConflict(nexception.Conflict):
message = _("Error %(reason)s while attempting the operation.")
class SecurityGroupRuleInvalidEtherType(nexception.InvalidInput):
message = _("Security group rule for ethertype '%(ethertype)s' not "
"supported. Allowed values are %(values)s.")
def convert_protocol(value):
if value is None:
return
try:
val = int(value)
if val >= 0 and val <= 255:
            # Store the protocol number as a string because of bug 1381379:
            # PostgreSQL fails when comparing an integer with the string
            # values that already exist in the db.
return str(value)
raise SecurityGroupRuleInvalidProtocol(
protocol=value, values=sg_supported_protocols)
except (ValueError, TypeError):
if value.lower() in sg_supported_protocols:
return value.lower()
raise SecurityGroupRuleInvalidProtocol(
protocol=value, values=sg_supported_protocols)
except AttributeError:
raise SecurityGroupRuleInvalidProtocol(
protocol=value, values=sg_supported_protocols)
def convert_ethertype_to_case_insensitive(value):
if isinstance(value, six.string_types):
for ethertype in sg_supported_ethertypes:
if ethertype.lower() == value.lower():
return ethertype
raise SecurityGroupRuleInvalidEtherType(
ethertype=value, values=sg_supported_ethertypes)
def convert_validate_port_value(port):
if port is None:
return port
try:
val = int(port)
except (ValueError, TypeError):
raise SecurityGroupInvalidPortValue(port=port)
if val >= 0 and val <= 65535:
return val
else:
raise SecurityGroupInvalidPortValue(port=port)
def convert_to_uuid_list_or_none(value_list):
if value_list is None:
return
for sg_id in value_list:
if not uuidutils.is_uuid_like(sg_id):
msg = _("'%s' is not an integer or uuid") % sg_id
raise nexception.InvalidInput(error_message=msg)
return value_list
def convert_ip_prefix_to_cidr(ip_prefix):
if not ip_prefix:
return
try:
cidr = netaddr.IPNetwork(ip_prefix)
return str(cidr)
except (ValueError, TypeError, netaddr.AddrFormatError):
raise exceptions.InvalidCIDR(input=ip_prefix)
def _validate_name_not_default(data, valid_values=None):
if data.lower() == "default":
raise SecurityGroupDefaultAlreadyExists()
validators.validators['type:name_not_default'] = _validate_name_not_default
sg_supported_protocols = ([None] + list(const.IP_PROTOCOL_MAP.keys()))
sg_supported_ethertypes = ['IPv4', 'IPv6']
SECURITYGROUPS = 'security_groups'
SECURITYGROUPRULES = 'security_group_rules'
# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
SECURITYGROUPS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': '',
'validate': {'type:name_not_default': attr.NAME_MAX_LEN}},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': attr.DESCRIPTION_MAX_LEN},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
'is_visible': True},
SECURITYGROUPRULES: {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
SECURITYGROUPRULES: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'security_group_id': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'required_by_policy': True},
'remote_group_id': {'allow_post': True, 'allow_put': False,
'default': None, 'is_visible': True},
'direction': {'allow_post': True, 'allow_put': False,
'is_visible': True,
'validate': {'type:values': ['ingress', 'egress']}},
'protocol': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': None,
'convert_to': convert_protocol},
'port_range_min': {'allow_post': True, 'allow_put': False,
'convert_to': convert_validate_port_value,
'default': None, 'is_visible': True},
'port_range_max': {'allow_post': True, 'allow_put': False,
'convert_to': convert_validate_port_value,
'default': None, 'is_visible': True},
'ethertype': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': 'IPv4',
'convert_to': convert_ethertype_to_case_insensitive,
'validate': {'type:values': sg_supported_ethertypes}},
'remote_ip_prefix': {'allow_post': True, 'allow_put': False,
'default': None, 'is_visible': True,
'convert_to': convert_ip_prefix_to_cidr},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
'is_visible': True},
}
}
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {SECURITYGROUPS: {'allow_post': True,
'allow_put': True,
'is_visible': True,
'convert_to': convert_to_uuid_list_or_none,
'default': const.ATTR_NOT_SPECIFIED}}}
# Register the configuration options
quota.register_quota_opts(quota.security_group_quota_opts)
class Securitygroup(extensions.ExtensionDescriptor):
"""Security group extension."""
@classmethod
def get_name(cls):
return "security-group"
@classmethod
def get_alias(cls):
return "security-group"
@classmethod
def get_description(cls):
return "The security groups extension."
@classmethod
def get_updated(cls):
return "2012-10-05T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
attr.PLURALS.update(dict(my_plurals))
exts = []
plugin = manager.NeutronManager.get_plugin()
for resource_name in ['security_group', 'security_group_rule']:
collection_name = resource_name.replace('_', '-') + "s"
params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict())
resource_registry.register_resource_by_name(resource_name)
controller = base.create_resource(collection_name,
resource_name,
plugin, params, allow_bulk=True,
allow_pagination=True,
allow_sorting=True)
ex = extensions.ResourceExtension(collection_name,
controller,
attr_map=params)
exts.append(ex)
return exts
def update_attributes_map(self, attributes):
super(Securitygroup, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return dict(list(EXTENDED_ATTRIBUTES_2_0.items()) +
list(RESOURCE_ATTRIBUTE_MAP.items()))
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class SecurityGroupPluginBase(object):
@abc.abstractmethod
def create_security_group(self, context, security_group):
pass
@abc.abstractmethod
def update_security_group(self, context, id, security_group):
pass
@abc.abstractmethod
def delete_security_group(self, context, id):
pass
@abc.abstractmethod
def get_security_groups(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
pass
@abc.abstractmethod
def get_security_group(self, context, id, fields=None):
pass
@abc.abstractmethod
def create_security_group_rule(self, context, security_group_rule):
pass
@abc.abstractmethod
def delete_security_group_rule(self, context, id):
pass
@abc.abstractmethod
def get_security_group_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
pass
@abc.abstractmethod
def get_security_group_rule(self, context, id, fields=None):
pass
|
|
""" Python Character Mapping Codec cp1252 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT' with gencodec.py.
""" # "
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1252',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
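### Usage note (illustrative comment, not part of the generated file):
# Once this module is importable as an encoding (e.g. via the standard
# `encodings` package or a registered codecs search function), the table below
# supports round-trips such as
#   b'\x80'.decode('cp1252') == u'\u20ac'   # EURO SIGN, see 0x80 below
# through codecs.charmap_decode/charmap_encode.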
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
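# Minimal round-trip sketch (not part of the generated codec): exercises the Codec
# defined above directly. Runs only when this module is executed standalone.
if __name__ == '__main__':
    _sample = u'Euro \u20ac and quotes \u201chi\u201d'
    _encoded, _ = Codec().encode(_sample)    # unicode -> cp1252 bytes
    _decoded, _ = Codec().decode(_encoded)   # cp1252 bytes -> unicode
    assert _decoded == _sample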
|
|
import os
import sys
import base64
import asyncio
import logging
import argparse
import regex
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.output as s_output
import synapse.lib.dyndeps as s_dyndeps
logger = logging.getLogger(__name__)
wflownamere = regex.compile(r'^([\w-]+)\.yaml$')
def chopSemVer(vers):
return tuple([int(x) for x in vers.split('.')])
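# Illustrative example (not from the original source): chopSemVer('2.17.0') -> (2, 17, 0)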
def getStormStr(fn):
if not os.path.isfile(fn):
raise s_exc.NoSuchFile(mesg='Storm file {} not found'.format(fn), path=fn)
with open(fn, 'rb') as f:
return f.read().decode()
def loadOpticFiles(pkgdef, path):
pkgfiles = pkgdef['optic']['files']
abspath = s_common.genpath(path)
    for root, dirs, files in os.walk(path):
for name in files:
if name.startswith('.'): # pragma: no cover
continue
fullname = s_common.genpath(root, name)
if not os.path.isfile(fullname): # pragma: no cover
continue
pkgfname = fullname[len(abspath) + 1:]
with open(fullname, 'rb') as fd:
pkgfiles[pkgfname] = {
'file': base64.b64encode(fd.read()).decode(),
}
def loadOpticWorkflows(pkgdef, path):
wdefs = pkgdef['optic']['workflows']
for root, dirs, files in os.walk(path):
for name in files:
match = wflownamere.match(name)
if match is None:
logger.warning('Skipping workflow "%s" that does not match pattern "%s"' % (name, wflownamere.pattern))
continue
wname = match.groups()[0]
fullname = s_common.genpath(root, name)
if not os.path.isfile(fullname): # pragma: no cover
continue
wdefs[wname] = s_common.yamlload(fullname)
def tryLoadPkgProto(fp, opticdir=None, readonly=False):
'''
Try to get a Storm Package prototype from disk with or without inline documentation.
Args:
fp (str): Path to the package .yaml file on disk.
opticdir (str): Path to optional Optic module code to add to the Storm Package.
readonly (bool): If set, open files in read-only mode. If files are missing, that will raise a NoSuchFile
exception.
Returns:
dict: A Storm package definition.
'''
try:
return loadPkgProto(fp, opticdir=opticdir, readonly=readonly)
except s_exc.NoSuchFile:
return loadPkgProto(fp, opticdir=opticdir, no_docs=True, readonly=readonly)
def loadPkgProto(path, opticdir=None, no_docs=False, readonly=False):
'''
Get a Storm Package definition from disk.
Args:
        path (str): Path to the package .yaml file on disk.
        opticdir (str): Path to optional Optic module code to add to the Storm Package.
        no_docs (bool): If true, replace inline documentation content with empty strings.
readonly (bool): If set, open files in read-only mode. If files are missing, that will raise a NoSuchFile
exception.
Returns:
dict: A Storm package definition.
'''
full = s_common.genpath(path)
pkgdef = s_common.yamlload(full)
if isinstance(pkgdef['version'], str):
pkgdef['version'] = chopSemVer(pkgdef['version'])
protodir = os.path.dirname(full)
pkgname = pkgdef.get('name')
genopts = pkgdef.pop('genopts', {})
logodef = pkgdef.get('logo')
if logodef is not None:
path = logodef.pop('path', None)
if path is not None:
with s_common.reqfile(protodir, path) as fd:
logodef['file'] = base64.b64encode(fd.read()).decode()
if logodef.get('mime') is None:
mesg = 'Mime type must be specified for logo file.'
raise s_exc.BadPkgDef(mesg=mesg)
if logodef.get('file') is None:
mesg = 'Logo def must contain path or file.'
raise s_exc.BadPkgDef(mesg=mesg)
for docdef in pkgdef.get('docs', ()):
if docdef.get('title') is None:
mesg = 'Each entry in docs must have a title.'
raise s_exc.BadPkgDef(mesg=mesg)
if no_docs:
docdef['content'] = ''
continue
path = docdef.pop('path', None)
if path is not None:
with s_common.reqfile(protodir, path) as fd:
docdef['content'] = fd.read().decode()
if docdef.get('content') is None:
mesg = 'Docs entry has no path or content.'
raise s_exc.BadPkgDef(mesg=mesg)
for mod in pkgdef.get('modules', ()):
name = mod.get('name')
basename = name
if genopts.get('dotstorm', False):
basename = f'{basename}.storm'
mod_path = s_common.genpath(protodir, 'storm', 'modules', basename)
if readonly:
mod['storm'] = getStormStr(mod_path)
else:
with s_common.genfile(mod_path) as fd:
mod['storm'] = fd.read().decode()
for extmod in pkgdef.get('external_modules', ()):
fpth = extmod.get('file_path')
if fpth is not None:
extmod['storm'] = getStormStr(fpth)
else:
path = extmod.get('package_path')
extpkg = s_dyndeps.tryDynMod(extmod.get('package'))
extmod['storm'] = extpkg.getAssetStr(path)
extname = extmod.get('name')
extmod['name'] = f'{pkgname}.{extname}'
pkgdef.setdefault('modules', [])
pkgdef['modules'].append(extmod)
pkgdef.pop('external_modules', None)
for cmd in pkgdef.get('commands', ()):
name = cmd.get('name')
basename = name
if genopts.get('dotstorm'):
basename = f'{basename}.storm'
cmd_path = s_common.genpath(protodir, 'storm', 'commands', basename)
if readonly:
cmd['storm'] = getStormStr(cmd_path)
else:
with s_common.genfile(cmd_path) as fd:
cmd['storm'] = fd.read().decode()
wflowdir = s_common.genpath(protodir, 'workflows')
if os.path.isdir(wflowdir):
pkgdef.setdefault('optic', {})
pkgdef['optic'].setdefault('workflows', {})
loadOpticWorkflows(pkgdef, wflowdir)
if opticdir is None:
opticdir = s_common.genpath(protodir, 'optic')
if os.path.isdir(opticdir):
pkgdef.setdefault('optic', {})
pkgdef['optic'].setdefault('files', {})
loadOpticFiles(pkgdef, opticdir)
return pkgdef
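# Illustrative usage (hypothetical paths; assumes a prototype laid out with
# storm/modules, storm/commands and an optional workflows/ directory as the
# loaders above expect):
#
#     pkgdef = tryLoadPkgProto('mypkg/mypkg.yaml')
#     print(pkgdef.get('name'), pkgdef.get('version'))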
prog = 'synapse.tools.genpkg'
desc = 'A tool for generating/pushing storm packages from YAML prototypes.'
async def main(argv, outp=s_output.stdout):
pars = argparse.ArgumentParser()
pars.add_argument('--push', metavar='<url>', help='A telepath URL of a Cortex or PkgRepo.')
pars.add_argument('--save', metavar='<path>', help='Save the completed package JSON to a file.')
pars.add_argument('--optic', metavar='<path>', help='Load Optic module files from a directory.')
pars.add_argument('--no-build', action='store_true',
help='Treat pkgfile argument as an already-built package')
pars.add_argument('--no-docs', default=False, action='store_true',
help='Do not require docs to be present and replace any doc content with empty strings.')
pars.add_argument('pkgfile', metavar='<pkgfile>',
help='Path to a storm package prototype .yaml file, or a completed package .json/.yaml file.')
opts = pars.parse_args(argv)
if opts.no_build:
pkgdef = s_common.yamlload(opts.pkgfile)
if not pkgdef:
outp.printf(f'Unable to load pkgdef from [{opts.pkgfile}]')
return 1
if opts.save:
            outp.printf(f'The --save option cannot be used with --no-build; {opts.pkgfile} is treated as an already-built package.')
return 1
else:
pkgdef = loadPkgProto(opts.pkgfile, opticdir=opts.optic, no_docs=opts.no_docs)
if not opts.save and not opts.push:
outp.printf('Neither --push nor --save provided. Nothing to do.')
return 1
if opts.save:
s_common.jssave(pkgdef, opts.save)
if opts.push:
path = s_common.genpath('~/.syn/telepath.yaml')
fini = await s_telepath.loadTeleEnv(path)
async with await s_telepath.openurl(opts.push) as core:
await core.addStormPkg(pkgdef)
if fini is not None: # pragma: no cover
await fini()
return 0
if __name__ == '__main__': # pragma: no cover
sys.exit(asyncio.run(main(sys.argv[1:])))
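# Example invocations (hypothetical paths and telepath URL, shown for illustration):
#
#     python -m synapse.tools.genpkg --save mypkg.json mypkg/mypkg.yaml
#     python -m synapse.tools.genpkg --push tcp://user:pass@cortex.host:27492/ mypkg/mypkg.yaml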
|
|
from __future__ import unicode_literals
import re
import json
import datetime
from moto.core import BaseBackend, BaseModel
from moto.ec2 import ec2_backends
from .utils import make_arn_for_certificate
import cryptography.x509
import cryptography.hazmat.primitives.asymmetric.rsa
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.backends import default_backend
DEFAULT_ACCOUNT_ID = 123456789012
GOOGLE_ROOT_CA = b"""-----BEGIN CERTIFICATE-----
MIIEKDCCAxCgAwIBAgIQAQAhJYiw+lmnd+8Fe2Yn3zANBgkqhkiG9w0BAQsFADBC
MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMS
R2VvVHJ1c3QgR2xvYmFsIENBMB4XDTE3MDUyMjExMzIzN1oXDTE4MTIzMTIzNTk1
OVowSTELMAkGA1UEBhMCVVMxEzARBgNVBAoTCkdvb2dsZSBJbmMxJTAjBgNVBAMT
HEdvb2dsZSBJbnRlcm5ldCBBdXRob3JpdHkgRzIwggEiMA0GCSqGSIb3DQEBAQUA
A4IBDwAwggEKAoIBAQCcKgR3XNhQkToGo4Lg2FBIvIk/8RlwGohGfuCPxfGJziHu
Wv5hDbcyRImgdAtTT1WkzoJile7rWV/G4QWAEsRelD+8W0g49FP3JOb7kekVxM/0
Uw30SvyfVN59vqBrb4fA0FAfKDADQNoIc1Fsf/86PKc3Bo69SxEE630k3ub5/DFx
+5TVYPMuSq9C0svqxGoassxT3RVLix/IGWEfzZ2oPmMrhDVpZYTIGcVGIvhTlb7j
gEoQxirsupcgEcc5mRAEoPBhepUljE5SdeK27QjKFPzOImqzTs9GA5eXA37Asd57
r0Uzz7o+cbfe9CUlwg01iZ2d+w4ReYkeN8WvjnJpAgMBAAGjggERMIIBDTAfBgNV
HSMEGDAWgBTAephojYn7qwVkDBF9qn1luMrMTjAdBgNVHQ4EFgQUSt0GFhu89mi1
dvWBtrtiGrpagS8wDgYDVR0PAQH/BAQDAgEGMC4GCCsGAQUFBwEBBCIwIDAeBggr
BgEFBQcwAYYSaHR0cDovL2cuc3ltY2QuY29tMBIGA1UdEwEB/wQIMAYBAf8CAQAw
NQYDVR0fBC4wLDAqoCigJoYkaHR0cDovL2cuc3ltY2IuY29tL2NybHMvZ3RnbG9i
YWwuY3JsMCEGA1UdIAQaMBgwDAYKKwYBBAHWeQIFATAIBgZngQwBAgIwHQYDVR0l
BBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA0GCSqGSIb3DQEBCwUAA4IBAQDKSeWs
12Rkd1u+cfrP9B4jx5ppY1Rf60zWGSgjZGaOHMeHgGRfBIsmr5jfCnC8vBk97nsz
qX+99AXUcLsFJnnqmseYuQcZZTTMPOk/xQH6bwx+23pwXEz+LQDwyr4tjrSogPsB
E4jLnD/lu3fKOmc2887VJwJyQ6C9bgLxRwVxPgFZ6RGeGvOED4Cmong1L7bHon8X
fOGLVq7uZ4hRJzBgpWJSwzfVO+qFKgE4h6LPcK2kesnE58rF2rwjMvL+GMJ74N87
L9TQEOaWTPtEtyFkDbkAlDASJodYmDkFOA/MgkgMCkdm7r+0X8T/cKjhf4t5K7hl
MqO5tzHpCvX2HzLc
-----END CERTIFICATE-----"""
# AWS returns the chain you provided plus a root CA (whether or not you supplied one),
# so for now a cheap response is to append any old root CA.
def datetime_to_epoch(date):
# As only Py3 has datetime.timestamp()
return int((date - datetime.datetime(1970, 1, 1)).total_seconds())
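# e.g. (illustrative) datetime_to_epoch(datetime.datetime(2017, 1, 1)) == 1483228800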
class AWSError(Exception):
TYPE = None
STATUS = 400
def __init__(self, message):
self.message = message
def response(self):
resp = {'__type': self.TYPE, 'message': self.message}
return json.dumps(resp), dict(status=self.STATUS)
class AWSValidationException(AWSError):
TYPE = 'ValidationException'
class AWSResourceNotFoundException(AWSError):
TYPE = 'ResourceNotFoundException'
class CertBundle(BaseModel):
def __init__(self, certificate, private_key, chain=None, region='us-east-1', arn=None, cert_type='IMPORTED', cert_status='ISSUED'):
self.created_at = datetime.datetime.now()
self.cert = certificate
self._cert = None
self.common_name = None
self.key = private_key
self._key = None
self.chain = chain
self.tags = {}
self._chain = None
self.type = cert_type # Should really be an enum
self.status = cert_status # Should really be an enum
# AWS always returns your chain + root CA
if self.chain is None:
self.chain = GOOGLE_ROOT_CA
else:
self.chain += b'\n' + GOOGLE_ROOT_CA
# Takes care of PEM checking
self.validate_pk()
self.validate_certificate()
if chain is not None:
self.validate_chain()
# TODO check cert is valid, or if self-signed then a chain is provided, otherwise
# raise AWSValidationException('Provided certificate is not a valid self signed. Please provide either a valid self-signed certificate or certificate chain.')
# Used for when one wants to overwrite an arn
if arn is None:
self.arn = make_arn_for_certificate(DEFAULT_ACCOUNT_ID, region)
else:
self.arn = arn
@classmethod
def generate_cert(cls, domain_name, sans=None):
if sans is None:
sans = set()
else:
sans = set(sans)
sans.add(domain_name)
sans = [cryptography.x509.DNSName(item) for item in sans]
key = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
subject = cryptography.x509.Name([
cryptography.x509.NameAttribute(cryptography.x509.NameOID.COUNTRY_NAME, u"US"),
cryptography.x509.NameAttribute(cryptography.x509.NameOID.STATE_OR_PROVINCE_NAME, u"CA"),
cryptography.x509.NameAttribute(cryptography.x509.NameOID.LOCALITY_NAME, u"San Francisco"),
cryptography.x509.NameAttribute(cryptography.x509.NameOID.ORGANIZATION_NAME, u"My Company"),
cryptography.x509.NameAttribute(cryptography.x509.NameOID.COMMON_NAME, domain_name),
])
issuer = cryptography.x509.Name([ # C = US, O = Amazon, OU = Server CA 1B, CN = Amazon
cryptography.x509.NameAttribute(cryptography.x509.NameOID.COUNTRY_NAME, u"US"),
cryptography.x509.NameAttribute(cryptography.x509.NameOID.ORGANIZATION_NAME, u"Amazon"),
cryptography.x509.NameAttribute(cryptography.x509.NameOID.ORGANIZATIONAL_UNIT_NAME, u"Server CA 1B"),
cryptography.x509.NameAttribute(cryptography.x509.NameOID.COMMON_NAME, u"Amazon"),
])
cert = cryptography.x509.CertificateBuilder().subject_name(
subject
).issuer_name(
issuer
).public_key(
key.public_key()
).serial_number(
cryptography.x509.random_serial_number()
).not_valid_before(
datetime.datetime.utcnow()
).not_valid_after(
datetime.datetime.utcnow() + datetime.timedelta(days=365)
).add_extension(
cryptography.x509.SubjectAlternativeName(sans),
critical=False,
).sign(key, hashes.SHA512(), default_backend())
cert_armored = cert.public_bytes(serialization.Encoding.PEM)
private_key = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
return cls(cert_armored, private_key, cert_type='AMAZON_ISSUED', cert_status='PENDING_VALIDATION')
def validate_pk(self):
try:
self._key = serialization.load_pem_private_key(self.key, password=None, backend=default_backend())
if self._key.key_size > 2048:
                raise AWSValidationException('The private key length is not supported. Only 1024-bit and 2048-bit are allowed.')
except Exception as err:
if isinstance(err, AWSValidationException):
raise
raise AWSValidationException('The private key is not PEM-encoded or is not valid.')
def validate_certificate(self):
try:
self._cert = cryptography.x509.load_pem_x509_certificate(self.cert, default_backend())
now = datetime.datetime.utcnow()
if self._cert.not_valid_after < now:
raise AWSValidationException('The certificate has expired, is not valid.')
if self._cert.not_valid_before > now:
raise AWSValidationException('The certificate is not in effect yet, is not valid.')
# Extracting some common fields for ease of use
# Have to search through cert.subject for OIDs
self.common_name = self._cert.subject.get_attributes_for_oid(cryptography.x509.OID_COMMON_NAME)[0].value
except Exception as err:
if isinstance(err, AWSValidationException):
raise
raise AWSValidationException('The certificate is not PEM-encoded or is not valid.')
def validate_chain(self):
try:
self._chain = []
for cert_armored in self.chain.split(b'-\n-'):
# Would leave encoded but Py2 does not have raw binary strings
cert_armored = cert_armored.decode()
# Fix missing -'s on split
cert_armored = re.sub(r'^----B', '-----B', cert_armored)
cert_armored = re.sub(r'E----$', 'E-----', cert_armored)
cert = cryptography.x509.load_pem_x509_certificate(cert_armored.encode(), default_backend())
self._chain.append(cert)
now = datetime.datetime.now()
                if cert.not_valid_after < now:
                    raise AWSValidationException('The certificate chain has expired, is not valid.')
                if cert.not_valid_before > now:
                    raise AWSValidationException('The certificate chain is not in effect yet, is not valid.')
except Exception as err:
if isinstance(err, AWSValidationException):
raise
raise AWSValidationException('The certificate is not PEM-encoded or is not valid.')
def check(self):
        # If the certificate is pending and is checked again after 1 minute,
        # it will appear to have been validated.
if self.type == 'AMAZON_ISSUED' and self.status == 'PENDING_VALIDATION' and \
(datetime.datetime.now() - self.created_at).total_seconds() > 60: # 1min
self.status = 'ISSUED'
def describe(self):
# 'RenewalSummary': {}, # Only when cert is amazon issued
if self._key.key_size == 1024:
key_algo = 'RSA_1024'
elif self._key.key_size == 2048:
key_algo = 'RSA_2048'
else:
key_algo = 'EC_prime256v1'
# Look for SANs
san_obj = self._cert.extensions.get_extension_for_oid(cryptography.x509.OID_SUBJECT_ALTERNATIVE_NAME)
sans = []
if san_obj is not None:
sans = [item.value for item in san_obj.value]
result = {
'Certificate': {
'CertificateArn': self.arn,
'DomainName': self.common_name,
'InUseBy': [],
'Issuer': self._cert.issuer.get_attributes_for_oid(cryptography.x509.OID_COMMON_NAME)[0].value,
'KeyAlgorithm': key_algo,
'NotAfter': datetime_to_epoch(self._cert.not_valid_after),
'NotBefore': datetime_to_epoch(self._cert.not_valid_before),
'Serial': self._cert.serial,
'SignatureAlgorithm': self._cert.signature_algorithm_oid._name.upper().replace('ENCRYPTION', ''),
'Status': self.status, # One of PENDING_VALIDATION, ISSUED, INACTIVE, EXPIRED, VALIDATION_TIMED_OUT, REVOKED, FAILED.
'Subject': 'CN={0}'.format(self.common_name),
'SubjectAlternativeNames': sans,
'Type': self.type # One of IMPORTED, AMAZON_ISSUED
}
}
if self.type == 'IMPORTED':
result['Certificate']['ImportedAt'] = datetime_to_epoch(self.created_at)
else:
result['Certificate']['CreatedAt'] = datetime_to_epoch(self.created_at)
result['Certificate']['IssuedAt'] = datetime_to_epoch(self.created_at)
return result
def __str__(self):
return self.arn
def __repr__(self):
return '<Certificate>'
class AWSCertificateManagerBackend(BaseBackend):
def __init__(self, region):
super(AWSCertificateManagerBackend, self).__init__()
self.region = region
self._certificates = {}
self._idempotency_tokens = {}
def reset(self):
region = self.region
self.__dict__ = {}
self.__init__(region)
@staticmethod
def _arn_not_found(arn):
msg = 'Certificate with arn {0} not found in account {1}'.format(arn, DEFAULT_ACCOUNT_ID)
return AWSResourceNotFoundException(msg)
def _get_arn_from_idempotency_token(self, token):
"""
        If the token doesn't exist, return None; later it will be
        stored with an expiry and ARN.
        If the token's expiry has passed, delete the entry and return None.
        Otherwise return the ARN.
:param token: String token
:return: None or ARN
"""
now = datetime.datetime.now()
if token in self._idempotency_tokens:
if self._idempotency_tokens[token]['expires'] < now:
# Token has expired, new request
del self._idempotency_tokens[token]
return None
else:
return self._idempotency_tokens[token]['arn']
return None
def _set_idempotency_token_arn(self, token, arn):
self._idempotency_tokens[token] = {'arn': arn, 'expires': datetime.datetime.now() + datetime.timedelta(hours=1)}
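    # Illustrative flow (hypothetical token value): the first request_certificate()
    # call with idempotency_token='abc' records {'arn': ..., 'expires': now + 1h};
    # a repeat call within that hour returns the same ARN, after which the entry
    # expires and a new certificate is generated.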
def import_cert(self, certificate, private_key, chain=None, arn=None):
if arn is not None:
if arn not in self._certificates:
raise self._arn_not_found(arn)
else:
# Will reuse provided ARN
                bundle = CertBundle(certificate, private_key, chain=chain, region=self.region, arn=arn)
else:
# Will generate a random ARN
            bundle = CertBundle(certificate, private_key, chain=chain, region=self.region)
self._certificates[bundle.arn] = bundle
return bundle.arn
def get_certificates_list(self):
"""
Get list of certificates
:return: List of certificates
:rtype: list of CertBundle
"""
for arn in self._certificates.keys():
yield self.get_certificate(arn)
def get_certificate(self, arn):
if arn not in self._certificates:
raise self._arn_not_found(arn)
cert_bundle = self._certificates[arn]
cert_bundle.check()
return cert_bundle
def delete_certificate(self, arn):
if arn not in self._certificates:
raise self._arn_not_found(arn)
del self._certificates[arn]
def request_certificate(self, domain_name, domain_validation_options, idempotency_token, subject_alt_names):
if idempotency_token is not None:
arn = self._get_arn_from_idempotency_token(idempotency_token)
if arn is not None:
return arn
cert = CertBundle.generate_cert(domain_name, subject_alt_names)
if idempotency_token is not None:
self._set_idempotency_token_arn(idempotency_token, cert.arn)
self._certificates[cert.arn] = cert
return cert.arn
def add_tags_to_certificate(self, arn, tags):
# get_cert does arn check
cert_bundle = self.get_certificate(arn)
for tag in tags:
key = tag['Key']
value = tag.get('Value', None)
cert_bundle.tags[key] = value
def remove_tags_from_certificate(self, arn, tags):
# get_cert does arn check
cert_bundle = self.get_certificate(arn)
for tag in tags:
key = tag['Key']
value = tag.get('Value', None)
try:
                # If value isn't provided, just delete the key
if value is None:
del cert_bundle.tags[key]
# If value is provided, only delete if it matches what already exists
elif cert_bundle.tags[key] == value:
del cert_bundle.tags[key]
except KeyError:
pass
acm_backends = {}
for region, ec2_backend in ec2_backends.items():
acm_backends[region] = AWSCertificateManagerBackend(region)
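# Illustrative use of the backend above (names match the classes in this module;
# values are hypothetical):
#
#     backend = acm_backends['us-east-1']
#     arn = backend.request_certificate(
#         'example.com', domain_validation_options=None,
#         idempotency_token='token-1', subject_alt_names=['www.example.com'])
#     backend.get_certificate(arn).check()  # flips PENDING_VALIDATION -> ISSUED after 60s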
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from singa import singa_wrap as singa
from singa import device
from singa import tensor
from singa import opt
import numpy as np
import time
import argparse
from PIL import Image
np_dtype = {"float16": np.float16, "float32": np.float32}
singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}
# Data augmentation
def augmentation(x, batch_size):
xpad = np.pad(x, [[0, 0], [0, 0], [4, 4], [4, 4]], 'symmetric')
for data_num in range(0, batch_size):
offset = np.random.randint(8, size=2)
x[data_num, :, :, :] = xpad[data_num, :,
offset[0]:offset[0] + x.shape[2],
offset[1]:offset[1] + x.shape[2]]
if_flip = np.random.randint(2)
if (if_flip):
x[data_num, :, :, :] = x[data_num, :, :, ::-1]
return x
# Calculate accuracy
def accuracy(pred, target):
# y is network output to be compared with ground truth (int)
y = np.argmax(pred, axis=1)
a = y == target
correct = np.array(a, "int").sum()
return correct
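# e.g. (illustrative) accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0])) == 2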
# Data partition according to the rank
def partition(global_rank, world_size, train_x, train_y, val_x, val_y):
# Partition training data
data_per_rank = train_x.shape[0] // world_size
idx_start = global_rank * data_per_rank
idx_end = (global_rank + 1) * data_per_rank
train_x = train_x[idx_start:idx_end]
train_y = train_y[idx_start:idx_end]
# Partition evaluation data
data_per_rank = val_x.shape[0] // world_size
idx_start = global_rank * data_per_rank
idx_end = (global_rank + 1) * data_per_rank
val_x = val_x[idx_start:idx_end]
val_y = val_y[idx_start:idx_end]
return train_x, train_y, val_x, val_y
# Function to all reduce NUMPY accuracy and loss from multiple devices
def reduce_variable(variable, dist_opt, reducer):
reducer.copy_from_numpy(variable)
dist_opt.all_reduce(reducer.data)
dist_opt.wait()
output = tensor.to_numpy(reducer)
return output
def resize_dataset(x, image_size):
num_data = x.shape[0]
dim = x.shape[1]
X = np.zeros(shape=(num_data, dim, image_size, image_size),
dtype=np.float32)
for n in range(0, num_data):
for d in range(0, dim):
X[n, d, :, :] = np.array(Image.fromarray(x[n, d, :, :]).resize(
(image_size, image_size), Image.BILINEAR),
dtype=np.float32)
return X
def run(global_rank,
world_size,
local_rank,
max_epoch,
batch_size,
model,
data,
sgd,
graph,
verbosity,
dist_option='plain',
spars=None,
precision='float32'):
dev = device.create_cuda_gpu_on(local_rank)
dev.SetRandSeed(0)
np.random.seed(0)
if data == 'cifar10':
from data import cifar10
train_x, train_y, val_x, val_y = cifar10.load()
elif data == 'cifar100':
from data import cifar100
train_x, train_y, val_x, val_y = cifar100.load()
elif data == 'mnist':
from data import mnist
train_x, train_y, val_x, val_y = mnist.load()
num_channels = train_x.shape[1]
image_size = train_x.shape[2]
data_size = np.prod(train_x.shape[1:train_x.ndim]).item()
num_classes = (np.max(train_y) + 1).item()
if model == 'resnet':
from model import resnet
model = resnet.resnet50(num_channels=num_channels,
num_classes=num_classes)
elif model == 'xceptionnet':
from model import xceptionnet
model = xceptionnet.create_model(num_channels=num_channels,
num_classes=num_classes)
elif model == 'cnn':
from model import cnn
model = cnn.create_model(num_channels=num_channels,
num_classes=num_classes)
elif model == 'alexnet':
from model import alexnet
model = alexnet.create_model(num_channels=num_channels,
num_classes=num_classes)
elif model == 'mlp':
import os, sys, inspect
current = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
parent = os.path.dirname(current)
sys.path.insert(0, parent)
from mlp import model
model = model.create_model(data_size=data_size,
num_classes=num_classes)
# For distributed training, sequential has better performance
if hasattr(sgd, "communicator"):
DIST = True
sequential = True
else:
DIST = False
sequential = False
if DIST:
train_x, train_y, val_x, val_y = partition(global_rank, world_size,
train_x, train_y, val_x,
val_y)
'''
# check dataset shape correctness
if global_rank == 0:
print("Check the shape of dataset:")
print(train_x.shape)
print(train_y.shape)
'''
if model.dimension == 4:
tx = tensor.Tensor(
(batch_size, num_channels, model.input_size, model.input_size), dev,
singa_dtype[precision])
elif model.dimension == 2:
tx = tensor.Tensor((batch_size, data_size), dev, singa_dtype[precision])
        train_x = np.reshape(train_x, (train_x.shape[0], -1))
        val_x = np.reshape(val_x, (val_x.shape[0], -1))
ty = tensor.Tensor((batch_size,), dev, tensor.int32)
num_train_batch = train_x.shape[0] // batch_size
num_val_batch = val_x.shape[0] // batch_size
idx = np.arange(train_x.shape[0], dtype=np.int32)
# Attach model to graph
model.set_optimizer(sgd)
model.compile([tx], is_train=True, use_graph=graph, sequential=sequential)
dev.SetVerbosity(verbosity)
# Training and evaluation loop
for epoch in range(max_epoch):
start_time = time.time()
np.random.shuffle(idx)
if global_rank == 0:
print('Starting Epoch %d:' % (epoch))
# Training phase
train_correct = np.zeros(shape=[1], dtype=np.float32)
test_correct = np.zeros(shape=[1], dtype=np.float32)
train_loss = np.zeros(shape=[1], dtype=np.float32)
model.train()
for b in range(num_train_batch):
# Generate the patch data in this iteration
x = train_x[idx[b * batch_size:(b + 1) * batch_size]]
if model.dimension == 4:
x = augmentation(x, batch_size)
if (image_size != model.input_size):
x = resize_dataset(x, model.input_size)
x = x.astype(np_dtype[precision])
y = train_y[idx[b * batch_size:(b + 1) * batch_size]]
# Copy the patch data into input tensors
tx.copy_from_numpy(x)
ty.copy_from_numpy(y)
# Train the model
out, loss = model(tx, ty, dist_option, spars)
train_correct += accuracy(tensor.to_numpy(out), y)
train_loss += tensor.to_numpy(loss)[0]
if DIST:
# Reduce the evaluation accuracy and loss from multiple devices
reducer = tensor.Tensor((1,), dev, tensor.float32)
train_correct = reduce_variable(train_correct, sgd, reducer)
train_loss = reduce_variable(train_loss, sgd, reducer)
if global_rank == 0:
print('Training loss = %f, training accuracy = %f' %
(train_loss, train_correct /
(num_train_batch * batch_size * world_size)),
flush=True)
# Evaluation phase
model.eval()
for b in range(num_val_batch):
x = val_x[b * batch_size:(b + 1) * batch_size]
if model.dimension == 4:
if (image_size != model.input_size):
x = resize_dataset(x, model.input_size)
x = x.astype(np_dtype[precision])
y = val_y[b * batch_size:(b + 1) * batch_size]
tx.copy_from_numpy(x)
ty.copy_from_numpy(y)
out_test = model(tx)
test_correct += accuracy(tensor.to_numpy(out_test), y)
if DIST:
            # Reduce the evaluation accuracy from multiple devices
test_correct = reduce_variable(test_correct, sgd, reducer)
# Output the evaluation accuracy
if global_rank == 0:
print('Evaluation accuracy = %f, Elapsed Time = %fs' %
(test_correct / (num_val_batch * batch_size * world_size),
time.time() - start_time),
flush=True)
dev.PrintTimeProfiling()
if __name__ == '__main__':
# Use argparse to get command config: max_epoch, model, data, etc., for single gpu training
parser = argparse.ArgumentParser(
description='Training using the autograd and graph.')
parser.add_argument(
'model',
choices=['cnn', 'resnet', 'xceptionnet', 'mlp', 'alexnet'],
default='cnn')
parser.add_argument('data',
choices=['mnist', 'cifar10', 'cifar100'],
default='mnist')
parser.add_argument('-p',
choices=['float32', 'float16'],
default='float32',
dest='precision')
parser.add_argument('-m',
'--max-epoch',
default=10,
type=int,
help='maximum epochs',
dest='max_epoch')
parser.add_argument('-b',
'--batch-size',
default=64,
type=int,
help='batch size',
dest='batch_size')
parser.add_argument('-l',
'--learning-rate',
default=0.005,
type=float,
help='initial learning rate',
dest='lr')
# Determine which gpu to use
parser.add_argument('-i',
'--device-id',
default=0,
type=int,
help='which GPU to use',
dest='device_id')
parser.add_argument('-g',
'--disable-graph',
                        default=True,
action='store_false',
help='disable graph',
dest='graph')
parser.add_argument('-v',
'--log-verbosity',
default=0,
type=int,
help='logging verbosity',
dest='verbosity')
args = parser.parse_args()
sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5, dtype=singa_dtype[args.precision])
run(0,
1,
args.device_id,
args.max_epoch,
args.batch_size,
args.model,
args.data,
sgd,
args.graph,
args.verbosity,
precision=args.precision)
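# Example single-GPU invocations (assuming this script is saved as train_cnn.py and
# the data/ and model/ helper packages referenced above are importable):
#
#     python train_cnn.py cnn mnist -m 5 -b 128 -i 0
#     python train_cnn.py resnet cifar10 -p float16 -l 0.01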
|
|
"""The tests for the emulated Hue component."""
import asyncio
import json
from ipaddress import ip_address
from unittest.mock import patch
from aiohttp.hdrs import CONTENT_TYPE
import pytest
from tests.common import get_test_instance_port
from homeassistant import const, setup
from homeassistant.components import (
fan, http, light, script, emulated_hue, media_player, cover, climate)
from homeassistant.components.emulated_hue import Config
from homeassistant.components.emulated_hue.hue_api import (
HUE_API_STATE_ON, HUE_API_STATE_BRI, HUE_API_STATE_HUE, HUE_API_STATE_SAT,
HueUsernameView, HueOneLightStateView,
HueAllLightsStateView, HueOneLightChangeView, HueAllGroupsStateView)
from homeassistant.const import STATE_ON, STATE_OFF
import homeassistant.util.dt as dt_util
from datetime import timedelta
from tests.common import async_fire_time_changed
HTTP_SERVER_PORT = get_test_instance_port()
BRIDGE_SERVER_PORT = get_test_instance_port()
BRIDGE_URL_BASE = 'http://127.0.0.1:{}'.format(BRIDGE_SERVER_PORT) + '{}'
JSON_HEADERS = {CONTENT_TYPE: const.CONTENT_TYPE_JSON}
@pytest.fixture
def hass_hue(loop, hass):
"""Set up a Home Assistant instance for these tests."""
# We need to do this to get access to homeassistant/turn_(on,off)
loop.run_until_complete(setup.async_setup_component(
hass, 'homeassistant', {}))
loop.run_until_complete(setup.async_setup_component(
hass, http.DOMAIN,
{http.DOMAIN: {http.CONF_SERVER_PORT: HTTP_SERVER_PORT}}))
with patch('homeassistant.components'
'.emulated_hue.UPNPResponderThread'):
loop.run_until_complete(
setup.async_setup_component(hass, emulated_hue.DOMAIN, {
emulated_hue.DOMAIN: {
emulated_hue.CONF_LISTEN_PORT: BRIDGE_SERVER_PORT,
emulated_hue.CONF_EXPOSE_BY_DEFAULT: True
}
}))
loop.run_until_complete(
setup.async_setup_component(hass, light.DOMAIN, {
'light': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
setup.async_setup_component(hass, script.DOMAIN, {
'script': {
'set_kitchen_light': {
'sequence': [
{
'service_template':
"light.turn_{{ requested_state }}",
'data_template': {
'entity_id': 'light.kitchen_lights',
'brightness': "{{ requested_level }}"
}
}
]
}
}
}))
loop.run_until_complete(
setup.async_setup_component(hass, climate.DOMAIN, {
'climate': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
setup.async_setup_component(hass, media_player.DOMAIN, {
'media_player': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
setup.async_setup_component(hass, fan.DOMAIN, {
'fan': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
setup.async_setup_component(hass, cover.DOMAIN, {
'cover': [
{
'platform': 'demo',
}
]
}))
# Kitchen light is explicitly excluded from being exposed
kitchen_light_entity = hass.states.get('light.kitchen_lights')
attrs = dict(kitchen_light_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE] = False
hass.states.async_set(
kitchen_light_entity.entity_id, kitchen_light_entity.state,
attributes=attrs)
# Ceiling Fan is explicitly excluded from being exposed
ceiling_fan_entity = hass.states.get('fan.ceiling_fan')
attrs = dict(ceiling_fan_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE_HIDDEN] = True
hass.states.async_set(
ceiling_fan_entity.entity_id, ceiling_fan_entity.state,
attributes=attrs)
# Expose the script
script_entity = hass.states.get('script.set_kitchen_light')
attrs = dict(script_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE] = True
hass.states.async_set(
script_entity.entity_id, script_entity.state, attributes=attrs
)
# Expose cover
cover_entity = hass.states.get('cover.living_room_window')
attrs = dict(cover_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE_HIDDEN] = False
hass.states.async_set(
cover_entity.entity_id, cover_entity.state, attributes=attrs
)
# Expose Hvac
hvac_entity = hass.states.get('climate.hvac')
attrs = dict(hvac_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE_HIDDEN] = False
hass.states.async_set(
hvac_entity.entity_id, hvac_entity.state, attributes=attrs
)
# Expose HeatPump
hp_entity = hass.states.get('climate.heatpump')
attrs = dict(hp_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE_HIDDEN] = False
hass.states.async_set(
hp_entity.entity_id, hp_entity.state, attributes=attrs
)
return hass
@pytest.fixture
def hue_client(loop, hass_hue, aiohttp_client):
"""Create web client for emulated hue api."""
web_app = hass_hue.http.app
config = Config(None, {
emulated_hue.CONF_TYPE: emulated_hue.TYPE_ALEXA,
emulated_hue.CONF_ENTITIES: {
'light.bed_light': {
emulated_hue.CONF_ENTITY_HIDDEN: True
},
'cover.living_room_window': {
emulated_hue.CONF_ENTITY_HIDDEN: False
}
}
})
HueUsernameView().register(web_app, web_app.router)
HueAllLightsStateView(config).register(web_app, web_app.router)
HueOneLightStateView(config).register(web_app, web_app.router)
HueOneLightChangeView(config).register(web_app, web_app.router)
HueAllGroupsStateView(config).register(web_app, web_app.router)
return loop.run_until_complete(aiohttp_client(web_app))
@asyncio.coroutine
def test_discover_lights(hue_client):
"""Test the discovery of lights."""
result = yield from hue_client.get('/api/username/lights')
assert result.status == 200
assert 'application/json' in result.headers['content-type']
result_json = yield from result.json()
devices = set(val['uniqueid'] for val in result_json.values())
# Make sure the lights we added to the config are there
assert 'light.ceiling_lights' in devices
assert 'light.bed_light' not in devices
assert 'script.set_kitchen_light' in devices
assert 'light.kitchen_lights' not in devices
assert 'media_player.living_room' in devices
assert 'media_player.bedroom' in devices
assert 'media_player.walkman' in devices
assert 'media_player.lounge_room' in devices
assert 'fan.living_room_fan' in devices
assert 'fan.ceiling_fan' not in devices
assert 'cover.living_room_window' in devices
assert 'climate.hvac' in devices
assert 'climate.heatpump' in devices
assert 'climate.ecobee' not in devices
@asyncio.coroutine
def test_get_light_state(hass_hue, hue_client):
"""Test the getting of light state."""
# Turn office light on and set to 127 brightness, and set light color
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_ON,
{
const.ATTR_ENTITY_ID: 'light.ceiling_lights',
light.ATTR_BRIGHTNESS: 127,
light.ATTR_RGB_COLOR: (1, 2, 7)
},
blocking=True)
office_json = yield from perform_get_light_state(
hue_client, 'light.ceiling_lights', 200)
assert office_json['state'][HUE_API_STATE_ON] is True
assert office_json['state'][HUE_API_STATE_BRI] == 127
assert office_json['state'][HUE_API_STATE_HUE] == 41869
assert office_json['state'][HUE_API_STATE_SAT] == 217
# Check all lights view
result = yield from hue_client.get('/api/username/lights')
assert result.status == 200
assert 'application/json' in result.headers['content-type']
result_json = yield from result.json()
assert 'light.ceiling_lights' in result_json
assert result_json['light.ceiling_lights']['state'][HUE_API_STATE_BRI] == \
127
# Turn office light off
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{
const.ATTR_ENTITY_ID: 'light.ceiling_lights'
},
blocking=True)
office_json = yield from perform_get_light_state(
hue_client, 'light.ceiling_lights', 200)
assert office_json['state'][HUE_API_STATE_ON] is False
assert office_json['state'][HUE_API_STATE_BRI] == 0
assert office_json['state'][HUE_API_STATE_HUE] == 0
assert office_json['state'][HUE_API_STATE_SAT] == 0
# Make sure bedroom light isn't accessible
yield from perform_get_light_state(
hue_client, 'light.bed_light', 404)
# Make sure kitchen light isn't accessible
yield from perform_get_light_state(
hue_client, 'light.kitchen_lights', 404)
@asyncio.coroutine
def test_put_light_state(hass_hue, hue_client):
"""Test the setting of light states."""
yield from perform_put_test_on_ceiling_lights(hass_hue, hue_client)
# Turn the bedroom light on first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_ON,
{const.ATTR_ENTITY_ID: 'light.ceiling_lights',
light.ATTR_BRIGHTNESS: 153},
blocking=True)
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_ON
assert ceiling_lights.attributes[light.ATTR_BRIGHTNESS] == 153
# update light state through api
yield from perform_put_light_state(
hass_hue, hue_client,
'light.ceiling_lights', True,
hue=4369, saturation=127, brightness=123)
# go through api to get the state back
ceiling_json = yield from perform_get_light_state(
hue_client, 'light.ceiling_lights', 200)
assert ceiling_json['state'][HUE_API_STATE_BRI] == 123
assert ceiling_json['state'][HUE_API_STATE_HUE] == 4369
assert ceiling_json['state'][HUE_API_STATE_SAT] == 127
# Go through the API to turn it off
ceiling_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.ceiling_lights', False)
ceiling_result_json = yield from ceiling_result.json()
assert ceiling_result.status == 200
assert 'application/json' in ceiling_result.headers['content-type']
assert len(ceiling_result_json) == 1
# Check to make sure the state changed
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_OFF
ceiling_json = yield from perform_get_light_state(
hue_client, 'light.ceiling_lights', 200)
assert ceiling_json['state'][HUE_API_STATE_BRI] == 0
assert ceiling_json['state'][HUE_API_STATE_HUE] == 0
assert ceiling_json['state'][HUE_API_STATE_SAT] == 0
# Make sure we can't change the bedroom light state
bedroom_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.bed_light', True)
assert bedroom_result.status == 404
# Make sure we can't change the kitchen light state
kitchen_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.kitchen_light', True)
assert kitchen_result.status == 404
@asyncio.coroutine
def test_put_light_state_script(hass_hue, hue_client):
"""Test the setting of script variables."""
# Turn the kitchen light off first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'light.kitchen_lights'},
blocking=True)
# Emulated hue converts 0-100% to 0-255.
level = 23
brightness = round(level * 255 / 100)
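    # e.g. level 23 -> round(23 * 255 / 100) == 59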
script_result = yield from perform_put_light_state(
hass_hue, hue_client,
'script.set_kitchen_light', True, brightness)
script_result_json = yield from script_result.json()
assert script_result.status == 200
assert len(script_result_json) == 2
kitchen_light = hass_hue.states.get('light.kitchen_lights')
assert kitchen_light.state == 'on'
assert kitchen_light.attributes[light.ATTR_BRIGHTNESS] == level
@asyncio.coroutine
def test_put_light_state_climate_set_temperature(hass_hue, hue_client):
"""Test setting climate temperature."""
brightness = 19
temperature = round(brightness / 255 * 100)
hvac_result = yield from perform_put_light_state(
hass_hue, hue_client,
'climate.hvac', True, brightness)
hvac_result_json = yield from hvac_result.json()
assert hvac_result.status == 200
assert len(hvac_result_json) == 2
hvac = hass_hue.states.get('climate.hvac')
assert hvac.state == climate.const.HVAC_MODE_COOL
assert hvac.attributes[climate.ATTR_TEMPERATURE] == temperature
# Make sure we can't change the ecobee temperature since it's not exposed
ecobee_result = yield from perform_put_light_state(
hass_hue, hue_client,
'climate.ecobee', True)
assert ecobee_result.status == 404
@asyncio.coroutine
def test_put_light_state_media_player(hass_hue, hue_client):
"""Test turning on media player and setting volume."""
# Turn the music player off first
yield from hass_hue.services.async_call(
media_player.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'media_player.walkman'},
blocking=True)
# Emulated hue converts 0.0-1.0 to 0-255.
level = 0.25
brightness = round(level * 255)
mp_result = yield from perform_put_light_state(
hass_hue, hue_client,
'media_player.walkman', True, brightness)
mp_result_json = yield from mp_result.json()
assert mp_result.status == 200
assert len(mp_result_json) == 2
walkman = hass_hue.states.get('media_player.walkman')
assert walkman.state == 'playing'
assert walkman.attributes[media_player.ATTR_MEDIA_VOLUME_LEVEL] == level
async def test_close_cover(hass_hue, hue_client):
"""Test opening cover ."""
COVER_ID = "cover.living_room_window"
    # Close the cover first
await hass_hue.services.async_call(
cover.DOMAIN, const.SERVICE_CLOSE_COVER,
{const.ATTR_ENTITY_ID: COVER_ID},
blocking=True)
cover_test = hass_hue.states.get(COVER_ID)
assert cover_test.state == 'closing'
for _ in range(7):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass_hue, future)
await hass_hue.async_block_till_done()
cover_test = hass_hue.states.get(COVER_ID)
assert cover_test.state == 'closed'
# Go through the API to turn it on
cover_result = await perform_put_light_state(
hass_hue, hue_client,
COVER_ID, True, 100)
assert cover_result.status == 200
assert 'application/json' in cover_result.headers['content-type']
for _ in range(7):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass_hue, future)
await hass_hue.async_block_till_done()
cover_result_json = await cover_result.json()
assert len(cover_result_json) == 2
# Check to make sure the state changed
cover_test_2 = hass_hue.states.get(COVER_ID)
assert cover_test_2.state == 'open'
async def test_set_position_cover(hass_hue, hue_client):
"""Test setting postion cover ."""
COVER_ID = "cover.living_room_window"
    # Close the cover first
await hass_hue.services.async_call(
cover.DOMAIN, const.SERVICE_CLOSE_COVER,
{const.ATTR_ENTITY_ID: COVER_ID},
blocking=True)
cover_test = hass_hue.states.get(COVER_ID)
assert cover_test.state == 'closing'
for _ in range(7):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass_hue, future)
await hass_hue.async_block_till_done()
cover_test = hass_hue.states.get(COVER_ID)
assert cover_test.state == 'closed'
level = 20
brightness = round(level/100*255)
# Go through the API to open
cover_result = await perform_put_light_state(
hass_hue, hue_client,
COVER_ID, False, brightness)
assert cover_result.status == 200
assert 'application/json' in cover_result.headers['content-type']
cover_result_json = await cover_result.json()
assert len(cover_result_json) == 2
    assert cover_result_json[0]['success'][
        '/lights/cover.living_room_window/state/on'] is True
assert cover_result_json[1]['success'][
'/lights/cover.living_room_window/state/bri'] == level
for _ in range(100):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass_hue, future)
await hass_hue.async_block_till_done()
# Check to make sure the state changed
cover_test_2 = hass_hue.states.get(COVER_ID)
assert cover_test_2.state == 'open'
assert cover_test_2.attributes.get('current_position') == level
@asyncio.coroutine
def test_put_light_state_fan(hass_hue, hue_client):
"""Test turning on fan and setting speed."""
# Turn the fan off first
yield from hass_hue.services.async_call(
fan.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'fan.living_room_fan'},
blocking=True)
# Emulated hue converts 0-100% to 0-255.
level = 43
brightness = round(level * 255 / 100)
fan_result = yield from perform_put_light_state(
hass_hue, hue_client,
'fan.living_room_fan', True, brightness)
fan_result_json = yield from fan_result.json()
assert fan_result.status == 200
assert len(fan_result_json) == 2
living_room_fan = hass_hue.states.get('fan.living_room_fan')
assert living_room_fan.state == 'on'
assert living_room_fan.attributes[fan.ATTR_SPEED] == fan.SPEED_MEDIUM
# pylint: disable=invalid-name
@asyncio.coroutine
def test_put_with_form_urlencoded_content_type(hass_hue, hue_client):
"""Test the form with urlencoded content."""
# Needed for Alexa
yield from perform_put_test_on_ceiling_lights(
hass_hue, hue_client, 'application/x-www-form-urlencoded')
# Make sure we fail gracefully when we can't parse the data
data = {'key1': 'value1', 'key2': 'value2'}
result = yield from hue_client.put(
'/api/username/lights/light.ceiling_lights/state',
headers={
'content-type': 'application/x-www-form-urlencoded'
},
data=data,
)
assert result.status == 400
@asyncio.coroutine
def test_entity_not_found(hue_client):
"""Test for entity which are not found."""
result = yield from hue_client.get(
'/api/username/lights/not.existant_entity')
assert result.status == 404
result = yield from hue_client.put(
'/api/username/lights/not.existant_entity/state')
assert result.status == 404
@asyncio.coroutine
def test_allowed_methods(hue_client):
"""Test the allowed methods."""
result = yield from hue_client.get(
'/api/username/lights/light.ceiling_lights/state')
assert result.status == 405
result = yield from hue_client.put(
'/api/username/lights/light.ceiling_lights')
assert result.status == 405
result = yield from hue_client.put(
'/api/username/lights')
assert result.status == 405
@asyncio.coroutine
def test_proper_put_state_request(hue_client):
"""Test the request to set the state."""
# Test proper on value parsing
result = yield from hue_client.put(
'/api/username/lights/{}/state'.format(
'light.ceiling_lights'),
data=json.dumps({HUE_API_STATE_ON: 1234}))
assert result.status == 400
# Test proper brightness value parsing
result = yield from hue_client.put(
'/api/username/lights/{}/state'.format(
'light.ceiling_lights'),
data=json.dumps({
HUE_API_STATE_ON: True,
HUE_API_STATE_BRI: 'Hello world!'
}))
assert result.status == 400
@asyncio.coroutine
def test_get_empty_groups_state(hue_client):
"""Test the request to get groups endpoint."""
# Test proper on value parsing
result = yield from hue_client.get(
'/api/username/groups')
assert result.status == 200
result_json = yield from result.json()
assert result_json == {}
# pylint: disable=invalid-name
async def perform_put_test_on_ceiling_lights(hass_hue, hue_client,
content_type='application/json'):
"""Test the setting of a light."""
# Turn the office light off first
await hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'light.ceiling_lights'},
blocking=True)
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_OFF
# Go through the API to turn it on
office_result = await perform_put_light_state(
hass_hue, hue_client,
'light.ceiling_lights', True, 56, content_type)
assert office_result.status == 200
assert 'application/json' in office_result.headers['content-type']
office_result_json = await office_result.json()
assert len(office_result_json) == 2
# Check to make sure the state changed
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_ON
assert ceiling_lights.attributes[light.ATTR_BRIGHTNESS] == 56
@asyncio.coroutine
def perform_get_light_state(client, entity_id, expected_status):
"""Test the getting of a light state."""
result = yield from client.get('/api/username/lights/{}'.format(entity_id))
assert result.status == expected_status
if expected_status == 200:
assert 'application/json' in result.headers['content-type']
return (yield from result.json())
return None
@asyncio.coroutine
def perform_put_light_state(hass_hue, client, entity_id, is_on,
brightness=None, content_type='application/json',
hue=None, saturation=None):
"""Test the setting of a light state."""
req_headers = {'Content-Type': content_type}
data = {HUE_API_STATE_ON: is_on}
if brightness is not None:
data[HUE_API_STATE_BRI] = brightness
if hue is not None:
data[HUE_API_STATE_HUE] = hue
if saturation is not None:
data[HUE_API_STATE_SAT] = saturation
result = yield from client.put(
'/api/username/lights/{}/state'.format(entity_id), headers=req_headers,
data=json.dumps(data).encode())
# Wait until state change is complete before continuing
yield from hass_hue.async_block_till_done()
return result
async def test_external_ip_blocked(hue_client):
"""Test external IP blocked."""
with patch('homeassistant.components.http.real_ip.ip_address',
return_value=ip_address('45.45.45.45')):
result = await hue_client.get('/api/username/lights')
assert result.status == 400
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FarmersOperations(object):
"""FarmersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.agrifood.farming.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
ids=None, # type: Optional[List[str]]
names=None, # type: Optional[List[str]]
property_filters=None, # type: Optional[List[str]]
statuses=None, # type: Optional[List[str]]
min_created_date_time=None, # type: Optional[datetime.datetime]
max_created_date_time=None, # type: Optional[datetime.datetime]
min_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_page_size=50, # type: Optional[int]
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FarmerListResponse"]
"""Returns a paginated list of farmer resources.
:param ids: Ids of the resource.
:type ids: list[str]
:param names: Names of the resource.
:type names: list[str]
:param property_filters: Filters on key-value pairs within the Properties object.
eg. "{testKey} eq {testValue}".
:type property_filters: list[str]
:param statuses: Statuses of the resource.
:type statuses: list[str]
:param min_created_date_time: Minimum creation date of resource (inclusive).
:type min_created_date_time: ~datetime.datetime
:param max_created_date_time: Maximum creation date of resource (inclusive).
:type max_created_date_time: ~datetime.datetime
:param min_last_modified_date_time: Minimum last modified date of resource (inclusive).
:type min_last_modified_date_time: ~datetime.datetime
:param max_last_modified_date_time: Maximum last modified date of resource (inclusive).
:type max_last_modified_date_time: ~datetime.datetime
:param max_page_size: Maximum number of items needed (inclusive).
Minimum = 10, Maximum = 1000, Default value = 50.
:type max_page_size: int
:param skip_token: Skip token for getting next set of results.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FarmerListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.agrifood.farming.models.FarmerListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FarmerListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if ids is not None:
query_parameters['ids'] = [self._serialize.query("ids", q, 'str') if q is not None else '' for q in ids]
if names is not None:
query_parameters['names'] = [self._serialize.query("names", q, 'str') if q is not None else '' for q in names]
if property_filters is not None:
query_parameters['propertyFilters'] = [self._serialize.query("property_filters", q, 'str') if q is not None else '' for q in property_filters]
if statuses is not None:
query_parameters['statuses'] = [self._serialize.query("statuses", q, 'str') if q is not None else '' for q in statuses]
if min_created_date_time is not None:
query_parameters['minCreatedDateTime'] = self._serialize.query("min_created_date_time", min_created_date_time, 'iso-8601')
if max_created_date_time is not None:
query_parameters['maxCreatedDateTime'] = self._serialize.query("max_created_date_time", max_created_date_time, 'iso-8601')
if min_last_modified_date_time is not None:
query_parameters['minLastModifiedDateTime'] = self._serialize.query("min_last_modified_date_time", min_last_modified_date_time, 'iso-8601')
if max_last_modified_date_time is not None:
query_parameters['maxLastModifiedDateTime'] = self._serialize.query("max_last_modified_date_time", max_last_modified_date_time, 'iso-8601')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10)
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FarmerListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/farmers'} # type: ignore
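    # Usage sketch (illustrative only; ``client.farmers`` is the assumed attribute as
    # above): list() returns an ItemPaged iterator, so the caller just iterates and
    # paging via nextLink is handled transparently; the keyword arguments map onto the
    # query parameters built in prepare_request above.
    #
    #   import datetime
    #   recent = client.farmers.list(
    #       min_last_modified_date_time=datetime.datetime(2021, 1, 1),
    #       max_page_size=100)
    #   for farmer in recent:
    #       print(farmer.id)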
def get(
self,
farmer_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Farmer"
"""Gets a specified farmer resource.
:param farmer_id: ID of the associated farmer.
:type farmer_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Farmer, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.Farmer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Farmer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Farmer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/farmers/{farmerId}'} # type: ignore
def create_or_update(
self,
farmer_id, # type: str
farmer=None, # type: Optional["_models.Farmer"]
**kwargs # type: Any
):
# type: (...) -> "_models.Farmer"
"""Creates or updates a farmer resource.
:param farmer_id: ID of the farmer resource.
:type farmer_id: str
:param farmer: Farmer resource payload to create or update.
:type farmer: ~azure.agrifood.farming.models.Farmer
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Farmer, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.Farmer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Farmer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
content_type = kwargs.pop("content_type", "application/merge-patch+json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if farmer is not None:
body_content = self._serialize.body(farmer, 'Farmer')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('Farmer', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Farmer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/farmers/{farmerId}'} # type: ignore
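    # Usage sketch (illustrative only; model field names are omitted because they depend
    # on the generated _models module): the payload is sent as
    # application/merge-patch+json, so for an existing farmer only the fields set on the
    # model are modified.
    #
    #   body = _models.Farmer()   # populate the desired fields before sending
    #   farmer = client.farmers.create_or_update(
    #       farmer_id="contoso-farmer-1", farmer=body)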
def delete(
self,
farmer_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes a specified farmer resource.
:param farmer_id: ID of farmer to be deleted.
:type farmer_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/farmers/{farmerId}'} # type: ignore
def get_cascade_delete_job_details(
self,
job_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.CascadeDeleteJob"
"""Get a cascade delete job for specified farmer.
:param job_id: ID of the job.
:type job_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CascadeDeleteJob, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.CascadeDeleteJob
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CascadeDeleteJob"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.get_cascade_delete_job_details.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'jobId': self._serialize.url("job_id", job_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CascadeDeleteJob', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_cascade_delete_job_details.metadata = {'url': '/farmers/cascade-delete/{jobId}'} # type: ignore
def _create_cascade_delete_job_initial(
self,
job_id, # type: str
farmer_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.CascadeDeleteJob"
cls = kwargs.pop('cls', None) # type: ClsType["_models.CascadeDeleteJob"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self._create_cascade_delete_job_initial.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'jobId': self._serialize.url("job_id", job_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['farmerId'] = self._serialize.query("farmer_id", farmer_id, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CascadeDeleteJob', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_cascade_delete_job_initial.metadata = {'url': '/farmers/cascade-delete/{jobId}'} # type: ignore
def begin_create_cascade_delete_job(
self,
job_id, # type: str
farmer_id, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.CascadeDeleteJob"]
"""Create a cascade delete job for specified farmer.
:param job_id: Job ID supplied by end user.
:type job_id: str
:param farmer_id: ID of the farmer to be deleted.
:type farmer_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either CascadeDeleteJob or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.agrifood.farming.models.CascadeDeleteJob]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CascadeDeleteJob"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_cascade_delete_job_initial(
job_id=job_id,
farmer_id=farmer_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('CascadeDeleteJob', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'jobId': self._serialize.url("job_id", job_id, 'str'),
}
if polling is True: polling_method = LROBasePolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_cascade_delete_job.metadata = {'url': '/farmers/cascade-delete/{jobId}'} # type: ignore
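    # Usage sketch (illustrative only): begin_* returns an azure.core LROPoller, so a
    # caller typically blocks on result(); poller.continuation_token() can be stored and
    # passed back as continuation_token= to resume polling later.
    #
    #   poller = client.farmers.begin_create_cascade_delete_job(
    #       job_id="cascade-delete-1", farmer_id="contoso-farmer-1")
    #   job = poller.result()      # CascadeDeleteJob once the operation completes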
|
|
""":mod:`libfeedly.api`
~~~~~~~~~~~~~~~~~~~~~~~
"""
import requests
import time
try:
import simplejson as json
except ImportError:
import json
from .subscription import Subscription
from .stream import Stream
from .item import Item
from .utils import user_id, tag_id, category_id, feed_id, escape
from .compat import xmap
__all__ = 'API', 'APIError'
class APIError(Exception):
    """Raised when a Feedly API request does not return the expected status."""
class API(object):
"""
"""
def __init__(self, prefix="http://cloud.feedly.com", version="v3"):
self._prefix = prefix
self._version = version
self._session = requests.Session()
self._session.headers['content-type'] = 'application/json'
def raw_get(self, *args, **kwds):
return self._session.get(*args, **kwds)
def _wrap_data(self, kwds):
if not kwds.get('data'):
return
sess = self._session
content_type = kwds['headers'].get('content-type') or \
kwds['headers'].get('Content-Type') or \
sess.headers.get('content-type')
if content_type == 'application/json':
kwds['data'] = json.dumps(kwds['data'])
def raw_post(self, *args, **kwds):
self._wrap_data(kwds)
return self._session.post(*args, **kwds)
def raw_put(self, *args, **kwds):
self._wrap_data(kwds)
return self._session.put(*args, **kwds)
def raw_delete(self, *args, **kwds):
return self._session.delete(*args, **kwds)
def get(self, uri_path, headers=None, params=None):
headers = headers or {}
params = params or {}
_append_ck(params)
uri = '%s/%s/%s' % (self._prefix, self._version, uri_path)
return self.raw_get(uri, headers=headers, params=params)
def post(self, uri_path, headers=None, params=None, data=None):
headers = headers or {}
params = params or {}
data = data or {}
_append_ck(params)
uri = '%s/%s/%s' % (self._prefix, self._version, uri_path)
return self.raw_post(uri, headers=headers, params=params, data=data)
def put(self, uri_path, headers=None, params=None, data=None):
headers = headers or {}
params = params or {}
data = data or {}
_append_ck(params)
uri = '%s/%s/%s' % (self._prefix, self._version, uri_path)
return self.raw_put(uri, headers=headers, params=params, data=data)
def delete(self, uri_path, headers=None, params=None):
headers = headers or {}
params = params or {}
_append_ck(params)
uri = '%s/%s/%s' % (self._prefix, self._version, uri_path)
return self.raw_delete(uri, headers=headers, params=params)
def feed(self, uri):
uri_path = 'feeds/%s' % feed_id(uri, escape=True)
resp = self.get(uri_path)
if resp.status_code != 200:
raise APIError('Invalid input')
return resp.json()
def make_auth_url(self, client_id='feedly',
redirect_uri='https://cloud.feedly.com/feedly.html',
scope='https://cloud.feedly.com/subscriptions',
response_type='code', provider='google'):
params = dict(client_id=client_id, redirect_uri=redirect_uri,
scope=scope, response_type=response_type, provider=provider,
migrate='false')
resp = self.get('auth/auth', params=params)
if resp.status_code != 200:
            raise APIError('Not authorized')
return resp.url
def create_token(self, code,
client_id='feedly',
client_secret='0XP4XQ07VVMDWBKUHTJM4WUQ',
grant_type='authorization_code',
redirect_uri='http://www.feedly.com/feedly.html'):
"""
"""
data = dict(client_id=client_id, client_secret=client_secret,
grant_type=grant_type, redirect_uri=redirect_uri,
code=code)
headers = {'content-type': 'application/x-www-form-urlencoded'}
resp = self.post('auth/token', headers=headers, data=data)
if resp.status_code != 200:
            raise APIError('Not authorized')
json = resp.json()
return dict(id=json['id'], access_token=json['access_token'],
refresh_token=json['refresh_token'],
expire=int(time.time() + json['expires_in']))
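    # Usage sketch (illustrative only): the expected flow is to open the URL from
    # make_auth_url(), let the user authorize and capture the returned code, exchange it
    # here for tokens, and then install the access token through the auth_key setter
    # below.
    #
    #   api = API()
    #   print(api.make_auth_url())             # user authorizes, redirect carries ?code=...
    #   token = api.create_token(code)
    #   api.auth_key = token['access_token']   # later: api.refresh_token(token['refresh_token'])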
def refresh_token(self, refresh_token,
client_id='feedly',
client_secret='0XP4XQ07VVMDWBKUHTJM4WUQ',
grant_type='refresh_token'):
print("DEBUG: %s" % refresh_token)
headers = {'content-type': 'application/x-www-form-urlencoded'}
data = dict(client_id=client_id, client_secret=client_secret,
grant_type='refresh_token', refresh_token=refresh_token)
resp = self.post('auth/token', headers=headers, data=data)
if resp.status_code != 200:
            raise APIError('Not authorized')
json = resp.json()
return dict(id=json['id'], access_token=json['access_token'],
expire=int(time.time() + json['expires_in']))
@property
def auth_key(self):
return (self._session.headers or {}).get('Authorization', 'Oauth ')[6:]
@auth_key.setter
def auth_key(self, key):
if getattr(self, '_profile', None):
del self._profile
if not key:
del self._session.headers['Authorization']
return
self._session.headers['Authorization'] = 'OAuth %s' % key
self.profile
@property
def profile(self):
if getattr(self, '_profile', None):
return self._profile
resp = self.get('profile')
if resp.status_code != 200:
            raise APIError('Not authorized')
self._profile = resp.json()
return self._profile
@property
def user_id(self):
return user_id(self.profile['id'])
@property
def subscriptions(self):
resp = self.get('subscriptions')
if resp.status_code != 200:
            raise APIError('Not authorized')
for subscription in resp.json():
yield Subscription(api=self, **subscription)
@property
def categories(self):
categories = {'global.uncategorized': dict(subscriptions=[])}
for subscription in self.subscriptions:
if not len(subscription.categories):
categories['global.uncategorized']['subscriptions'].append(subscription)
for label in subscription.categories:
category = categories[label] = categories.get(
label,
dict(id=category_id(self.user_id, label),
subscriptions=[])
)
category['subscriptions'].append(subscription)
return categories
def subscribe(self, uri, categories=None):
info = self.feed(uri)
categories = xmap(self._category, categories or [])
data = dict(id=info['id'], title=info['title'], categories=categories)
resp = self.post('subscriptions', data=data)
if resp.status_code != 200:
            raise APIError('Not authorized')
data['website'] = info['website']
return Subscription(api=self, **data)
def unsubscribe(self, uri):
resp = self.delete('subscriptions/%s' % feed_id(uri, escape=True))
if resp.status_code != 200:
            raise APIError('Not authorized')
return True
def contents(self, stream_id, count=20, unread_only=False,
ranked='newest', continuation=None):
stream_id = stream_id.encode('utf-8')
uri_path = 'streams/%s/contents' % escape(stream_id)
count = int(count) or 20
if count < 0:
count = 20
if not isinstance(unread_only, bool):
unread_only = False
unread_only = str(unread_only).lower()
if ranked not in ['newest', 'oldest']:
ranked = 'newest'
params = dict(count=count, unreadOnly=unread_only, ranked=ranked)
if continuation:
params['continuation'] = continuation
resp = self.get(uri_path, params=params)
if resp.status_code != 200:
            raise APIError('Not authorized')
resp = resp.json()
items = (Item(api=self, **item) for item in resp.get('items', []))
return items, resp.get('continuation')
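    # Usage sketch (illustrative only): contents() yields Item objects plus a
    # continuation token; pass the token back to walk through older pages until the API
    # stops returning one.
    #
    #   items, cont = api.contents(stream_id, count=100, unread_only=True)
    #   while cont:
    #       more, cont = api.contents(stream_id, count=100, unread_only=True,
    #                                 continuation=cont)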
def category(self, category):
return Stream(category_id(self.user_id, category), api=self)
def tag(self, tag):
return Stream(tag_id(self.user_id, tag), api=self)
@property
def all(self):
return self.category('global.all')
@property
def saved(self):
return self.tag('global.saved')
def mark_as_read(self, entry_ids):
if not len(entry_ids):
# TODO: throw invalid item_id
return
data = dict(action='markAsRead', type='entries', entryIds=entry_ids)
resp = self.post('markers', data=data)
if resp.status_code != 200:
raise APIError
def keep_unread(self, entry_ids):
if not len(entry_ids):
# TODO: throw invalid item_id
return
data = dict(action='keepUnread', type='entries', entryIds=entry_ids)
resp = self.post('markers', data=data)
if resp.status_code != 200:
raise APIError
def all_mark_as_read(self, feed_ids, asOf=None):
if not len(feed_ids):
# TODO: throw invalid item_id
return
if not asOf:
asOf = int(time.time()) * 1000
data = dict(action='markAsRead', type='feeds', feedIds=feed_ids,
asOf=asOf)
resp = self.post('markers', data=data)
if resp.status_code != 200:
raise APIError
def tagging(self, tag, item_id):
tag = tag.encode('utf-8')
uri_path = 'tags/%s' % tag_id(self.user_id, tag, escape=True)
resp = self.put(uri_path, data=dict(entryId=item_id))
if resp.status_code != 200:
raise APIError
def untagging(self, tag, item_id):
uri_path = 'tags/%s/%s' % (tag_id(self.user_id, tag, escape=True),
escape(item_id))
resp = self.delete(uri_path)
if resp.status_code != 200:
raise APIError
def _category(self, label):
return dict(id=category_id(self.user_id, label), label=label)
def _append_ck(params):
if not params.get('ck'):
params['ck'] = int(time.time())
|
|
#!/usr/bin/env python
import os
# check if NEOS update is required
while 1:
if (not os.path.isfile("/VERSION") or int(open("/VERSION").read()) < 3) and not os.path.isfile("/sdcard/noupdate"):
os.system("curl -o /tmp/updater https://openpilot.comma.ai/updater && chmod +x /tmp/updater && /tmp/updater")
else:
break
import sys
import time
import importlib
import subprocess
import signal
import traceback
import usb1
from multiprocessing import Process
from selfdrive.services import service_list
import hashlib
import zmq
from setproctitle import setproctitle
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.thermal import read_thermal
from selfdrive.registration import register
from selfdrive.version import version
import common.crash as crash
from common.params import Params
from selfdrive.loggerd.config import ROOT
# comment out anything you don't want to run
managed_processes = {
"uploader": "selfdrive.loggerd.uploader",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"loggerd": ("loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("logcatd", ["./logcatd"]),
"proclogd": ("proclogd", ["./proclogd"]),
"boardd": ("boardd", ["./boardd"]), # switch to c++ boardd
"ui": ("ui", ["./ui"]),
"visiond": ("visiond", ["./visiond"]),
"sensord": ("sensord", ["./sensord"]), }
running = {}
# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = ['visiond']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'sensord',
'radard',
'visiond',
'proclogd',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes
print "registering", name
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
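# Usage sketch (illustrative only, the process names below are made up): a plain string
# descriptor is treated as a python module to import and run, while a (dir, argv) tuple
# is executed as a native binary; car_started=True also adds the process to
# car_started_processes.
#
#   register_managed_process("mapd", "selfdrive.mapd", car_started=True)
#   register_managed_process("gpsplanner", ("gpsplanner", ["./gpsplanner"]))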
# ****************** process management functions ******************
def launcher(proc, gctx):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# exec the process
mod.main(gctx)
except KeyboardInterrupt:
cloudlog.info("child %s got ctrl-c" % proc)
except Exception:
    # can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, basestring):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc, gctx))
else:
pdir, pargs = proc
cwd = os.path.dirname(os.path.realpath(__file__))
if pdir is not None:
cwd = os.path.join(cwd, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
else:
running[name].terminate()
# give it 5 seconds to die
running[name].join(5.0)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
running[name].join(15.0)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date > /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
manage_baseui(False)
for name in running.keys():
kill_managed_process(name)
sys.exit(0)
baseui_running = False
def manage_baseui(start):
global baseui_running
if start and not baseui_running:
cloudlog.info("starting baseui")
os.system("am start -n com.baseui/.MainActivity")
baseui_running = True
elif not start and baseui_running:
cloudlog.info("stopping baseui")
os.system("am force-stop com.baseui")
baseui_running = False
# ****************** run loop ******************
def manager_init():
global gctx
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.bind_global(dongle_id=dongle_id, version=version)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version)
os.system("mkdir -p "+ROOT)
# set gctx
gctx = {}
def manager_thread():
global baseui_running
# now loop
context = zmq.Context()
thermal_sock = messaging.pub_sock(context, service_list['thermal'].port)
health_sock = messaging.sub_sock(context, service_list['health'].port)
cloudlog.info("manager start")
cloudlog.info(dict(os.environ))
start_managed_process("logmessaged")
start_managed_process("logcatd")
start_managed_process("tombstoned")
start_managed_process("uploader")
start_managed_process("ui")
manage_baseui(True)
panda = False
if os.getenv("NOBOARD") is None:
# *** wait for the board ***
panda = wait_for_device() == 0x2300
# flash the device
if os.getenv("NOPROG") is None:
# checkout the matching panda repo
rootdir = os.path.dirname(os.path.abspath(__file__))
os.system("cd %s && git submodule init && git submodule update" % rootdir)
# flash the board
boarddir = os.path.dirname(os.path.abspath(__file__))+"/../panda/board/"
mkfile = "Makefile" if panda else "Makefile.legacy"
print "using", mkfile
os.system("cd %s && make -f %s" % (boarddir, mkfile))
start_managed_process("boardd")
started = False
logger_dead = False
count = 0
  # set a 5 second timeout on the health socket (5x longer than the expected arrival interval)
health_sock.RCVTIMEO = 5000
while 1:
# get health of board, log this in "thermal"
td = messaging.recv_sock(health_sock, wait=True)
print td
# replace thermald
msg = read_thermal()
# loggerd is gated based on free space
statvfs = os.statvfs(ROOT)
avail = (statvfs.f_bavail * 1.0)/statvfs.f_blocks
# thermal message now also includes free space
msg.thermal.freeSpace = avail
with open("/sys/class/power_supply/battery/capacity") as f:
msg.thermal.batteryPercent = int(f.read())
with open("/sys/class/power_supply/battery/status") as f:
msg.thermal.batteryStatus = f.read().strip()
thermal_sock.send(msg.to_bytes())
print msg
# TODO: add car battery voltage check
max_temp = max(msg.thermal.cpu0, msg.thermal.cpu1,
msg.thermal.cpu2, msg.thermal.cpu3) / 10.0
# uploader is gated based on the phone temperature
if max_temp > 85.0:
cloudlog.info("over temp: %r", max_temp)
kill_managed_process("uploader")
elif max_temp < 70.0:
start_managed_process("uploader")
if avail < 0.05:
logger_dead = True
# start constellation of processes when the car starts
# with 2% left, we killall, otherwise the phone is bricked
if td is not None and td.health.started and avail > 0.02:
if not started:
Params().car_start()
started = True
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
manage_baseui(False)
else:
manage_baseui(True)
started = False
logger_dead = False
for p in car_started_processes:
kill_managed_process(p)
    # shutdown if the battery drops below 5% while discharging
if msg.thermal.batteryPercent < 5 and msg.thermal.batteryStatus == "Discharging":
os.system('LD_LIBRARY_PATH="" svc power shutdown')
# check the status of baseui
baseui_running = 'com.baseui' in subprocess.check_output(["ps"])
# check the status of all processes, did any of them die?
for p in running:
cloudlog.debug(" running %s %s" % (p, running[p]))
# report to server once per minute
if (count%60) == 0:
cloudlog.event("STATUS_PACKET",
running=running.keys(),
count=count,
health=(td.to_dict() if td else None),
thermal=msg.to_dict())
count += 1
def get_installed_apks():
dat = subprocess.check_output(["pm", "list", "packages", "-3", "-f"]).strip().split("\n")
ret = {}
for x in dat:
if x.startswith("package:"):
v,k = x.split("package:")[1].split("=")
ret[k] = v
return ret
# optional, build the c++ binaries and preimport the python for speed
def manager_prepare():
# build cereal first
subprocess.check_call(["make", "-j4"], cwd="../cereal")
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
for p in managed_processes:
proc = managed_processes[p]
if isinstance(proc, basestring):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
else:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=proc[0])
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.info("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=proc[0])
subprocess.check_call(["make", "-j4"], cwd=proc[0])
# install apks
installed = get_installed_apks()
for app in os.listdir("../apk/"):
if ".apk" in app:
app = app.split(".apk")[0]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed:
apk_path = "../apk/"+app+".apk"
if os.path.isfile(apk_path):
h1 = hashlib.sha1(open(apk_path).read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app]).read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
for do_uninstall in [False, True]:
if do_uninstall:
cloudlog.info("needing to uninstall %s" % app)
os.system("pm uninstall %s" % app)
ret = os.system("cp %s /sdcard/%s.apk && pm install -r /sdcard/%s.apk && rm /sdcard/%s.apk" % (apk_path, app, app, app))
if ret == 0:
break
assert ret == 0
def wait_for_device():
while 1:
try:
context = usb1.USBContext()
for device in context.getDeviceList(skip_on_error=True):
if (device.getVendorID() == 0xbbaa and device.getProductID() == 0xddcc) or \
(device.getVendorID() == 0x0483 and device.getProductID() == 0xdf11):
bcd = device.getbcdDevice()
handle = device.open()
handle.claimInterface(0)
cloudlog.info("found board")
handle.close()
return bcd
except Exception as e:
print "exception", e,
print "waiting..."
time.sleep(1)
def main():
if os.getenv("NOLOG") is not None:
del managed_processes['loggerd']
del managed_processes['tombstoned']
if os.getenv("NOUPLOAD") is not None:
del managed_processes['uploader']
if os.getenv("NOVISION") is not None:
del managed_processes['visiond']
if os.getenv("NOBOARD") is not None:
del managed_processes['boardd']
if os.getenv("LEAN") is not None:
del managed_processes['uploader']
del managed_processes['loggerd']
del managed_processes['logmessaged']
del managed_processes['logcatd']
del managed_processes['tombstoned']
del managed_processes['proclogd']
if os.getenv("NOCONTROL") is not None:
del managed_processes['controlsd']
del managed_processes['radard']
# support additional internal only extensions
try:
import selfdrive.manager_extensions
selfdrive.manager_extensions.register(register_managed_process)
except ImportError:
pass
params = Params()
params.manager_start()
manager_init()
manager_prepare()
if os.getenv("PREPAREONLY") is not None:
sys.exit(0)
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
"""
Netconfigit and NetworkDevice classes
"""
__license__ = "MIT License"
__author__ = "Eric Griffin"
__copyright__ = "Copyright (C) 2014, Fluent Trade Technologies"
__version__ = "1.1"
import os
import sys
import time
import shutil
import logging
import os.path
import inspect
import threading
import telnetlib
from xml.dom import minidom
from datetime import datetime
import paramiko
from git import *
import aescrypt
import threadpool
from modules import tftpy
# define a global logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Netconfigit(object):
"""Contains configuration data and execution functions for archiving network device configs
Holds a list of NetworkDevice objects with their associated access information and the jobs to run.
Holds global configuration information.
Manages a pool of worker threads used for accessing network devices
Contains accessory functions related to archiving device configs
:param config_file: xml configuration file
:param _password: decryption password
"""
def __init__(self, config_file, _password):
"""Netconfigit constructor
Initializes member variables and reads and parses XML configuration file.
Starts local tftp server and creates temporary directory for holding configs.
Initializes the device threadpool.
:param config_file: XML configuration file path
:param _password: decryption password
"""
# initialize member variables
self.device_list = [] # list of device objects defined in the configuration file
self.device_count = 0 # the number of devices defined in the configuration file
self.success_list = [] # the list of device actions that have succeeded
self.failure_list = [] # the list of device actions that have failed
self.config = 0 # the minidom XML configuration data structure
self.config_devices = 0 # pointer to the device elements in the config minidom structure
self.tftp_thread = 0 # thread pool for running the local tftp server
self.device_threadpool = 0 # thread pool for running device actions
self.password = "" # decryption password
self.verbose = 0 # verbose logging flag
self.logfile = "./Netconfigit.log" # logfile relative path
self.plaintext_passwords = "" # boolean value allows use of plaintext passwords in config xml
self.transfer_ip = "" # IP address of the local tftp and/or scp server
self.scp_username = "" # username used for scp transfer to the local machine
self.scp_password = "" # password used for scp transfer to the local machine
self.scp_chown = "" # the group and user to which uploaded files' ownership should be changed
self.ssh_port = 22 # port used to ssh to local machine - used by chown
self.repo_path = "" # absolute path to the configuration repository
self.repo_password = "" # password for accessing the repository
self.repository = None # GitPython repository object
self.tftp_port = "69" # port used by local tftp server
self.tftp_root = "" # root directory used by local tftp server
self.using_git = 0 # boolean is set to true if the repository directory is a Git repository
self.tempdir = ".netconfigit/" # temporary path for downloading configs
self.time_start = datetime.now() # starting time timestamp used for calculating total running-time
self.time_stop = None # stopping time timestamp used for calculating total running-time
self.time_timestamp = time.time() # starting time timestamp
# formatted timestamp
self.timestamp = datetime.fromtimestamp(self.time_timestamp).strftime('%Y-%m-%d %H:%M:%S')
# create the object used for encrypting/decrypting passwords
self.password = aescrypt.AESCrypt(_password)
# parse xml configuration file
self.config = minidom.parse(config_file)
logging.info("\nUsing %s", config_file)
# check and load options from XML
self.options = self.load_options()
# check existence of devices in configuration
if self.config.getElementsByTagName('device'):
self.config_devices = self.config.getElementsByTagName('device')
else:
print "\nNo devices specified - quitting"
exit(1)
# load devices from XML configuration into device_list
load_err = self.load_devices_xml()
if load_err != "0":
print load_err
print "Configuration errors detected - quitting"
exit(1)
# create temporary directory for receiving configs
self.tempdir = os.path.dirname(self.tftp_root + self.tempdir)
try:
os.stat(self.tempdir)
except os.error:
os.mkdir(self.tempdir)
logger.info("Creating temporary directory " + self.tempdir)
# initialize the thread used for the local tftp server and start the server
self.tftp_thread = threadpool.ThreadPool(1)
self.tftp_thread.add_task(self.tftp_server)
# initialize the thread pool used by device actions
logger.info("Creating %s device threads", 20)
self.device_threadpool = threadpool.ThreadPool(20)
def run_nc(self):
"""Runs the jobs associated with each device
Creates a new threaded task for each device.
"""
# process each device in its own threaded task
for device in self.device_list:
self.device_threadpool.add_task(self.process_actions, device)
def stop_nc(self):
"""Cleans up after the running of all device actions
Waits until all device threads are finished.
Copies the configs from the temporary location into the repo location.
Removes the temporary folder structure.
"""
# wait until all worker threads are finished
self.device_threadpool.wait_completion()
self.tftp_thread.tasks.empty()
# count downloaded files
# TODO: count downloaded files
# copy downloaded files to repo root
for src_dir, dirs, files in os.walk(self.tempdir):
dst_dir = src_dir.replace(self.tempdir, self.repo_path)
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
if os.path.exists(dst_file):
os.remove(dst_file)
shutil.move(src_file, dst_dir)
# delete the temporary directory structure
try:
shutil.rmtree(self.tempdir)
except os.error:
# wait a few seconds and try again if the OS hasn't released a lock on the folders
time.sleep(5)
shutil.rmtree(self.tempdir)
# Calculate total running-time
self.time_stop = datetime.now()
running_time = self.time_stop - self.time_start
# print results and write them to log file
with open(self.logfile, "a") as results:
# print a timestamp and total running time
results.write("\n-------------------------\n\n")
print "-------------------------\n"
print self.timestamp
results.write(self.timestamp + "\n")
print "Elapsed time: " + str(running_time)
results.write("Elapsed Time: " + str(running_time) + "\n\n")
# completed actions
print "\nCompleted:"
results.write("Completed:\n")
if len(self.success_list) == 0:
print "\tNONE"
results.write("\tNONE\n")
else:
for success in self.success_list:
for success_device, success_action in success.items():
print "\t" + success_device + " - " + success_action
results.write("\t" + success_device + " - " + success_action + "\n")
# failed actions
print "\nFailed:"
results.write("\nFailed:\n")
if len(self.failure_list) == 0:
print "\tNONE\n"
results.write("\tNONE\n")
else:
for failure in self.failure_list:
for failure_device, failure_action in failure.items():
print "\t" + failure_device + " - " + failure_action
results.write("\t" + failure_device + " - " + failure_action + "\n")
def load_options(self):
"""Loads options from the XML configuration tree
:return: err: error code
"""
err = 0
# read options from XML
self.logfile = self.get_element_attribute(self.config, "logging", "path")
self.plaintext_passwords = self.get_element_attribute(self.config, "passwords", "plaintext")
self.transfer_ip = self.get_element_attribute(self.config, "transfer", "ip")
self.scp_username = self.get_element_attribute(self.config, "transfer", "username")
self.scp_password = self.get_element_attribute(self.config, "transfer", "password")
self.scp_chown = self.get_element_attribute(self.config, "transfer", "chown")
self.repo_path = self.get_element_attribute(self.config, "repository", "path")
self.tftp_port = self.get_element_attribute(self.config, "transfer", "tftp_port")
self.tftp_root = self.get_element_attribute(self.config, "transfer", "tftp_root")
# check for existence of repo path and assign it as the tftp server's root
if self.repo_path != "NULL":
self.tftp_root = self.repo_path
else:
print "Repository path is not specified."
exit(1)
# make sure the repo path exists
if not os.path.isdir(self.repo_path):
print "Repository path does not exist."
exit(1)
# check whether the repo path is under Git control
git_path_test = self.repo_path + "/.git"
# .git directory does not exist - not a Git repository
if not os.path.isdir(git_path_test):
self.using_git = 0
logger.warning("%s is not a Git repository", self.repo_path)
else:
# repo will be committed/pulled/pushed when everything is done
self.using_git = 1
# create a GitPython repository object
self.repository = Repo(self.repo_path)
# read the repository password from config xml
self.repo_password = self.get_element_attribute(self.config, "repository", "password")
# if repo is under Git try to decode the repo password
if self.plaintext_passwords != "true" and self.repo_password != "NULL":
# if repo password ciphertext is invalid length
if len(self.repo_password) != 24:
print "Encrypted repository password must be a multiple of 16 bytes in length."
exit(1)
else:
self.repo_password = self.password.decode(self.repo_password)
# decrypt transfer password
if self.plaintext_passwords != "true":
if len(self.scp_password) != 24:
print "Encrypted transfer password must be a multiple of 16 bytes in length."
exit(1)
else:
self.scp_password = self.password.decode(self.scp_password)
return err
def tftp_server(self):
"""Creates and starts a local tftp server
Creates a TftpServer object and starts it bound to the IP and port of the calling object
"""
server = tftpy.TftpServer(self.tempdir)
try:
logger.info("Starting tftp server on %s:%s with root %s",
self.transfer_ip, int(self.tftp_port), self.tempdir)
server.listen(self.transfer_ip, int(self.tftp_port))
except tftpy.TftpException, err:
logger.error("Could not start tftp server on %s:%s with root %s",
self.transfer_ip, int(self.tftp_port), self.tempdir)
logger.error("%s", str(err))
sys.exit(1)
except KeyboardInterrupt:
pass
@staticmethod
def get_element_attribute(parent_element, element, attribute):
"""Reads and returns the value of an XML attribute under a given parent node
:param parent_element: the parent XML element under which to search for the element and attribute
        :param element: the XML element whose attribute will be returned
        :param attribute: the XML attribute whose value will be returned
        :return: retval: the searched attribute's value
"""
retval = "NULL"
try:
elements = parent_element.getElementsByTagName(str(element))
except AttributeError:
# if element doesn't exist
return retval
try:
for element in elements:
retval = element.attributes[str(attribute)].value
except:
retval = "NULL"
return retval
@staticmethod
def get_all_element_attribute_values(parent_element, element, attribute):
"""Reads and returns a list of matching sub-elements' attribute values from a parent element
:param parent_element: the parent XML element under which to search
:param element: the XML element to search for
:param attribute: the name of the XML attribute which should be added to the list to be returned
:return: values: list of given attribute values for a given element under the specified parent-element
"""
values = []
try:
elements = parent_element.getElementsByTagName(str(element))
except AttributeError:
# if element doesn't exist
return values
for element in elements:
values.append(element.attributes[str(attribute)].value)
return values
def load_devices_xml(self):
"""Loads devices and associated data from XML configuration
Reads XML for device elements and associated data
Decrypts encrypted passwords in device data
Checks for errors in the device configuration
Creates new NetworkDevice objects and populates member variables
Adds devices to self.device_list
:return: err: error string
"""
err = "0"
for config_device in self.config_devices:
self.device_count += 1
#check the name attribute for the device and create a NetworkDevice object
try:
# create a new NetworkDevice object
device = NetworkDevice(config_device.attributes['name'].value)
device.enabled = config_device.attributes['enabled'].value
except AttributeError:
logger.warning("No name attribute for device #%s", str(self.device_count))
err = "No name attribute for device #" + str(self.device_count)
continue
# populate member variables from XML
device.type = config_device.attributes['type'].value
device.manufacturer = config_device.attributes['manufacturer'].value
device.ip = self.get_element_attribute(config_device, "access", "ip")
device.hostname = self.get_element_attribute(config_device, "access", "hostname")
device.access_type = self.get_element_attribute(config_device, "access", "type")
device.port = self.get_element_attribute(config_device, "access", "port")
device.login_user = self.get_element_attribute(config_device, "access", "username")
device.login_pass = self.get_element_attribute(config_device, "access", "password")
device.enable_password = self.get_element_attribute(config_device, "access", "enable")
device.actions = self.get_all_element_attribute_values(config_device, "action", "type")
# decrypt passwords
if self.plaintext_passwords != "true":
if len(device.login_pass) != 24:
print "Encrypted passwords must be a multiple of 16 bytes in length."
exit(1)
else:
device.login_pass = self.password.decode(device.login_pass)
if len(device.enable_password) != 24 and len(device.enable_password) > 4:
print "Encrypted passwords must be a multiple of 16 bytes in length."
exit(1)
elif len(device.enable_password) == 24:
device.enable_password = self.password.decode(device.enable_password)
# check for errors in config
if device.manufacturer == "NULL":
logger.warning("Must specify device manufacturer for device %s", str(device.name))
err = "Must specify device manufacturer for device " + str(device.name)
if device.ip == "NULL":
logger.warning("Must specify either an IP address or hostname for device %s", str(device.name))
err = "Must specify either an IP address or hostname for device " + str(device.name)
continue
if device.login_user == "NULL" or device.login_pass == "NULL":
logger.warning("Must supply username and password for device %s", str(device.name))
err = "Must supply username and password for device " + str(device.name)
continue
# add the device to the list of devices
self.device_list.append(device)
return err
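    # Illustrative configuration sketch (element and attribute names are taken from
    # load_options() and load_devices_xml() above; the root element name and the action
    # type value are assumptions that depend on the manufacturer module):
    #
    #   <config>
    #     <logging path="./Netconfigit.log"/>
    #     <passwords plaintext="true"/>
    #     <transfer ip="10.0.0.5" username="backup" password="..." tftp_port="69"/>
    #     <repository path="/opt/network-configs"/>
    #     <device name="core-sw1" enabled="1" type="switch" manufacturer="cisco">
    #       <access type="ssh" ip="10.0.0.10" port="22" username="admin" password="..."/>
    #       <action type="running-config"/>
    #     </device>
    #   </config>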
def git_commit_push(self):
"""Synchronizes changes with Git repository
Performs a Git commit with all changes
Performs a Git pull from origin
Performs a Git push to origin
:return: err: error code
"""
err = 0
# stage all changes to the commit
self.repository.git.add(".")
try:
# commit the changes to Git
print self.repository.git.commit('-a', m="Network configuration updates")
logger.info("Commit completed")
err = 1
except:
logger.warning("No changes to commit to Git")
try:
# pause to ensure commit has finished
time.sleep(5)
# pull and then push to Git origin
origin = self.repository.remotes.origin
origin.pull()
origin.push()
print "Git pull-push completed."
except:
logger.warning("Could not complete Git pull-push from origin")
return err
def chown_config(self, _device, _file):
"""Changes ownership of a file on the local system
Calls "chown" and assigns user and group defined in config as owner of file passed to function
:param _device: the device who's config is being chown-ed - to determine absolute file path
:param _file: the filename who's ownership is changed
:return: err: error code
"""
err = 0
# create an ssh connection to the local machine
client = paramiko.SSHClient()
client.get_transport()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(self.transfer_ip, self.ssh_port, username=self.scp_username, password=self.scp_password)
chown_command = "chown " + self.scp_chown + ":" + self.scp_chown + " " \
+ self.repo_path + "/" + _device.name + "/" + _file
logger.info("chown %s with %s", _device.name, self.scp_chown)
try:
#issue the chown command
client.exec_command(chown_command)
except:
logger.error("Could not chown %s with %s", _device.name, self.scp_chown)
return err
@staticmethod
def get_ssh_client_channel(_device):
"""Creates an SSH session to a device
Creates an SSHClient object and initiates the connection
:param _device: the device
:return: client, channel: the client session and ssh channel
"""
client = paramiko.SSHClient()
client.get_transport()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(_device.ip, port=int(_device.port), username=_device.login_user,
password=_device.login_pass, look_for_keys=False)
channel = client.invoke_shell()
return client, channel
def process_actions(self, _device):
"""Processes actions associated with a device
Adds the modules in the devices subfolder to the path
Dynamically loads the module defined by the device_manufacturer
Creates device object for the manufacturer device
Calls run_action() method for the device object for each action associated with the device
:param _device: the device
:return: err: error code
"""
err = 0
manufacturer_init = None
if _device.enabled == "0":
return err
# include modules from 'devices' subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(
inspect.getfile(inspect.currentframe()))[0], "devices")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
# load the manufacturer module dynamically
device_class = _device.manufacturer.title()
try:
dynamic_module = __import__(device_class.lower())
# create an object of the manufacturer class dynamically
manufacturer_init = getattr(dynamic_module, device_class)(_device, self)
except:
print "Device manufacturer " + device_class + " not implemented."
err = 1
if err == 0:
# run each action for the device
for action in _device.actions:
logger.info("Running action %s on %s", action, _device.name)
run_action = getattr(manufacturer_init, 'run_action')(action)
# TODO: check returned run_action value to determine whether action succeeded
return err
class NetworkDevice(threading.Thread):
"""Defines remote access to a network device
Contains data needed for connecting to and retrieving configs from a network device
"""
def __init__(self, _name):
"""Class constructor
:param _name: the name of the device
"""
threading.Thread.__init__(self)
self.name = _name # the name of the device
self.enabled = "0" # whether device should be included when netconfigit is run
self.type = "" # type of device (switch, router, etc.) - used by specific implementation
self.manufacturer = "" # device manufacturer (cisco, arista, etc.)
self.ip = "" # IP address or hostname of the device
self.access_type = "ssh" # method of access used to communicate with the device
self.port = 22 # port used in conjunction with access_type
self.login_user = "" # device login username
self.login_pass = "" # device login password
self.enable_password = "" # device enable password
self.succeeded = 0 # set if device actions succeed after run
self.actions = [] # list of actions defined in the config associated with the device
|
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for decoding protocol buffer primitives.
This code is very similar to encoder.py -- read the docs for that module first.
A "decoder" is a function with the signature:
Decode(buffer, pos, end, message, field_dict)
The arguments are:
buffer: The string containing the encoded message.
pos: The current position in the string.
end: The position in the string where the current message ends. May be
less than len(buffer) if we're reading a sub-message.
message: The message object into which we're parsing.
field_dict: message._fields (avoids a hashtable lookup).
The decoder reads the field and stores it into field_dict, returning the new
buffer position. A decoder for a repeated field may proactively decode all of
the elements of that field, if they appear consecutively.
Note that decoders may throw any of the following:
IndexError: Indicates a truncated message.
struct.error: Unpacking of a fixed-width field failed.
message.DecodeError: Other errors.
Decoders are expected to raise an exception if they are called with pos > end.
This allows callers to be lax about bounds checking: it's fine to read past
"end" as long as you are sure that someone else will notice and throw an
exception later on.
Something up the call stack is expected to catch IndexError and struct.error
and convert them to message.DecodeError.
Decoders are constructed using decoder constructors with the signature:
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
The arguments are:
field_number: The field number of the field we want to decode.
is_repeated: Is the field a repeated field? (bool)
is_packed: Is the field a packed field? (bool)
key: The key to use when looking up the field within field_dict.
(This is actually the FieldDescriptor but nothing in this
file should depend on that.)
new_default: A function which takes a message object as a parameter and
returns a new instance of the default value for this field.
(This is called for repeated fields and sub-messages, when an
instance does not already exist.)
As with encoders, we define a decoder constructor for every type of field.
Then, for every field of every message class we construct an actual decoder.
That decoder goes into a dict indexed by tag, so when we decode a message
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
"""
__author__ = '[email protected] (Kenton Varda)'
import math
import struct
from google.protobuf.internal import containers
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
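# Illustrative sketch (not part of the original module) of how the decoders
# built below are typically driven, per the module docstring: the parse loop
# reads a raw tag, looks it up in a per-message-class dict of decoders, and
# either invokes the matching decoder or skips the field. The name
# `decoders_by_tag` is assumed here for illustration only.
#
#   pos = 0
#   while pos < end:
#     (tag_bytes, pos) = ReadTag(buffer, pos)
#     field_decoder = decoders_by_tag.get(tag_bytes)
#     if field_decoder is None:
#       pos = SkipField(buffer, pos, end, tag_bytes)
#     else:
#       pos = field_decoder(buffer, pos, end, message, message._fields)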
def _VarintDecoder(mask, result_type):
"""Return an encoder for a basic varint value (does not include tag).
Decoded values will be bitwise-anded with the given mask before being
returned, e.g. to limit them to 32 bits. The returned decoder does not
take the usual "end" parameter -- the caller is expected to do bounds checking
after the fact (often the caller can defer such checking until later). The
decoder returns a (value, new_pos) pair.
"""
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = buffer[pos]
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
result &= mask
result = result_type(result)
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
def _SignedVarintDecoder(bits, result_type):
"""Like _VarintDecoder() but decodes signed values."""
signbit = 1 << (bits - 1)
mask = (1 << bits) - 1
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = buffer[pos]
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
result &= mask
result = (result ^ signbit) - signbit
result = result_type(result)
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
# All 32-bit and 64-bit values are represented as int.
_DecodeVarint = _VarintDecoder((1 << 64) - 1, int)
_DecodeSignedVarint = _SignedVarintDecoder(64, int)
# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int)
_DecodeSignedVarint32 = _SignedVarintDecoder(32, int)
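# Illustrative example (comment only, assuming a memoryview input as used
# throughout this module): 300 encodes as the varint b'\xac\x02', so
#   _DecodeVarint32(memoryview(b'\xac\x02'), 0)  ->  (300, 2)
# i.e. the decoded value plus the position just past the varint.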
def ReadTag(buffer, pos):
"""Read a tag from the memoryview, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python.
Args:
buffer: memoryview object of the encoded bytes
pos: int of the current position to start from
Returns:
Tuple[bytes, int] of the tag data and new position.
"""
start = pos
while buffer[pos] & 0x80:
pos += 1
pos += 1
tag_bytes = buffer[start:pos].tobytes()
return tag_bytes, pos
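# Illustrative example (comment only): the tag for field number 1 with wire
# type 0 (varint) is (1 << 3) | 0 == 8, so
#   ReadTag(memoryview(b'\x08\x96\x01'), 0)  ->  (b'\x08', 1)
# leaving the varint payload b'\x96\x01' (i.e. 150) for the field's decoder.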
# --------------------------------------------------------------------
def _SimpleDecoder(wire_type, decode_value):
"""Return a constructor for a decoder for fields of a particular type.
Args:
wire_type: The field's wire type.
decode_value: A function which decodes an individual value, e.g.
_DecodeVarint()
"""
def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default,
clear_if_default=False):
if is_packed:
local_DecodeVarint = _DecodeVarint
def DecodePackedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
(endpoint, pos) = local_DecodeVarint(buffer, pos)
endpoint += pos
if endpoint > end:
raise _DecodeError('Truncated message.')
while pos < endpoint:
(element, pos) = decode_value(buffer, pos)
value.append(element)
if pos > endpoint:
del value[-1] # Discard corrupt value.
raise _DecodeError('Packed element was truncated.')
return pos
return DecodePackedField
elif is_repeated:
tag_bytes = encoder.TagBytes(field_number, wire_type)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(element, new_pos) = decode_value(buffer, pos)
value.append(element)
# Predict that the next tag is another copy of the same repeated
# field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
# Prediction failed. Return.
if new_pos > end:
raise _DecodeError('Truncated message.')
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(new_value, pos) = decode_value(buffer, pos)
if pos > end:
raise _DecodeError('Truncated message.')
if clear_if_default and not new_value:
field_dict.pop(key, None)
else:
field_dict[key] = new_value
return pos
return DecodeField
return SpecificDecoder
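# Illustrative sketch (comment only): the concrete decoders defined near the
# bottom of this module, e.g. Int32Decoder, are produced by _SimpleDecoder;
# calling such a constructor as
#   Int32Decoder(field_number, is_repeated=False, is_packed=False,
#                key=field_descriptor, new_default=None)
# returns a DecodeField closure with the Decode(buffer, pos, end, message,
# field_dict) signature described in the module docstring. `field_descriptor`
# is a stand-in name used only for this example.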
def _ModifiedDecoder(wire_type, decode_value, modify_value):
"""Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode.
"""
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
def InnerDecode(buffer, pos):
(result, new_pos) = decode_value(buffer, pos)
return (modify_value(result), new_pos)
return _SimpleDecoder(wire_type, InnerDecode)
def _StructPackDecoder(wire_type, format):
"""Return a constructor for a decoder for a fixed-width field.
Args:
wire_type: The field's wire type.
format: The format string to pass to struct.unpack().
"""
value_size = struct.calcsize(format)
local_unpack = struct.unpack
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
def InnerDecode(buffer, pos):
new_pos = pos + value_size
result = local_unpack(format, buffer[pos:new_pos])[0]
return (result, new_pos)
return _SimpleDecoder(wire_type, InnerDecode)
def _FloatDecoder():
"""Returns a decoder for a float field.
This code works around a bug in struct.unpack for non-finite 32-bit
floating-point values.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
"""Decode serialized float to a float and new position.
Args:
buffer: memoryview of the serialized bytes
pos: int, position in the memory view to start at.
Returns:
Tuple[float, int] of the deserialized float value and new position
in the serialized data.
"""
# We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
new_pos = pos + 4
float_bytes = buffer[pos:new_pos].tobytes()
# If this value has all its exponent bits set, then it's non-finite.
# In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
# To avoid that, we parse it specially.
if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'):
# If at least one significand bit is set...
if float_bytes[0:3] != b'\x00\x00\x80':
return (math.nan, new_pos)
# If sign bit is set...
if float_bytes[3:4] == b'\xFF':
return (-math.inf, new_pos)
return (math.inf, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<f', float_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
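# Illustrative example (comment only): the little-endian float32 encoding of
# +infinity is b'\x00\x00\x80\x7f', which takes the special-case branch above
# and returns math.inf without calling struct.unpack on a non-finite value.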
def _DoubleDecoder():
"""Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
"""Decode serialized double to a double and new position.
Args:
buffer: memoryview of the serialized bytes.
pos: int, position in the memory view to start at.
Returns:
Tuple[float, int] of the decoded double value and new position
in the serialized data.
"""
# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
new_pos = pos + 8
double_bytes = buffer[pos:new_pos].tobytes()
# If this value has all its exponent bits set and at least one significand
# bit set, it's not a number. In Python 2.4, struct.unpack will treat it
# as inf or -inf. To avoid that, we treat it specially.
if ((double_bytes[7:8] in b'\x7F\xFF')
and (double_bytes[6:7] >= b'\xF0')
and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
return (math.nan, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<d', double_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
def EnumDecoder(field_number, is_repeated, is_packed, key, new_default,
clear_if_default=False):
"""Returns a decoder for enum field."""
enum_type = key.enum_type
if is_packed:
local_DecodeVarint = _DecodeVarint
def DecodePackedField(buffer, pos, end, message, field_dict):
"""Decode serialized packed enum to its value and a new position.
Args:
buffer: memoryview of the serialized bytes.
pos: int, position in the memory view to start at.
end: int, end position of serialized data
message: Message object to store unknown fields in
field_dict: Map[Descriptor, Any] to store decoded values in.
Returns:
int, new position in serialized data.
"""
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
(endpoint, pos) = local_DecodeVarint(buffer, pos)
endpoint += pos
if endpoint > end:
raise _DecodeError('Truncated message.')
while pos < endpoint:
value_start_pos = pos
(element, pos) = _DecodeSignedVarint32(buffer, pos)
# pylint: disable=protected-access
if element in enum_type.values_by_number:
value.append(element)
else:
if not message._unknown_fields:
message._unknown_fields = []
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_VARINT)
message._unknown_fields.append(
(tag_bytes, buffer[value_start_pos:pos].tobytes()))
if message._unknown_field_set is None:
message._unknown_field_set = containers.UnknownFieldSet()
message._unknown_field_set._add(
field_number, wire_format.WIRETYPE_VARINT, element)
# pylint: enable=protected-access
if pos > endpoint:
if element in enum_type.values_by_number:
del value[-1] # Discard corrupt value.
else:
del message._unknown_fields[-1]
# pylint: disable=protected-access
del message._unknown_field_set._values[-1]
# pylint: enable=protected-access
raise _DecodeError('Packed element was truncated.')
return pos
return DecodePackedField
elif is_repeated:
tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
"""Decode serialized repeated enum to its value and a new position.
Args:
buffer: memoryview of the serialized bytes.
pos: int, position in the memory view to start at.
end: int, end position of serialized data
message: Message object to store unknown fields in
field_dict: Map[Descriptor, Any] to store decoded values in.
Returns:
int, new position in serialized data.
"""
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(element, new_pos) = _DecodeSignedVarint32(buffer, pos)
# pylint: disable=protected-access
if element in enum_type.values_by_number:
value.append(element)
else:
if not message._unknown_fields:
message._unknown_fields = []
message._unknown_fields.append(
(tag_bytes, buffer[pos:new_pos].tobytes()))
if message._unknown_field_set is None:
message._unknown_field_set = containers.UnknownFieldSet()
message._unknown_field_set._add(
field_number, wire_format.WIRETYPE_VARINT, element)
# pylint: enable=protected-access
# Predict that the next tag is another copy of the same repeated
# field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
# Prediction failed. Return.
if new_pos > end:
raise _DecodeError('Truncated message.')
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
"""Decode serialized repeated enum to its value and a new position.
Args:
buffer: memoryview of the serialized bytes.
pos: int, position in the memory view to start at.
end: int, end position of serialized data
message: Message object to store unknown fields in
field_dict: Map[Descriptor, Any] to store decoded values in.
Returns:
int, new position in serialized data.
"""
value_start_pos = pos
(enum_value, pos) = _DecodeSignedVarint32(buffer, pos)
if pos > end:
raise _DecodeError('Truncated message.')
if clear_if_default and not enum_value:
field_dict.pop(key, None)
return pos
# pylint: disable=protected-access
if enum_value in enum_type.values_by_number:
field_dict[key] = enum_value
else:
if not message._unknown_fields:
message._unknown_fields = []
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_VARINT)
message._unknown_fields.append(
(tag_bytes, buffer[value_start_pos:pos].tobytes()))
if message._unknown_field_set is None:
message._unknown_field_set = containers.UnknownFieldSet()
message._unknown_field_set._add(
field_number, wire_format.WIRETYPE_VARINT, enum_value)
# pylint: enable=protected-access
return pos
return DecodeField
# --------------------------------------------------------------------
Int32Decoder = _SimpleDecoder(
wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)
Int64Decoder = _SimpleDecoder(
wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)
UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)
SInt32Decoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)
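# ZigZag encoding maps signed values onto unsigned varints so that small
# magnitudes stay small on the wire: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4,
# and so on. The SInt32/SInt64 decoders above therefore decode a plain varint
# first and then undo that mapping via wire_format.ZigZagDecode.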
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()
BoolDecoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
def StringDecoder(field_number, is_repeated, is_packed, key, new_default,
clear_if_default=False):
"""Returns a decoder for a string field."""
local_DecodeVarint = _DecodeVarint
def _ConvertToUnicode(memview):
"""Convert byte to unicode."""
byte_str = memview.tobytes()
try:
value = str(byte_str, 'utf-8')
except UnicodeDecodeError as e:
# add more information to the error message and re-raise it.
e.reason = '%s in field: %s' % (e, key.full_name)
raise
return value
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(_ConvertToUnicode(buffer[pos:new_pos]))
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
if clear_if_default and not size:
field_dict.pop(key, None)
else:
field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos])
return new_pos
return DecodeField
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default,
clear_if_default=False):
"""Returns a decoder for a bytes field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(buffer[pos:new_pos].tobytes())
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
if clear_if_default and not size:
field_dict.pop(key, None)
else:
field_dict[key] = buffer[pos:new_pos].tobytes()
return new_pos
return DecodeField
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a group field."""
end_tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_END_GROUP)
end_tag_len = len(end_tag_bytes)
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_START_GROUP)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value.add()._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
return new_pos
return DecodeField
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a message field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
if value._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it encountered
# an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
return new_pos
return DecodeField
# --------------------------------------------------------------------
MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)
def MessageSetItemDecoder(descriptor):
"""Returns a decoder for a MessageSet item.
The parameter is the message Descriptor.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_ReadTag = ReadTag
local_DecodeVarint = _DecodeVarint
local_SkipField = SkipField
def DecodeItem(buffer, pos, end, message, field_dict):
"""Decode serialized message set to its value and new position.
Args:
buffer: memoryview of the serialized bytes.
pos: int, position in the memory view to start at.
end: int, end position of serialized data
message: Message object to store unknown fields in
field_dict: Map[Descriptor, Any] to store decoded values in.
Returns:
int, new position in serialized data.
"""
message_set_item_start = pos
type_id = -1
message_start = -1
message_end = -1
# Technically, type_id and message can appear in any order, so we need
# a little loop here.
while 1:
(tag_bytes, pos) = local_ReadTag(buffer, pos)
if tag_bytes == type_id_tag_bytes:
(type_id, pos) = local_DecodeVarint(buffer, pos)
elif tag_bytes == message_tag_bytes:
(size, message_start) = local_DecodeVarint(buffer, pos)
pos = message_end = message_start + size
elif tag_bytes == item_end_tag_bytes:
break
else:
pos = SkipField(buffer, pos, end, tag_bytes)
if pos == -1:
raise _DecodeError('Missing group end tag.')
if pos > end:
raise _DecodeError('Truncated message.')
if type_id == -1:
raise _DecodeError('MessageSet item missing type_id.')
if message_start == -1:
raise _DecodeError('MessageSet item missing message.')
extension = message.Extensions._FindExtensionByNumber(type_id)
# pylint: disable=protected-access
if extension is not None:
value = field_dict.get(extension)
if value is None:
message_type = extension.message_type
if not hasattr(message_type, '_concrete_class'):
# pylint: disable=protected-access
message._FACTORY.GetPrototype(message_type)
value = field_dict.setdefault(
extension, message_type._concrete_class())
if value._InternalParse(buffer, message_start,message_end) != message_end:
# The only reason _InternalParse would return early is if it encountered
# an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
else:
if not message._unknown_fields:
message._unknown_fields = []
message._unknown_fields.append(
(MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos].tobytes()))
if message._unknown_field_set is None:
message._unknown_field_set = containers.UnknownFieldSet()
message._unknown_field_set._add(
type_id,
wire_format.WIRETYPE_LENGTH_DELIMITED,
buffer[message_start:message_end].tobytes())
# pylint: enable=protected-access
return pos
return DecodeItem
# --------------------------------------------------------------------
def MapDecoder(field_descriptor, new_default, is_message_map):
"""Returns a decoder for a map field."""
key = field_descriptor
tag_bytes = encoder.TagBytes(field_descriptor.number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
local_DecodeVarint = _DecodeVarint
# Can't read _concrete_class yet; might not be initialized.
message_type = field_descriptor.message_type
def DecodeMap(buffer, pos, end, message, field_dict):
submsg = message_type._concrete_class()
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
submsg.Clear()
if submsg._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
if is_message_map:
value[submsg.key].CopyFrom(submsg.value)
else:
value[submsg.key] = submsg.value
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeMap
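# Note (added commentary): a map field is encoded on the wire as a repeated
# message whose synthetic entry type has two fields, "key" and "value";
# DecodeMap above parses each entry into a scratch submessage and then copies
# submsg.key / submsg.value into the map container.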
# --------------------------------------------------------------------
# Optimization is not as heavy here because calls to SkipField() are rare,
# except for handling end-group tags.
def _SkipVarint(buffer, pos, end):
"""Skip a varint value. Returns the new position."""
# Previously ord(buffer[pos]) raised IndexError when pos is out of range.
# With this code, ord(b'') raises TypeError. Both are handled in
# python_message.py to generate a 'Truncated message' error.
while ord(buffer[pos:pos+1].tobytes()) & 0x80:
pos += 1
pos += 1
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipFixed64(buffer, pos, end):
"""Skip a fixed64 value. Returns the new position."""
pos += 8
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _DecodeFixed64(buffer, pos):
"""Decode a fixed64."""
new_pos = pos + 8
return (struct.unpack('<Q', buffer[pos:new_pos])[0], new_pos)
def _SkipLengthDelimited(buffer, pos, end):
"""Skip a length-delimited value. Returns the new position."""
(size, pos) = _DecodeVarint(buffer, pos)
pos += size
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipGroup(buffer, pos, end):
"""Skip sub-group. Returns the new position."""
while 1:
(tag_bytes, pos) = ReadTag(buffer, pos)
new_pos = SkipField(buffer, pos, end, tag_bytes)
if new_pos == -1:
return pos
pos = new_pos
def _DecodeUnknownFieldSet(buffer, pos, end_pos=None):
"""Decode UnknownFieldSet. Returns the UnknownFieldSet and new position."""
unknown_field_set = containers.UnknownFieldSet()
while end_pos is None or pos < end_pos:
(tag_bytes, pos) = ReadTag(buffer, pos)
(tag, _) = _DecodeVarint(tag_bytes, 0)
field_number, wire_type = wire_format.UnpackTag(tag)
if wire_type == wire_format.WIRETYPE_END_GROUP:
break
(data, pos) = _DecodeUnknownField(buffer, pos, wire_type)
# pylint: disable=protected-access
unknown_field_set._add(field_number, wire_type, data)
return (unknown_field_set, pos)
def _DecodeUnknownField(buffer, pos, wire_type):
"""Decode a unknown field. Returns the UnknownField and new position."""
if wire_type == wire_format.WIRETYPE_VARINT:
(data, pos) = _DecodeVarint(buffer, pos)
elif wire_type == wire_format.WIRETYPE_FIXED64:
(data, pos) = _DecodeFixed64(buffer, pos)
elif wire_type == wire_format.WIRETYPE_FIXED32:
(data, pos) = _DecodeFixed32(buffer, pos)
elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED:
(size, pos) = _DecodeVarint(buffer, pos)
data = buffer[pos:pos+size].tobytes()
pos += size
elif wire_type == wire_format.WIRETYPE_START_GROUP:
(data, pos) = _DecodeUnknownFieldSet(buffer, pos)
elif wire_type == wire_format.WIRETYPE_END_GROUP:
return (0, -1)
else:
raise _DecodeError('Wrong wire type in tag.')
return (data, pos)
def _EndGroup(buffer, pos, end):
"""Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
return -1
def _SkipFixed32(buffer, pos, end):
"""Skip a fixed32 value. Returns the new position."""
pos += 4
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _DecodeFixed32(buffer, pos):
"""Decode a fixed32."""
new_pos = pos + 4
return (struct.unpack('<I', buffer[pos:new_pos])[0], new_pos)
def _RaiseInvalidWireType(buffer, pos, end):
"""Skip function for unknown wire types. Raises an exception."""
raise _DecodeError('Tag had invalid wire type.')
def _FieldSkipper():
"""Constructs the SkipField function."""
WIRETYPE_TO_SKIPPER = [
_SkipVarint,
_SkipFixed64,
_SkipLengthDelimited,
_SkipGroup,
_EndGroup,
_SkipFixed32,
_RaiseInvalidWireType,
_RaiseInvalidWireType,
]
wiretype_mask = wire_format.TAG_TYPE_MASK
def SkipField(buffer, pos, end, tag_bytes):
"""Skips a field with the specified tag.
|pos| should point to the byte immediately after the tag.
Returns:
The new position (after the tag value), or -1 if the tag is an end-group
tag (in which case the calling loop should break).
"""
# The wire type is always in the first byte since varints are little-endian.
wire_type = ord(tag_bytes[0:1]) & wiretype_mask
return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)
return SkipField
SkipField = _FieldSkipper()
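# Illustrative example (comment only): tag b'\x08' has wire type 0 (varint),
# so SkipField(memoryview(b'\x08\xac\x02'), 1, 3, b'\x08') skips the two-byte
# varint payload and returns 3.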
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Snapshot Build Bisect Tool
This script bisects a snapshot archive using binary search. It starts at
a bad revision (it will try to guess HEAD) and asks for a last known-good
revision. It will then binary search across this revision range by downloading,
unzipping, and opening Chromium for you. After testing the specific revision,
it will ask you whether it is good or bad before continuing the search.
"""
# The base URL for stored build archives.
CHROMIUM_BASE_URL = ('http://commondatastorage.googleapis.com'
'/chromium-browser-snapshots')
WEBKIT_BASE_URL = ('http://commondatastorage.googleapis.com'
'/chromium-webkit-snapshots')
ASAN_BASE_URL = ('http://commondatastorage.googleapis.com'
'/chromium-browser-asan')
# The base URL for official builds.
OFFICIAL_BASE_URL = 'http://master.chrome.corp.google.com/official_builds'
# URL template for viewing changelogs between revisions.
CHANGELOG_URL = ('http://build.chromium.org'
'/f/chromium/perf/dashboard/ui/changelog.html'
'?url=/trunk/src&range=%d%%3A%d')
# URL template for viewing changelogs between official versions.
OFFICIAL_CHANGELOG_URL = ('http://omahaproxy.appspot.com/changelog'
'?old_version=%s&new_version=%s')
# DEPS file URL.
DEPS_FILE = 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d'
# Blink changelogs URL.
BLINK_CHANGELOG_URL = ('http://build.chromium.org'
'/f/chromium/perf/dashboard/ui/changelog_blink.html'
'?url=/trunk&range=%d%%3A%d')
DONE_MESSAGE_GOOD_MIN = ('You are probably looking for a change made after %s ('
'known good), but no later than %s (first known bad).')
DONE_MESSAGE_GOOD_MAX = ('You are probably looking for a change made after %s ('
'known bad), but no later than %s (first known good).')
CHROMIUM_GITHASH_TO_SVN_URL = (
'https://chromium.googlesource.com/chromium/src/+/%s?format=json')
BLINK_GITHASH_TO_SVN_URL = (
'https://chromium.googlesource.com/chromium/blink/+/%s?format=json')
GITHASH_TO_SVN_URL = {
'chromium': CHROMIUM_GITHASH_TO_SVN_URL,
'blink': BLINK_GITHASH_TO_SVN_URL,
}
# Search pattern to be matched in the JSON output from
# CHROMIUM_GITHASH_TO_SVN_URL to get the chromium revision (svn revision).
CHROMIUM_SEARCH_PATTERN = (
r'.*git-svn-id: svn://svn.chromium.org/chrome/trunk/src@(\d+) ')
# Search pattern to be matched in the json output from
# BLINK_GITHASH_TO_SVN_URL to get the blink revision (svn revision).
BLINK_SEARCH_PATTERN = (
r'.*git-svn-id: svn://svn.chromium.org/blink/trunk@(\d+) ')
SEARCH_PATTERN = {
'chromium': CHROMIUM_SEARCH_PATTERN,
'blink': BLINK_SEARCH_PATTERN,
}
###############################################################################
import json
import optparse
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import urllib
from distutils.version import LooseVersion
from xml.etree import ElementTree
import zipfile
class PathContext(object):
"""A PathContext is used to carry the information used to construct URLs and
paths when dealing with the storage server and archives."""
def __init__(self, base_url, platform, good_revision, bad_revision,
is_official, is_asan, use_local_repo, flash_path = None,
pdf_path = None):
super(PathContext, self).__init__()
# Store off the input parameters.
self.base_url = base_url
self.platform = platform # What's passed in to the '-a/--archive' option.
self.good_revision = good_revision
self.bad_revision = bad_revision
self.is_official = is_official
self.is_asan = is_asan
self.build_type = 'release'
self.flash_path = flash_path
    # Dictionary which stores svn revision number as key and its
# corresponding git hash as value. This data is populated in
# _FetchAndParse and used later in GetDownloadURL while downloading
# the build.
self.githash_svn_dict = {}
self.pdf_path = pdf_path
# The name of the ZIP file in a revision directory on the server.
self.archive_name = None
# If the script is run from a local Chromium checkout,
# "--use-local-repo" option can be used to make the script run faster.
# It uses "git svn find-rev <SHA1>" command to convert git hash to svn
# revision number.
self.use_local_repo = use_local_repo
# Set some internal members:
# _listing_platform_dir = Directory that holds revisions. Ends with a '/'.
# _archive_extract_dir = Uncompressed directory in the archive_name file.
# _binary_name = The name of the executable to run.
if self.platform in ('linux', 'linux64', 'linux-arm'):
self._binary_name = 'chrome'
elif self.platform == 'mac':
self.archive_name = 'chrome-mac.zip'
self._archive_extract_dir = 'chrome-mac'
elif self.platform == 'win':
self.archive_name = 'chrome-win32.zip'
self._archive_extract_dir = 'chrome-win32'
self._binary_name = 'chrome.exe'
else:
raise Exception('Invalid platform: %s' % self.platform)
if is_official:
if self.platform == 'linux':
self._listing_platform_dir = 'precise32bit/'
self.archive_name = 'chrome-precise32bit.zip'
self._archive_extract_dir = 'chrome-precise32bit'
elif self.platform == 'linux64':
self._listing_platform_dir = 'precise64bit/'
self.archive_name = 'chrome-precise64bit.zip'
self._archive_extract_dir = 'chrome-precise64bit'
elif self.platform == 'mac':
self._listing_platform_dir = 'mac/'
self._binary_name = 'Google Chrome.app/Contents/MacOS/Google Chrome'
elif self.platform == 'win':
self._listing_platform_dir = 'win/'
else:
if self.platform in ('linux', 'linux64', 'linux-arm'):
self.archive_name = 'chrome-linux.zip'
self._archive_extract_dir = 'chrome-linux'
if self.platform == 'linux':
self._listing_platform_dir = 'Linux/'
elif self.platform == 'linux64':
self._listing_platform_dir = 'Linux_x64/'
elif self.platform == 'linux-arm':
self._listing_platform_dir = 'Linux_ARM_Cross-Compile/'
elif self.platform == 'mac':
self._listing_platform_dir = 'Mac/'
self._binary_name = 'Chromium.app/Contents/MacOS/Chromium'
elif self.platform == 'win':
self._listing_platform_dir = 'Win/'
def GetASANPlatformDir(self):
"""ASAN builds are in directories like "linux-release", or have filenames
like "asan-win32-release-277079.zip". This aligns to our platform names
except in the case of Windows where they use "win32" instead of "win"."""
if self.platform == 'win':
return 'win32'
else:
return self.platform
def GetListingURL(self, marker=None):
"""Returns the URL for a directory listing, with an optional marker."""
marker_param = ''
if marker:
marker_param = '&marker=' + str(marker)
if self.is_asan:
prefix = '%s-%s' % (self.GetASANPlatformDir(), self.build_type)
return self.base_url + '/?delimiter=&prefix=' + prefix + marker_param
else:
return (self.base_url + '/?delimiter=/&prefix=' +
self._listing_platform_dir + marker_param)
def GetDownloadURL(self, revision):
"""Gets the download URL for a build archive of a specific revision."""
if self.is_asan:
return '%s/%s-%s/%s-%d.zip' % (
ASAN_BASE_URL, self.GetASANPlatformDir(), self.build_type,
self.GetASANBaseName(), revision)
if self.is_official:
return '%s/%s/%s%s' % (
OFFICIAL_BASE_URL, revision, self._listing_platform_dir,
self.archive_name)
else:
if str(revision) in self.githash_svn_dict:
revision = self.githash_svn_dict[str(revision)]
return '%s/%s%s/%s' % (self.base_url, self._listing_platform_dir,
revision, self.archive_name)
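  # Illustrative example (comment only): for a non-official linux64 snapshot
  # build, GetDownloadURL(282000) yields a URL of the form
  #   <base_url>/Linux_x64/282000/chrome-linux.zip
  # assuming the revision is not remapped through githash_svn_dict; the
  # revision number 282000 is just a placeholder.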
def GetLastChangeURL(self):
"""Returns a URL to the LAST_CHANGE file."""
return self.base_url + '/' + self._listing_platform_dir + 'LAST_CHANGE'
def GetASANBaseName(self):
"""Returns the base name of the ASAN zip file."""
if 'linux' in self.platform:
return 'asan-symbolized-%s-%s' % (self.GetASANPlatformDir(),
self.build_type)
else:
return 'asan-%s-%s' % (self.GetASANPlatformDir(), self.build_type)
def GetLaunchPath(self, revision):
"""Returns a relative path (presumably from the archive extraction location)
that is used to run the executable."""
if self.is_asan:
extract_dir = '%s-%d' % (self.GetASANBaseName(), revision)
else:
extract_dir = self._archive_extract_dir
return os.path.join(extract_dir, self._binary_name)
def ParseDirectoryIndex(self):
"""Parses the Google Storage directory listing into a list of revision
numbers."""
def _FetchAndParse(url):
"""Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If
next-marker is not None, then the listing is a partial listing and another
fetch should be performed with next-marker being the marker= GET
parameter."""
handle = urllib.urlopen(url)
document = ElementTree.parse(handle)
# All nodes in the tree are namespaced. Get the root's tag name to extract
# the namespace. Etree does namespaces as |{namespace}tag|.
root_tag = document.getroot().tag
end_ns_pos = root_tag.find('}')
if end_ns_pos == -1:
raise Exception('Could not locate end namespace for directory index')
namespace = root_tag[:end_ns_pos + 1]
# Find the prefix (_listing_platform_dir) and whether or not the list is
# truncated.
prefix_len = len(document.find(namespace + 'Prefix').text)
next_marker = None
is_truncated = document.find(namespace + 'IsTruncated')
if is_truncated is not None and is_truncated.text.lower() == 'true':
next_marker = document.find(namespace + 'NextMarker').text
# Get a list of all the revisions.
revisions = []
githash_svn_dict = {}
if self.is_asan:
asan_regex = re.compile(r'.*%s-(\d+)\.zip$' % (self.GetASANBaseName()))
# Non ASAN builds are in a <revision> directory. The ASAN builds are
# flat
all_prefixes = document.findall(namespace + 'Contents/' +
namespace + 'Key')
for prefix in all_prefixes:
m = asan_regex.match(prefix.text)
if m:
try:
revisions.append(int(m.group(1)))
except ValueError:
pass
else:
all_prefixes = document.findall(namespace + 'CommonPrefixes/' +
namespace + 'Prefix')
# The <Prefix> nodes have content of the form of
# |_listing_platform_dir/revision/|. Strip off the platform dir and the
# trailing slash to just have a number.
for prefix in all_prefixes:
revnum = prefix.text[prefix_len:-1]
try:
if not revnum.isdigit():
git_hash = revnum
revnum = self.GetSVNRevisionFromGitHash(git_hash)
githash_svn_dict[revnum] = git_hash
if revnum is not None:
revnum = int(revnum)
revisions.append(revnum)
except ValueError:
pass
return (revisions, next_marker, githash_svn_dict)
# Fetch the first list of revisions.
(revisions, next_marker, self.githash_svn_dict) = _FetchAndParse(
self.GetListingURL())
# If the result list was truncated, refetch with the next marker. Do this
# until an entire directory listing is done.
while next_marker:
next_url = self.GetListingURL(next_marker)
(new_revisions, next_marker, new_dict) = _FetchAndParse(next_url)
revisions.extend(new_revisions)
self.githash_svn_dict.update(new_dict)
return revisions
def _GetSVNRevisionFromGitHashWithoutGitCheckout(self, git_sha1, depot):
json_url = GITHASH_TO_SVN_URL[depot] % git_sha1
response = urllib.urlopen(json_url)
if response.getcode() == 200:
try:
data = json.loads(response.read()[4:])
except ValueError:
print 'ValueError for JSON URL: %s' % json_url
raise ValueError
else:
raise ValueError
if 'message' in data:
message = data['message'].split('\n')
message = [line for line in message if line.strip()]
search_pattern = re.compile(SEARCH_PATTERN[depot])
result = search_pattern.search(message[len(message)-1])
if result:
return result.group(1)
print 'Failed to get svn revision number for %s' % git_sha1
raise ValueError
def _GetSVNRevisionFromGitHashFromGitCheckout(self, git_sha1, depot):
def _RunGit(command, path):
command = ['git'] + command
if path:
original_path = os.getcwd()
os.chdir(path)
shell = sys.platform.startswith('win')
proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(output, _) = proc.communicate()
if path:
os.chdir(original_path)
return (output, proc.returncode)
path = None
if depot == 'blink':
path = os.path.join(os.getcwd(), 'third_party', 'WebKit')
if os.path.basename(os.getcwd()) == 'src':
command = ['svn', 'find-rev', git_sha1]
(git_output, return_code) = _RunGit(command, path)
if not return_code:
return git_output.strip('\n')
raise ValueError
else:
print ('Script should be run from src folder. ' +
             'Eg: python tools/bisect-builds.py -g 280588 -b 280590 ' +
'--archive linux64 --use-local-repo')
sys.exit(1)
def GetSVNRevisionFromGitHash(self, git_sha1, depot='chromium'):
if not self.use_local_repo:
return self._GetSVNRevisionFromGitHashWithoutGitCheckout(git_sha1, depot)
else:
return self._GetSVNRevisionFromGitHashFromGitCheckout(git_sha1, depot)
def GetRevList(self):
"""Gets the list of revision numbers between self.good_revision and
self.bad_revision."""
# Download the revlist and filter for just the range between good and bad.
minrev = min(self.good_revision, self.bad_revision)
maxrev = max(self.good_revision, self.bad_revision)
revlist_all = map(int, self.ParseDirectoryIndex())
revlist = [x for x in revlist_all if x >= int(minrev) and x <= int(maxrev)]
revlist.sort()
# Set good and bad revisions to be legit revisions.
if revlist:
if self.good_revision < self.bad_revision:
self.good_revision = revlist[0]
self.bad_revision = revlist[-1]
else:
self.bad_revision = revlist[0]
self.good_revision = revlist[-1]
# Fix chromium rev so that the deps blink revision matches REVISIONS file.
if self.base_url == WEBKIT_BASE_URL:
revlist_all.sort()
self.good_revision = FixChromiumRevForBlink(revlist,
revlist_all,
self,
self.good_revision)
self.bad_revision = FixChromiumRevForBlink(revlist,
revlist_all,
self,
self.bad_revision)
return revlist
def GetOfficialBuildsList(self):
"""Gets the list of official build numbers between self.good_revision and
self.bad_revision."""
# Download the revlist and filter for just the range between good and bad.
minrev = min(self.good_revision, self.bad_revision)
maxrev = max(self.good_revision, self.bad_revision)
handle = urllib.urlopen(OFFICIAL_BASE_URL)
dirindex = handle.read()
handle.close()
build_numbers = re.findall(r'<a href="([0-9][0-9].*)/">', dirindex)
final_list = []
i = 0
parsed_build_numbers = [LooseVersion(x) for x in build_numbers]
for build_number in sorted(parsed_build_numbers):
path = (OFFICIAL_BASE_URL + '/' + str(build_number) + '/' +
self._listing_platform_dir + self.archive_name)
i = i + 1
try:
connection = urllib.urlopen(path)
connection.close()
if build_number > maxrev:
break
if build_number >= minrev:
final_list.append(str(build_number))
except urllib.HTTPError:
pass
return final_list
def UnzipFilenameToDir(filename, directory):
"""Unzip |filename| to |directory|."""
cwd = os.getcwd()
if not os.path.isabs(filename):
filename = os.path.join(cwd, filename)
zf = zipfile.ZipFile(filename)
# Make base.
if not os.path.isdir(directory):
os.mkdir(directory)
os.chdir(directory)
# Extract files.
for info in zf.infolist():
name = info.filename
if name.endswith('/'): # dir
if not os.path.isdir(name):
os.makedirs(name)
else: # file
directory = os.path.dirname(name)
if not os.path.isdir(directory):
os.makedirs(directory)
out = open(name, 'wb')
out.write(zf.read(name))
out.close()
# Set permissions. Permission info in external_attr is shifted 16 bits.
os.chmod(name, info.external_attr >> 16L)
os.chdir(cwd)
def FetchRevision(context, rev, filename, quit_event=None, progress_event=None):
"""Downloads and unzips revision |rev|.
@param context A PathContext instance.
@param rev The Chromium revision number/tag to download.
@param filename The destination for the downloaded file.
@param quit_event A threading.Event which will be set by the master thread to
indicate that the download should be aborted.
@param progress_event A threading.Event which will be set by the master thread
to indicate that the progress of the download should be
displayed.
"""
def ReportHook(blocknum, blocksize, totalsize):
if quit_event and quit_event.isSet():
raise RuntimeError('Aborting download of revision %s' % str(rev))
if progress_event and progress_event.isSet():
size = blocknum * blocksize
if totalsize == -1: # Total size not known.
progress = 'Received %d bytes' % size
else:
size = min(totalsize, size)
progress = 'Received %d of %d bytes, %.2f%%' % (
size, totalsize, 100.0 * size / totalsize)
# Send a \r to let all progress messages use just one line of output.
sys.stdout.write('\r' + progress)
sys.stdout.flush()
download_url = context.GetDownloadURL(rev)
try:
urllib.urlretrieve(download_url, filename, ReportHook)
if progress_event and progress_event.isSet():
print
except RuntimeError:
pass
def RunRevision(context, revision, zip_file, profile, num_runs, command, args):
"""Given a zipped revision, unzip it and run the test."""
print 'Trying revision %s...' % str(revision)
# Create a temp directory and unzip the revision into it.
cwd = os.getcwd()
tempdir = tempfile.mkdtemp(prefix='bisect_tmp')
UnzipFilenameToDir(zip_file, tempdir)
os.chdir(tempdir)
# Run the build as many times as specified.
testargs = ['--user-data-dir=%s' % profile] + args
# The sandbox must be run as root on Official Chrome, so bypass it.
if ((context.is_official or context.flash_path or context.pdf_path) and
context.platform.startswith('linux')):
testargs.append('--no-sandbox')
if context.flash_path:
testargs.append('--ppapi-flash-path=%s' % context.flash_path)
    # We have to pass a large enough Flash version, which currently does not
    # need to be correct. Instead of requiring the user of the script to
    # figure out and pass the correct version, we just spoof it.
testargs.append('--ppapi-flash-version=99.9.999.999')
# TODO(vitalybuka): Remove in the future. See crbug.com/395687.
if context.pdf_path:
shutil.copy(context.pdf_path,
os.path.dirname(context.GetLaunchPath(revision)))
testargs.append('--enable-print-preview')
runcommand = []
for token in shlex.split(command):
if token == '%a':
runcommand.extend(testargs)
else:
runcommand.append(
token.replace('%p', os.path.abspath(context.GetLaunchPath(revision))).
replace('%s', ' '.join(testargs)))
results = []
for _ in range(num_runs):
subproc = subprocess.Popen(runcommand,
bufsize=-1,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = subproc.communicate()
results.append((subproc.returncode, stdout, stderr))
os.chdir(cwd)
try:
shutil.rmtree(tempdir, True)
except Exception:
pass
for (returncode, stdout, stderr) in results:
if returncode:
return (returncode, stdout, stderr)
return results[0]
# The arguments official_builds, status, stdout and stderr are unused.
# They are present here because this function is passed to Bisect which then
# calls it with 5 arguments.
# pylint: disable=W0613
def AskIsGoodBuild(rev, official_builds, status, stdout, stderr):
"""Asks the user whether build |rev| is good or bad."""
# Loop until we get a response that we can parse.
while True:
response = raw_input('Revision %s is '
'[(g)ood/(b)ad/(r)etry/(u)nknown/(q)uit]: ' %
str(rev))
if response and response in ('g', 'b', 'r', 'u'):
return response
if response and response == 'q':
raise SystemExit()
def IsGoodASANBuild(rev, official_builds, status, stdout, stderr):
"""Determine if an ASAN build |rev| is good or bad
  Examines stderr looking for the error message emitted by ASAN. If it is not
  found, falls back to asking the user."""
if stderr:
bad_count = 0
for line in stderr.splitlines():
print line
if line.find('ERROR: AddressSanitizer:') != -1:
bad_count += 1
if bad_count > 0:
print 'Revision %d determined to be bad.' % rev
return 'b'
return AskIsGoodBuild(rev, official_builds, status, stdout, stderr)
class DownloadJob(object):
"""DownloadJob represents a task to download a given Chromium revision."""
def __init__(self, context, name, rev, zip_file):
super(DownloadJob, self).__init__()
# Store off the input parameters.
self.context = context
self.name = name
self.rev = rev
self.zip_file = zip_file
self.quit_event = threading.Event()
self.progress_event = threading.Event()
self.thread = None
def Start(self):
"""Starts the download."""
fetchargs = (self.context,
self.rev,
self.zip_file,
self.quit_event,
self.progress_event)
self.thread = threading.Thread(target=FetchRevision,
name=self.name,
args=fetchargs)
self.thread.start()
def Stop(self):
"""Stops the download which must have been started previously."""
assert self.thread, 'DownloadJob must be started before Stop is called.'
self.quit_event.set()
self.thread.join()
os.unlink(self.zip_file)
def WaitFor(self):
"""Prints a message and waits for the download to complete. The download
must have been started previously."""
assert self.thread, 'DownloadJob must be started before WaitFor is called.'
print 'Downloading revision %s...' % str(self.rev)
self.progress_event.set() # Display progress of download.
self.thread.join()
def Bisect(context,
num_runs=1,
command='%p %a',
try_args=(),
profile=None,
interactive=True,
evaluate=AskIsGoodBuild):
"""Given known good and known bad revisions, run a binary search on all
archived revisions to determine the last known good revision.
@param context PathContext object initialized with user provided parameters.
@param num_runs Number of times to run each build for asking good/bad.
  @param command The command to execute for each run; %p expands to the path
                 of the downloaded build and %a to the extra arguments.
  @param try_args A tuple of arguments to pass to the test application.
  @param profile The name of the user profile to run with.
  @param interactive If False, use the command's exit code to judge whether
                     the argument build is good or bad.
  @param evaluate A function which returns 'g' if the argument build is good,
                  'b' if it's bad, 'r' to retry, or 'u' if unknown.
Threading is used to fetch Chromium revisions in the background, speeding up
the user's experience. For example, suppose the bounds of the search are
good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on
whether revision 50 is good or bad, the next revision to check will be either
25 or 75. So, while revision 50 is being checked, the script will download
revisions 25 and 75 in the background. Once the good/bad verdict on rev 50 is
known:
- If rev 50 is good, the download of rev 25 is cancelled, and the next test
is run on rev 75.
- If rev 50 is bad, the download of rev 75 is cancelled, and the next test
is run on rev 25.
"""
if not profile:
profile = 'profile'
good_rev = context.good_revision
bad_rev = context.bad_revision
cwd = os.getcwd()
print 'Downloading list of known revisions...',
if not context.use_local_repo:
print '(use --use-local-repo for speed if you have a local checkout)'
else:
print
_GetDownloadPath = lambda rev: os.path.join(cwd,
'%s-%s' % (str(rev), context.archive_name))
if context.is_official:
revlist = context.GetOfficialBuildsList()
else:
revlist = context.GetRevList()
# Get a list of revisions to bisect across.
if len(revlist) < 2: # Don't have enough builds to bisect.
msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist
raise RuntimeError(msg)
# Figure out our bookends and first pivot point; fetch the pivot revision.
minrev = 0
maxrev = len(revlist) - 1
pivot = maxrev / 2
rev = revlist[pivot]
zip_file = _GetDownloadPath(rev)
fetch = DownloadJob(context, 'initial_fetch', rev, zip_file)
fetch.Start()
fetch.WaitFor()
# Binary search time!
while fetch and fetch.zip_file and maxrev - minrev > 1:
if bad_rev < good_rev:
min_str, max_str = 'bad', 'good'
else:
min_str, max_str = 'good', 'bad'
print 'Bisecting range [%s (%s), %s (%s)].' % (revlist[minrev], min_str,
revlist[maxrev], max_str)
# Pre-fetch next two possible pivots
# - down_pivot is the next revision to check if the current revision turns
# out to be bad.
# - up_pivot is the next revision to check if the current revision turns
# out to be good.
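    # For example (hypothetical indices): with minrev=0, maxrev=100 and
    # pivot=50, down_pivot = int((50 - 0) / 2) + 0 = 25 and
    # up_pivot = int((100 - 50) / 2) + 50 = 75, matching the 25/75 prefetch
    # described in the docstring above.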
down_pivot = int((pivot - minrev) / 2) + minrev
down_fetch = None
if down_pivot != pivot and down_pivot != minrev:
down_rev = revlist[down_pivot]
down_fetch = DownloadJob(context, 'down_fetch', down_rev,
_GetDownloadPath(down_rev))
down_fetch.Start()
up_pivot = int((maxrev - pivot) / 2) + pivot
up_fetch = None
if up_pivot != pivot and up_pivot != maxrev:
up_rev = revlist[up_pivot]
up_fetch = DownloadJob(context, 'up_fetch', up_rev,
_GetDownloadPath(up_rev))
up_fetch.Start()
# Run test on the pivot revision.
status = None
stdout = None
stderr = None
try:
(status, stdout, stderr) = RunRevision(context,
rev,
fetch.zip_file,
profile,
num_runs,
command,
try_args)
except Exception, e:
print >> sys.stderr, e
# Call the evaluate function to see if the current revision is good or bad.
# On that basis, kill one of the background downloads and complete the
# other, as described in the comments above.
try:
if not interactive:
if status:
answer = 'b'
print 'Bad revision: %s' % rev
else:
answer = 'g'
print 'Good revision: %s' % rev
else:
answer = evaluate(rev, context.is_official, status, stdout, stderr)
if ((answer == 'g' and good_rev < bad_rev)
or (answer == 'b' and bad_rev < good_rev)):
fetch.Stop()
minrev = pivot
if down_fetch:
down_fetch.Stop() # Kill the download of the older revision.
fetch = None
if up_fetch:
up_fetch.WaitFor()
pivot = up_pivot
fetch = up_fetch
elif ((answer == 'b' and good_rev < bad_rev)
or (answer == 'g' and bad_rev < good_rev)):
fetch.Stop()
maxrev = pivot
if up_fetch:
up_fetch.Stop() # Kill the download of the newer revision.
fetch = None
if down_fetch:
down_fetch.WaitFor()
pivot = down_pivot
fetch = down_fetch
elif answer == 'r':
pass # Retry requires no changes.
elif answer == 'u':
# Nuke the revision from the revlist and choose a new pivot.
fetch.Stop()
revlist.pop(pivot)
maxrev -= 1 # Assumes maxrev >= pivot.
if maxrev - minrev > 1:
# Alternate between using down_pivot or up_pivot for the new pivot
# point, without affecting the range. Do this instead of setting the
# pivot to the midpoint of the new range because adjacent revisions
# are likely affected by the same issue that caused the (u)nknown
# response.
if up_fetch and down_fetch:
fetch = [up_fetch, down_fetch][len(revlist) % 2]
elif up_fetch:
fetch = up_fetch
else:
fetch = down_fetch
fetch.WaitFor()
if fetch == up_fetch:
pivot = up_pivot - 1 # Subtracts 1 because revlist was resized.
else:
pivot = down_pivot
zip_file = fetch.zip_file
if down_fetch and fetch != down_fetch:
down_fetch.Stop()
if up_fetch and fetch != up_fetch:
up_fetch.Stop()
else:
assert False, 'Unexpected return value from evaluate(): ' + answer
except SystemExit:
print 'Cleaning up...'
for f in [_GetDownloadPath(revlist[down_pivot]),
_GetDownloadPath(revlist[up_pivot])]:
try:
os.unlink(f)
except OSError:
pass
sys.exit(0)
rev = revlist[pivot]
return (revlist[minrev], revlist[maxrev], context)
def GetBlinkDEPSRevisionForChromiumRevision(rev):
  """Returns the blink revision that was in the DEPS file at
  chromium revision |rev|."""
# . doesn't match newlines without re.DOTALL, so this is safe.
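  # The DEPS file is expected to contain an entry such as
  #   'webkit_revision': '116044',
  # (example value only); the regex below captures the numeric revision.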
blink_re = re.compile(r'webkit_revision\D*(\d+)')
url = urllib.urlopen(DEPS_FILE % rev)
m = blink_re.search(url.read())
url.close()
if m:
return int(m.group(1))
else:
raise Exception('Could not get Blink revision for Chromium rev %d' % rev)
def GetBlinkRevisionForChromiumRevision(context, rev):
"""Returns the blink revision that was in REVISIONS file at
chromium revision |rev|."""
def _IsRevisionNumber(revision):
if isinstance(revision, int):
return True
else:
return revision.isdigit()
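  # If |rev| is a git hash we have a mapping for, translate it to the
  # corresponding SVN revision before building the snapshot URL below.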
if str(rev) in context.githash_svn_dict:
rev = context.githash_svn_dict[str(rev)]
file_url = '%s/%s%s/REVISIONS' % (context.base_url,
context._listing_platform_dir, rev)
url = urllib.urlopen(file_url)
if url.getcode() == 200:
try:
data = json.loads(url.read())
except ValueError:
print 'ValueError for JSON URL: %s' % file_url
raise ValueError
else:
raise ValueError
url.close()
if 'webkit_revision' in data:
blink_rev = data['webkit_revision']
if not _IsRevisionNumber(blink_rev):
blink_rev = int(context.GetSVNRevisionFromGitHash(blink_rev, 'blink'))
return blink_rev
else:
raise Exception('Could not get blink revision for cr rev %d' % rev)
def FixChromiumRevForBlink(revisions_final, revisions, self, rev):
  """Returns the chromium revision that has the correct blink revision
  for blink bisect. DEPS and REVISIONS files might not match since
  blink snapshots point to tip-of-tree blink.

  Note: The revisions_final variable might get modified to include
  additional revisions."""
blink_deps_rev = GetBlinkDEPSRevisionForChromiumRevision(rev)
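  # Walk backwards through |revisions| until the blink revision recorded in
  # the snapshot's REVISIONS file no longer exceeds the blink revision pinned
  # in DEPS; the resulting chromium revision is recorded in |revisions_final|.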
while (GetBlinkRevisionForChromiumRevision(self, rev) > blink_deps_rev):
idx = revisions.index(rev)
if idx > 0:
rev = revisions[idx-1]
if rev not in revisions_final:
revisions_final.insert(0, rev)
revisions_final.sort()
return rev
def GetChromiumRevision(context, url):
"""Returns the chromium revision read from given URL."""
try:
# Location of the latest build revision number
latest_revision = urllib.urlopen(url).read()
if latest_revision.isdigit():
return int(latest_revision)
return context.GetSVNRevisionFromGitHash(latest_revision)
except Exception:
print 'Could not determine latest revision. This could be bad...'
return 999999999
def main():
usage = ('%prog [options] [-- chromium-options]\n'
'Perform binary search on the snapshot builds to find a minimal\n'
'range of revisions where a behavior change happened. The\n'
'behaviors are described as "good" and "bad".\n'
'It is NOT assumed that the behavior of the later revision is\n'
'the bad one.\n'
'\n'
'Revision numbers should use\n'
' Official versions (e.g. 1.0.1000.0) for official builds. (-o)\n'
' SVN revisions (e.g. 123456) for chromium builds, from trunk.\n'
' Use base_trunk_revision from http://omahaproxy.appspot.com/\n'
' for earlier revs.\n'
           ' Chrome\'s about: build number and omahaproxy branch_revision\n'
           ' are incorrect; they are from branches.\n'
'\n'
'Tip: add "-- --no-first-run" to bypass the first run prompts.')
parser = optparse.OptionParser(usage=usage)
# Strangely, the default help output doesn't include the choice list.
choices = ['mac', 'win', 'linux', 'linux64', 'linux-arm']
# linux-chromiumos lacks a continuous archive http://crbug.com/78158
parser.add_option('-a', '--archive',
choices=choices,
help='The buildbot archive to bisect [%s].' %
'|'.join(choices))
parser.add_option('-o',
action='store_true',
dest='official_builds',
help='Bisect across official Chrome builds (internal '
'only) instead of Chromium archives.')
parser.add_option('-b', '--bad',
type='str',
help='A bad revision to start bisection. '
'May be earlier or later than the good revision. '
'Default is HEAD.')
parser.add_option('-f', '--flash_path',
type='str',
help='Absolute path to a recent Adobe Pepper Flash '
'binary to be used in this bisection (e.g. '
'on Windows C:\...\pepflashplayer.dll and on Linux '
'/opt/google/chrome/PepperFlash/'
'libpepflashplayer.so).')
parser.add_option('-d', '--pdf_path',
type='str',
help='Absolute path to a recent PDF plugin '
'binary to be used in this bisection (e.g. '
'on Windows C:\...\pdf.dll and on Linux '
'/opt/google/chrome/libpdf.so). Option also enables '
'print preview.')
parser.add_option('-g', '--good',
type='str',
help='A good revision to start bisection. ' +
'May be earlier or later than the bad revision. ' +
'Default is 0.')
parser.add_option('-p', '--profile', '--user-data-dir',
type='str',
default='profile',
help='Profile to use; this will not reset every run. '
'Defaults to a clean profile.')
parser.add_option('-t', '--times',
type='int',
default=1,
help='Number of times to run each build before asking '
'if it\'s good or bad. Temporary profiles are reused.')
parser.add_option('-c', '--command',
type='str',
default='%p %a',
help='Command to execute. %p and %a refer to Chrome '
'executable and specified extra arguments '
'respectively. Use %s to specify all extra arguments '
'as one string. Defaults to "%p %a". Note that any '
'extra paths specified should be absolute.')
parser.add_option('-l', '--blink',
action='store_true',
help='Use Blink bisect instead of Chromium. ')
parser.add_option('', '--not-interactive',
action='store_true',
default=False,
help='Use command exit code to tell good/bad revision.')
parser.add_option('--asan',
dest='asan',
action='store_true',
default=False,
help='Allow the script to bisect ASAN builds')
parser.add_option('--use-local-repo',
dest='use_local_repo',
action='store_true',
default=False,
help='Allow the script to convert git SHA1 to SVN '
'revision using "git svn find-rev <SHA1>" '
'command from a Chromium checkout.')
(opts, args) = parser.parse_args()
if opts.archive is None:
print 'Error: missing required parameter: --archive'
print
parser.print_help()
return 1
if opts.asan:
supported_platforms = ['linux', 'mac', 'win']
if opts.archive not in supported_platforms:
print 'Error: ASAN bisecting only supported on these platforms: [%s].' % (
'|'.join(supported_platforms))
return 1
if opts.official_builds:
      print 'Error: Bisecting official ASAN builds is not yet supported.'
return 1
if opts.asan:
base_url = ASAN_BASE_URL
elif opts.blink:
base_url = WEBKIT_BASE_URL
else:
base_url = CHROMIUM_BASE_URL
# Create the context. Initialize 0 for the revisions as they are set below.
context = PathContext(base_url, opts.archive, opts.good, opts.bad,
opts.official_builds, opts.asan, opts.use_local_repo,
opts.flash_path, opts.pdf_path)
# Pick a starting point, try to get HEAD for this.
if not opts.bad:
context.bad_revision = '999.0.0.0'
context.bad_revision = GetChromiumRevision(
context, context.GetLastChangeURL())
# Find out when we were good.
if not opts.good:
context.good_revision = '0.0.0.0' if opts.official_builds else 0
if opts.flash_path:
msg = 'Could not find Flash binary at %s' % opts.flash_path
assert os.path.exists(opts.flash_path), msg
if opts.pdf_path:
msg = 'Could not find PDF binary at %s' % opts.pdf_path
assert os.path.exists(opts.pdf_path), msg
if opts.official_builds:
context.good_revision = LooseVersion(context.good_revision)
context.bad_revision = LooseVersion(context.bad_revision)
else:
context.good_revision = int(context.good_revision)
context.bad_revision = int(context.bad_revision)
if opts.times < 1:
print('Number of times to run (%d) must be greater than or equal to 1.' %
opts.times)
parser.print_help()
return 1
if opts.asan:
evaluator = IsGoodASANBuild
else:
evaluator = AskIsGoodBuild
# Save these revision numbers to compare when showing the changelog URL
# after the bisect.
good_rev = context.good_revision
bad_rev = context.bad_revision
(min_chromium_rev, max_chromium_rev, context) = Bisect(
context, opts.times, opts.command, args, opts.profile,
not opts.not_interactive, evaluator)
# Get corresponding blink revisions.
try:
min_blink_rev = GetBlinkRevisionForChromiumRevision(context,
min_chromium_rev)
max_blink_rev = GetBlinkRevisionForChromiumRevision(context,
max_chromium_rev)
except Exception:
# Silently ignore the failure.
min_blink_rev, max_blink_rev = 0, 0
if opts.blink:
# We're done. Let the user know the results in an official manner.
if good_rev > bad_rev:
print DONE_MESSAGE_GOOD_MAX % (str(min_blink_rev), str(max_blink_rev))
else:
print DONE_MESSAGE_GOOD_MIN % (str(min_blink_rev), str(max_blink_rev))
print 'BLINK CHANGELOG URL:'
print ' ' + BLINK_CHANGELOG_URL % (max_blink_rev, min_blink_rev)
else:
# We're done. Let the user know the results in an official manner.
if good_rev > bad_rev:
print DONE_MESSAGE_GOOD_MAX % (str(min_chromium_rev),
str(max_chromium_rev))
else:
print DONE_MESSAGE_GOOD_MIN % (str(min_chromium_rev),
str(max_chromium_rev))
if min_blink_rev != max_blink_rev:
print ('NOTE: There is a Blink roll in the range, '
'you might also want to do a Blink bisect.')
print 'CHANGELOG URL:'
if opts.official_builds:
print OFFICIAL_CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)
else:
print ' ' + CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.target_assigner."""
import numpy as np
import tensorflow as tf
from object_detection.box_coders import keypoint_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_list
from object_detection.core import region_similarity_calculator
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as targetassigner
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.utils import test_case
class TargetAssignerTest(test_case.TestCase):
def test_assign_agnostic(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder, unmatched_cls_target=None)
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0]], dtype=np.float32)
anchor_stddevs = np.array(3 * [4 * [.1]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [1, 1, 1]
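    # The regression targets below follow the MeanStddevBoxCoder encoding,
    # (groundtruth corner - anchor corner) / stddev; e.g. for the second
    # anchor: ([0.5, 0.5, 0.9, 0.9] - [0.5, 0.5, 1.0, 0.8]) / 0.1 =
    # [0, 0, -1, 1].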
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_box_corners])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_class_agnostic_with_ignored_matches(self):
    # Note: This test is very similar to the one above. The third box matches
    # with an IOU of 0.35, which is between the matched and unmatched
    # thresholds. As above, the expected classification targets are [1, 1, 0].
    # Unlike above, the third target is ignored, and therefore the expected
    # classification weights are [1, 1, 0].
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.3)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder, unmatched_cls_target=None)
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
anchor_stddevs = np.array(3 * [4 * [.1]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]], dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [1, 1, 0]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_box_corners])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_agnostic_with_keypoints(self):
def graph_fn(anchor_means, groundtruth_box_corners,
groundtruth_keypoints):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0])
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder, unmatched_cls_target=None)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
groundtruth_boxlist.add_field(fields.BoxListFields.keypoints,
groundtruth_keypoints)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.45, 0.45, 0.95, 0.95]],
dtype=np.float32)
groundtruth_keypoints = np.array(
[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]],
[[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [1, 1, 1]
exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13,
-5],
[-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11,
-11, -7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means,
groundtruth_box_corners,
groundtruth_keypoints])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_class_agnostic_with_keypoints_and_ignored_matches(self):
    # Note: This test is very similar to the one above. The third box matches
    # with an IOU of 0.35, which is between the matched and unmatched
    # thresholds. As above, the expected classification targets are [1, 1, 0].
    # Unlike above, the third target is ignored, and therefore the expected
    # classification weights are [1, 1, 0].
def graph_fn(anchor_means, groundtruth_box_corners,
groundtruth_keypoints):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0])
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder, unmatched_cls_target=None)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
groundtruth_boxlist.add_field(fields.BoxListFields.keypoints,
groundtruth_keypoints)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.45, 0.45, 0.95, 0.95]],
dtype=np.float32)
groundtruth_keypoints = np.array(
[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]],
[[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [1, 1, 1]
exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13,
-5],
[-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11,
-11, -7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means,
groundtruth_box_corners,
groundtruth_keypoints])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_multiclass(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners,
groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist,
groundtruth_labels)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
anchor_stddevs = np.array(4 * [4 * [.1]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)
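    # The third anchor overlaps no groundtruth box above the 0.5 IOU
    # threshold, so it receives the unmatched (background) target
    # [1, 0, 0, 0, 0, 0, 0] and a regression weight of 0, as reflected in the
    # expectations below.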
exp_cls_targets = [[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0]]
exp_cls_weights = [1, 1, 1, 1]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0],
[0, 0, -.5, .2]]
exp_reg_weights = [1, 1, 0, 1]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_box_corners,
groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_multiclass_with_groundtruth_weights(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners,
groundtruth_labels, groundtruth_weights):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist,
groundtruth_labels,
groundtruth_weights)
(_, cls_weights, _, reg_weights, _) = result
return (cls_weights, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
anchor_stddevs = np.array(4 * [4 * [.1]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)
groundtruth_weights = np.array([0.3, 0., 0.5], dtype=np.float32)
exp_cls_weights = [0.3, 0., 1, 0.5] # background class gets weight of 1.
exp_reg_weights = [0.3, 0., 0., 0.5] # background class gets weight of 0.
(cls_weights_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_box_corners,
groundtruth_labels,
groundtruth_weights])
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_assign_multidimensional_class_targets(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners,
groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant([[0, 0], [0, 0]], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist,
groundtruth_labels)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
anchor_stddevs = np.array(4 * [4 * [.1]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 1], [1, .5]]], np.float32)
exp_cls_targets = [[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 0], [0, 0]],
[[0, 1], [1, .5]]]
exp_cls_weights = [1, 1, 1, 1]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0],
[0, 0, -.5, .2]]
exp_reg_weights = [1, 1, 0, 1]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_box_corners,
groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_empty_groundtruth(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners,
groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant([0, 0, 0], tf.float32)
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist,
groundtruth_labels)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
groundtruth_labels = np.zeros((0, 3), dtype=np.float32)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]],
dtype=np.float32)
anchor_stddevs = np.array(4 * [4 * [.1]], dtype=np.float32)
exp_cls_targets = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
exp_cls_weights = [1, 1, 1, 1]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
exp_reg_weights = [0, 0, 0, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_box_corners,
groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_raises_error_on_incompatible_groundtruth_boxes_and_labels(self):
similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]])
prior_stddevs = tf.constant(4 * [4 * [.1]])
priors = box_list.BoxList(prior_means)
priors.add_field('stddev', prior_stddevs)
box_corners = [[0.0, 0.0, 0.5, 0.5],
[0.0, 0.0, 0.5, 0.8],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]]
boxes = box_list.BoxList(tf.constant(box_corners))
groundtruth_labels = tf.constant([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], tf.float32)
with self.assertRaisesRegexp(ValueError, 'Unequal shapes'):
target_assigner.assign(priors, boxes, groundtruth_labels,
num_valid_rows=3)
def test_raises_error_on_invalid_groundtruth_labels(self):
similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant([[0, 0], [0, 0], [0, 0]], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5]])
prior_stddevs = tf.constant([[1.0, 1.0, 1.0, 1.0]])
priors = box_list.BoxList(prior_means)
priors.add_field('stddev', prior_stddevs)
box_corners = [[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]]
boxes = box_list.BoxList(tf.constant(box_corners))
groundtruth_labels = tf.constant([[[0, 1], [1, 0]]], tf.float32)
with self.assertRaises(ValueError):
target_assigner.assign(priors, boxes, groundtruth_labels,
num_valid_rows=3)
class BatchTargetAssignerTest(test_case.TestCase):
def _get_agnostic_target_assigner(self):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
return targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=None)
def _get_multi_class_target_assigner(self, num_classes):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
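    # The unmatched target is a one-hot background row, e.g. [1, 0, 0, 0] for
    # num_classes=3: one background slot followed by one slot per class.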
unmatched_cls_target = tf.constant([1] + num_classes * [0], tf.float32)
return targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
def _get_multi_dimensional_target_assigner(self, target_dimensions):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant(np.zeros(target_dimensions),
tf.float32)
return targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
def test_batch_assign_targets(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_boxlist1,
groundtruth_boxlist2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [None, None]
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
agnostic_target_assigner = self._get_agnostic_target_assigner()
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
agnostic_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
anchor_stddevs = np.array([[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1]], dtype=np.float32)
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_cls_weights = [[1, 1, 1, 1],
[1, 1, 1, 1]]
exp_cls_targets = [[[1], [0], [0], [0]],
[[0], [1], [1], [0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_boxlist1,
groundtruth_boxlist2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multiclass_targets(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_boxlist1,
groundtruth_boxlist2, class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
multiclass_target_assigner = self._get_multi_class_target_assigner(
num_classes=3)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
anchor_stddevs = np.array([[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1]], dtype=np.float32)
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_cls_weights = [[1, 1, 1, 1],
[1, 1, 1, 1]]
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_boxlist1,
groundtruth_boxlist2,
class_targets1,
class_targets2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multiclass_targets_with_padded_groundtruth(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_boxlist1,
groundtruth_boxlist2, class_targets1, class_targets2,
groundtruth_weights1, groundtruth_weights2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
gt_weights = [groundtruth_weights1, groundtruth_weights2]
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
multiclass_target_assigner = self._get_multi_class_target_assigner(
num_classes=3)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets, gt_weights)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2],
[0., 0., 0., 0.]], dtype=np.float32)
groundtruth_weights1 = np.array([1, 0], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842],
[0, 0, 0, 0]],
dtype=np.float32)
groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
anchor_stddevs = np.array([[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1]], dtype=np.float32)
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_cls_weights = [[1, 1, 1, 1],
[1, 1, 1, 1]]
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_boxlist1,
groundtruth_boxlist2,
class_targets1,
class_targets2,
groundtruth_weights1,
groundtruth_weights2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multidimensional_targets(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_boxlist1,
groundtruth_boxlist2, class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
multiclass_target_assigner = self._get_multi_dimensional_target_assigner(
target_dimensions=(2, 3))
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[[0, 1, 1],
[1, 1, 0]]], dtype=np.float32)
class_targets2 = np.array([[[0, 1, 1],
[1, 1, 0]],
[[0, 0, 1],
[0, 0, 1]]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
anchor_stddevs = np.array([[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1]], dtype=np.float32)
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_cls_weights = [[1, 1, 1, 1],
[1, 1, 1, 1]]
exp_cls_targets = [[[[0., 1., 1.],
[1., 1., 0.]],
[[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.]]],
[[[0., 0., 0.],
[0., 0., 0.]],
[[0., 1., 1.],
[1., 1., 0.]],
[[0., 0., 1.],
[0., 0., 1.]],
[[0., 0., 0.],
[0., 0., 0.]]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_boxlist1,
groundtruth_boxlist2,
class_targets1,
class_targets2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_empty_groundtruth(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners,
gt_class_targets):
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
gt_box_batch = [groundtruth_boxlist]
gt_class_targets_batch = [gt_class_targets]
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
multiclass_target_assigner = self._get_multi_class_target_assigner(
num_classes=3)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist,
gt_box_batch, gt_class_targets_batch)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1]], dtype=np.float32)
anchor_stddevs = np.array([[.1, .1, .1, .1],
[.1, .1, .1, .1]], dtype=np.float32)
exp_reg_targets = [[[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_cls_weights = [[1, 1]]
exp_cls_targets = [[[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_reg_weights = [[0, 0]]
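    # The class-target array is padded with one extra column for the implicit
    # background class, matching the [1] + num_classes * [0] unmatched target
    # built by _get_multi_class_target_assigner above.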
num_classes = 3
pad = 1
gt_class_targets = np.zeros((0, num_classes + pad), dtype=np.float32)
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(
graph_fn, [anchor_means, anchor_stddevs, groundtruth_box_corners,
gt_class_targets])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
class CreateTargetAssignerTest(tf.test.TestCase):
  def test_create_target_assigner(self):
    """Tests that the named constructor gives working target assigners.
TODO(rathodv): Make this test more general.
"""
corners = [[0.0, 0.0, 1.0, 1.0]]
groundtruth = box_list.BoxList(tf.constant(corners))
priors = box_list.BoxList(tf.constant(corners))
prior_stddevs = tf.constant([[1.0, 1.0, 1.0, 1.0]])
priors.add_field('stddev', prior_stddevs)
multibox_ta = (targetassigner
.create_target_assigner('Multibox', stage='proposal'))
multibox_ta.assign(priors, groundtruth)
# No tests on output, as that may vary arbitrarily as new target assigners
# are added. As long as it is constructed correctly and runs without errors,
# tests on the individual assigners cover correctness of the assignments.
anchors = box_list.BoxList(tf.constant(corners))
faster_rcnn_proposals_ta = (targetassigner
.create_target_assigner('FasterRCNN',
stage='proposal'))
faster_rcnn_proposals_ta.assign(anchors, groundtruth)
fast_rcnn_ta = (targetassigner
.create_target_assigner('FastRCNN'))
fast_rcnn_ta.assign(anchors, groundtruth)
faster_rcnn_detection_ta = (targetassigner
.create_target_assigner('FasterRCNN',
stage='detection'))
faster_rcnn_detection_ta.assign(anchors, groundtruth)
with self.assertRaises(ValueError):
targetassigner.create_target_assigner('InvalidDetector',
stage='invalid_stage')
if __name__ == '__main__':
tf.test.main()
|
|
#!/usr/bin/env vpython
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import datetime
import sys
import unittest
from test_support import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components import utils
from components.auth import ipaddr
from components.auth import model
from components.auth.proto import realms_pb2
from test_support import test_case
class IdentityTest(test_case.TestCase):
"""Tests for Identity class."""
def test_immutable(self):
    # Note that it's still possible to add new attributes to |ident|. To fix
    # this we'd have to add __slots__ = () to Identity and to
    # BytesSerializable (which it inherits from). Since adding extra
    # attributes to an instance doesn't harm any expected behavior of Identity
    # (like the equality operator or serialization), we ignore this hole in
    # immutability.
ident = model.Identity(model.IDENTITY_USER, '[email protected]')
self.assertTrue(isinstance(ident, tuple))
with self.assertRaises(AttributeError):
ident.kind = model.IDENTITY_USER
with self.assertRaises(AttributeError):
ident.name = '[email protected]'
def test_equality(self):
# Identities are compared by values, not by reference.
ident1 = model.Identity(model.IDENTITY_USER, '[email protected]')
ident2 = model.Identity(model.IDENTITY_USER, '[email protected]')
ident3 = model.Identity(model.IDENTITY_USER, '[email protected]')
self.assertEqual(ident1, ident2)
self.assertNotEqual(ident1, ident3)
# Verify that adding extra attribute doesn't change equality relation.
ident1.extra = 1
ident2.extra = 2
self.assertEqual(ident1, ident2)
def test_validation(self):
# Unicode with ASCII data is ok.
ok_identities = (
(unicode(model.IDENTITY_USER), '[email protected]'),
(model.IDENTITY_USER, u'[email protected]'),
(model.IDENTITY_USER, r'abc%[email protected]'),
(model.IDENTITY_USER, r'ABC_DEF@ABC_DEF.com'),
(model.IDENTITY_SERVICE, 'domain.com:app-id'),
(model.IDENTITY_PROJECT, 'project-123_name'),
)
for kind, name in ok_identities:
ident = model.Identity(kind, name)
# Should be 'str', not 'unicode'
self.assertEqual(type(ident.kind), str)
self.assertEqual(type(ident.name), str)
# And data should match.
self.assertEqual(kind, ident.kind)
self.assertEqual(name, ident.name)
# Nasty stuff.
bad_identities = (
('unknown-kind', '[email protected]'),
(model.IDENTITY_ANONYMOUS, 'not-anonymous'),
(model.IDENTITY_BOT, 'bad bot name - spaces'),
(model.IDENTITY_SERVICE, 'spaces everywhere'),
(model.IDENTITY_USER, 'even here'),
(model.IDENTITY_USER, u'\u043f\u0440\u0438\u0432\u0435\u0442'),
(model.IDENTITY_PROJECT, 'UPPER_not_allowed'),
)
for kind, name in bad_identities:
with self.assertRaises(ValueError):
model.Identity(kind, name)
def test_serialization(self):
# Identity object goes through serialize-deserialize process unchanged.
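    # The serialized form is '<kind>:<name>' (e.g. 'user:[email protected]'), as
    # suggested by the malformed examples below.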
good_cases = (
model.Identity(model.IDENTITY_USER, '[email protected]'),
model.Anonymous,
)
for case in good_cases:
self.assertEqual(case, model.Identity.from_bytes(case.to_bytes()))
# Malformed data causes ValueError.
bad_cases = (
'',
'[email protected]',
'user:',
':[email protected]',
'user::[email protected]',
)
for case in bad_cases:
with self.assertRaises(ValueError):
model.Identity.from_bytes(case)
class IdentityGlobTest(test_case.TestCase):
"""Tests for IdentityGlob class."""
def test_immutable(self):
# See comment in IdentityTest.test_immutable regarding existing hole in
# immutability.
glob = model.IdentityGlob(model.IDENTITY_USER, '*@example.com')
self.assertTrue(isinstance(glob, tuple))
with self.assertRaises(AttributeError):
glob.kind = model.IDENTITY_USER
with self.assertRaises(AttributeError):
glob.pattern = '*@example.com'
def test_equality(self):
# IdentityGlobs are compared by values, not by reference.
glob1 = model.IdentityGlob(model.IDENTITY_USER, '*@example.com')
glob2 = model.IdentityGlob(model.IDENTITY_USER, '*@example.com')
glob3 = model.IdentityGlob(model.IDENTITY_USER, '*[email protected]')
self.assertEqual(glob1, glob2)
self.assertNotEqual(glob1, glob3)
# Verify that adding extra attribute doesn't change equality relation.
glob1.extra = 1
glob2.extra = 2
self.assertEqual(glob1, glob2)
def test_validation(self):
# Unicode with ASCII data is ok.
ok_globs = (
(unicode(model.IDENTITY_USER), '*@example.com'),
(model.IDENTITY_USER, u'*@example.com'),
)
for kind, pattern in ok_globs:
glob = model.IdentityGlob(kind, pattern)
# Should be 'str', not 'unicode'
self.assertEqual(type(glob.kind), str)
self.assertEqual(type(glob.pattern), str)
# And data should match.
self.assertEqual(kind, glob.kind)
self.assertEqual(pattern, glob.pattern)
# Nasty stuff.
bad_globs = (
('unknown-kind', '*@example.com'),
(model.IDENTITY_USER, ''),
(model.IDENTITY_USER, u'\u043f\u0440\u0438\u0432\u0435\u0442')
)
for kind, pattern in bad_globs:
with self.assertRaises(ValueError):
model.IdentityGlob(kind, pattern)
def test_serialization(self):
# IdentityGlob object goes through serialize-deserialize process unchanged.
glob = model.IdentityGlob(model.IDENTITY_USER, '*@example.com')
self.assertEqual(glob, model.IdentityGlob.from_bytes(glob.to_bytes()))
# Malformed data causes ValueError.
bad_cases = (
'',
'user*@example.com',
'user:',
':*@example.com',
)
for case in bad_cases:
with self.assertRaises(ValueError):
model.IdentityGlob.from_bytes(case)
def test_match(self):
glob = model.IdentityGlob(model.IDENTITY_USER, '*@example.com')
self.assertTrue(
glob.match(model.Identity(model.IDENTITY_USER, '[email protected]')))
self.assertFalse(
glob.match(model.Identity(model.IDENTITY_BOT, '[email protected]')))
self.assertFalse(
glob.match(model.Identity(model.IDENTITY_USER, '[email protected]')))
class GroupNameTest(test_case.TestCase):
def test_valid(self):
self.assertTrue(model.is_valid_group_name('zdb/q9'))
self.assertTrue(model.is_valid_group_name('blah-blah-zzz'))
self.assertTrue(model.is_valid_group_name('buuble/[email protected]'))
self.assertTrue(model.is_valid_group_name('09az_-.@'))
def test_invalid(self):
self.assertFalse(model.is_valid_group_name(''))
self.assertFalse(model.is_valid_group_name('aBC'))
self.assertFalse(model.is_valid_group_name('//'))
self.assertFalse(model.is_valid_group_name('what//now'))
class AuthSecretTest(test_case.TestCase):
"""Tests for AuthSecret class."""
def setUp(self):
super(AuthSecretTest, self).setUp()
self.mock(model.logging, 'warning', lambda *_args: None)
def test_bootstrap_works(self):
# Creating it for a first time.
ent1 = model.AuthSecret.bootstrap('test_secret', length=127)
self.assertTrue(ent1)
self.assertEqual(ent1.key.string_id(), 'test_secret')
self.assertEqual(ent1.key.parent().string_id(), 'local')
self.assertEqual(1, len(ent1.values))
self.assertEqual(127, len(ent1.values[0]))
# Getting same one.
ent2 = model.AuthSecret.bootstrap('test_secret')
self.assertEqual(ent1, ent2)
def make_group(group_id, nested=(), owners=model.ADMIN_GROUP, store=True):
  """Makes a new AuthGroup for use in tests; stores it if |store| is True."""
entity = model.AuthGroup(
key=model.group_key(group_id), nested=nested, owners=owners)
if store:
entity.put()
return entity
class GroupBootstrapTest(test_case.TestCase):
  """Tests for bootstrap_group function."""
def test_group_bootstrap_empty(self):
mocked_now = datetime.datetime(2014, 01, 01)
self.mock_now(mocked_now)
added = model.bootstrap_group('some-group', [], 'Blah description')
self.assertTrue(added)
ent = model.group_key('some-group').get()
self.assertEqual(
{
'auth_db_rev': 1,
'auth_db_prev_rev': None,
'created_by': model.get_service_self_identity(),
'created_ts': mocked_now,
'description': 'Blah description',
'globs': [],
'members': [],
'modified_by': model.get_service_self_identity(),
'modified_ts': mocked_now,
'nested': [],
'owners': u'administrators',
},
ent.to_dict())
def test_group_bootstrap_non_empty(self):
ident1 = model.Identity(model.IDENTITY_USER, '[email protected]')
ident2 = model.Identity(model.IDENTITY_USER, '[email protected]')
mocked_now = datetime.datetime(2014, 01, 01)
self.mock_now(mocked_now)
added = model.bootstrap_group(
'some-group', [ident1, ident2], 'Blah description')
self.assertTrue(added)
ent = model.group_key('some-group').get()
self.assertEqual(
{
'auth_db_rev': 1,
'auth_db_prev_rev': None,
'created_by': model.get_service_self_identity(),
'created_ts': mocked_now,
'description': 'Blah description',
'globs': [],
'members': [ident1, ident2],
'modified_by': model.get_service_self_identity(),
'modified_ts': mocked_now,
'nested': [],
'owners': u'administrators',
},
ent.to_dict())
class FindGroupReferencesTest(test_case.TestCase):
"""Tests for find_referencing_groups function."""
def test_missing_group(self):
"""Non existent group is not references by anything."""
self.assertEqual(set(), model.find_referencing_groups('Missing group'))
def test_not_referenced(self):
"""Existing orphaned groups is not referenced."""
# Some mix of groups with references.
make_group('Group 1')
make_group('Group 2')
make_group('Group 3', nested=('Group 1', 'Group 2'))
make_group('Group 4', nested=('Group 3',))
# And a group that is not referenced by anything.
make_group('Standalone')
# Should not be referenced.
self.assertEqual(set(), model.find_referencing_groups('Standalone'))
def test_referenced_as_nested_group(self):
"""If group is nested into another group, it's referenced."""
# Some mix of groups with references, including group to be tested.
make_group('Referenced')
make_group('Group 1')
make_group('Group 2', nested=('Referenced', 'Group 1'))
make_group('Group 3', nested=('Group 2',))
make_group('Group 4', nested=('Referenced',))
# Only direct references are returned.
self.assertEqual(
set(['Group 2', 'Group 4']),
model.find_referencing_groups('Referenced'))
def test_referenced_as_owner(self):
"""If a group owns another group, it is referenced."""
make_group('Referenced')
make_group('Group 1', owners='Referenced')
make_group('Group 2', owners='Referenced')
make_group('Group 3', owners='Group 1')
self.assertEqual(
set(['Group 1', 'Group 2']),
model.find_referencing_groups('Referenced'))
class FindDependencyCycleTest(test_case.TestCase):
"""Tests for find_group_dependency_cycle function."""
def test_empty(self):
group = make_group('A', store=False)
self.assertEqual([], model.find_group_dependency_cycle(group))
def test_no_cycles(self):
make_group('A')
make_group('B', nested=('A',))
group = make_group('C', nested=('B',), store=False)
self.assertEqual([], model.find_group_dependency_cycle(group))
def test_self_reference(self):
group = make_group('A', nested=('A',), store=False)
self.assertEqual(['A'], model.find_group_dependency_cycle(group))
def test_simple_cycle(self):
make_group('A', nested=('B',))
group = make_group('B', nested=('A',), store=False)
self.assertEqual(['B', 'A'], model.find_group_dependency_cycle(group))
def test_long_cycle(self):
make_group('A', nested=('B',))
make_group('B', nested=('C',))
make_group('C', nested=('D',))
group = make_group('D', nested=('A',), store=False)
self.assertEqual(
['D', 'A', 'B', 'C'], model.find_group_dependency_cycle(group))
def test_diamond_no_cycles(self):
make_group('A')
make_group('B1', nested=('A',))
make_group('B2', nested=('A',))
group = make_group('C', nested=('B1', 'B2'), store=False)
self.assertEqual([], model.find_group_dependency_cycle(group))
def test_diamond_with_cycles(self):
make_group('A', nested=('C',))
make_group('B1', nested=('A',))
make_group('B2', nested=('A',))
group = make_group('C', nested=('B1', 'B2'), store=False)
self.assertEqual(['C', 'B1', 'A'], model.find_group_dependency_cycle(group))
class IpWhitelistTest(test_case.TestCase):
"""Tests for AuthIPWhitelist related functions."""
def test_bootstrap_ip_whitelist_empty(self):
self.assertIsNone(model.ip_whitelist_key('list').get())
    mocked_now = datetime.datetime(2014, 1, 1)
self.mock_now(mocked_now)
ret = model.bootstrap_ip_whitelist('list', [], 'comment')
self.assertTrue(ret)
ent = model.ip_whitelist_key('list').get()
self.assertTrue(ent)
self.assertEqual({
'auth_db_rev': 1,
'auth_db_prev_rev': None,
'created_by': model.get_service_self_identity(),
'created_ts': mocked_now,
'description': u'comment',
'modified_by': model.get_service_self_identity(),
'modified_ts': mocked_now,
'subnets': [],
}, ent.to_dict())
def test_bootstrap_ip_whitelist(self):
self.assertIsNone(model.ip_whitelist_key('list').get())
    mocked_now = datetime.datetime(2014, 1, 1)
self.mock_now(mocked_now)
ret = model.bootstrap_ip_whitelist(
'list', ['192.168.0.0/24', '127.0.0.1/32'], 'comment')
self.assertTrue(ret)
ent = model.ip_whitelist_key('list').get()
self.assertTrue(ent)
self.assertEqual({
'auth_db_rev': 1,
'auth_db_prev_rev': None,
'created_by': model.get_service_self_identity(),
'created_ts': mocked_now,
'description': u'comment',
'modified_by': model.get_service_self_identity(),
'modified_ts': mocked_now,
'subnets': [u'192.168.0.0/24', u'127.0.0.1/32'],
}, ent.to_dict())
def test_bootstrap_ip_whitelist_bad_subnet(self):
self.assertFalse(model.bootstrap_ip_whitelist('list', ['not a subnet']))
def test_bootstrap_ip_whitelist_assignment_new(self):
    self.mock_now(datetime.datetime(2014, 1, 1))
ret = model.bootstrap_ip_whitelist_assignment(
model.Identity(model.IDENTITY_USER, '[email protected]'),
'some ip whitelist', 'some comment')
self.assertTrue(ret)
self.assertEqual(
{
'assignments': [
{
'comment': 'some comment',
'created_by': model.get_service_self_identity(),
'created_ts': datetime.datetime(2014, 1, 1),
'identity': model.Identity(model.IDENTITY_USER, '[email protected]'),
'ip_whitelist': 'some ip whitelist',
},
],
'auth_db_rev': 1,
'auth_db_prev_rev': None,
'modified_by': model.get_service_self_identity(),
'modified_ts': datetime.datetime(2014, 1, 1),
}, model.ip_whitelist_assignments_key().get().to_dict())
def test_bootstrap_ip_whitelist_assignment_modify(self):
    self.mock_now(datetime.datetime(2014, 1, 1))
ret = model.bootstrap_ip_whitelist_assignment(
model.Identity(model.IDENTITY_USER, '[email protected]'),
'some ip whitelist', 'some comment')
self.assertTrue(ret)
ret = model.bootstrap_ip_whitelist_assignment(
model.Identity(model.IDENTITY_USER, '[email protected]'),
'another ip whitelist', 'another comment')
self.assertTrue(ret)
self.assertEqual(
{
'assignments': [
{
'comment': 'another comment',
'created_by': model.get_service_self_identity(),
'created_ts': datetime.datetime(2014, 1, 1),
'identity': model.Identity(model.IDENTITY_USER, '[email protected]'),
'ip_whitelist': 'another ip whitelist',
},
],
'auth_db_rev': 2,
'auth_db_prev_rev': 1,
'modified_by': model.get_service_self_identity(),
'modified_ts': datetime.datetime(2014, 1, 1),
}, model.ip_whitelist_assignments_key().get().to_dict())
def test_fetch_ip_whitelists_empty(self):
assignments, whitelists = model.fetch_ip_whitelists()
self.assertEqual(model.ip_whitelist_assignments_key(), assignments.key)
self.assertEqual(0, len(assignments.assignments))
self.assertEqual([], whitelists)
def test_fetch_ip_whitelists_non_empty(self):
ent = model.AuthIPWhitelistAssignments(
key=model.ip_whitelist_assignments_key())
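    # Helper: parse the identity string and append an Assignment to the entity.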
def add(identity, **kwargs):
kwargs['identity'] = model.Identity.from_bytes(identity)
ent.assignments.append(
model.AuthIPWhitelistAssignments.Assignment(**kwargs))
add('user:[email protected]', ip_whitelist='A')
add('user:[email protected]', ip_whitelist='A')
add('user:[email protected]', ip_whitelist='B')
add('user:[email protected]', ip_whitelist='missing')
ent.put()
def store_whitelist(name):
model.AuthIPWhitelist(key=model.ip_whitelist_key(name)).put()
store_whitelist('A')
store_whitelist('B')
store_whitelist('bots')
assignments, whitelists = model.fetch_ip_whitelists()
self.assertEqual(ent.to_dict(), assignments.to_dict())
self.assertEqual(['A', 'B', 'bots'], [e.key.id() for e in whitelists])
class AuditLogTest(test_case.TestCase):
"""Tests to verify replicate_auth_db() keeps historical copies of entities."""
def grab_log(self, original_cls):
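    """Fetches historical copies of original_cls entities, keyed by their ndb keys."""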
copies = original_cls.get_historical_copy_class().query(
ancestor=model.root_key()).fetch()
    # All keys must be under the correct historical_revision_key().
for c in copies:
self.assertEqual(
ndb.Key('Rev', c.auth_db_rev, parent=model.root_key()),
c.key.parent())
return {x.key: x.to_dict() for x in copies}
def setUp(self):
super(AuditLogTest, self).setUp()
self.mock_now(datetime.datetime(2015, 1, 1, 1, 1))
def test_global_config_log(self):
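    # Local helper: update the root AuthGlobalConfig in a transaction, record the
    # revision and replicate the AuthDB.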
@ndb.transactional
def modify(**kwargs):
e = model.root_key().get() or model.AuthGlobalConfig(key=model.root_key())
e.populate(**kwargs)
e.record_revision(
modified_by=model.Identity.from_bytes('user:[email protected]'),
modified_ts=utils.utcnow(),
comment='Comment')
e.put()
model.replicate_auth_db()
# Global config is never deleted, so test only modifications.
modify(oauth_client_id='1', oauth_additional_client_ids=[])
modify(oauth_client_id='2', oauth_additional_client_ids=['a'])
modify(oauth_client_id='3', oauth_additional_client_ids=['a', 'b'])
modify(oauth_client_id='4', oauth_additional_client_ids=[])
modify(oauth_client_id='4', security_config='zzz')
# Final state.
self.assertEqual({
'auth_db_rev': 5,
'auth_db_prev_rev': 4,
'oauth_additional_client_ids': [],
'oauth_client_id': u'4',
'oauth_client_secret': u'',
'security_config': 'zzz',
'token_server_url': u'',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
}, model.root_key().get().to_dict())
# Copies in the history.
cpy = lambda rev: ndb.Key(
'Rev', rev, 'AuthGlobalConfigHistory', 'root', parent=model.root_key())
self.assertEqual({
cpy(1): {
'auth_db_rev': 1,
'auth_db_prev_rev': None,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'oauth_additional_client_ids': [],
'oauth_client_id': u'1',
'oauth_client_secret': u'',
'security_config': None,
'token_server_url': u'',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
cpy(2): {
'auth_db_rev': 2,
'auth_db_prev_rev': 1,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'oauth_additional_client_ids': [u'a'],
'oauth_client_id': u'2',
'oauth_client_secret': u'',
'security_config': None,
'token_server_url': u'',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
cpy(3): {
'auth_db_rev': 3,
'auth_db_prev_rev': 2,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'oauth_additional_client_ids': [u'a', u'b'],
'oauth_client_id': u'3',
'oauth_client_secret': u'',
'security_config': None,
'token_server_url': u'',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
cpy(4): {
'auth_db_rev': 4,
'auth_db_prev_rev': 3,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'oauth_additional_client_ids': [],
'oauth_client_id': u'4',
'oauth_client_secret': u'',
'security_config': None,
'token_server_url': u'',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
cpy(5): {
'auth_db_rev': 5,
'auth_db_prev_rev': 4,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'oauth_additional_client_ids': [],
'oauth_client_id': u'4',
'oauth_client_secret': u'',
'security_config': 'zzz',
'token_server_url': u'',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
}, self.grab_log(model.AuthGlobalConfig))
def test_groups_log(self):
ident_a = model.Identity.from_bytes('user:[email protected]')
ident_b = model.Identity.from_bytes('user:[email protected]')
glob_a = model.IdentityGlob.from_bytes('user:*@a.com')
glob_b = model.IdentityGlob.from_bytes('user:*@b.com')
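    # Local helpers: create/update or delete a group inside a transaction;
    # commit=True also replicates the AuthDB.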
@ndb.transactional
def modify(name, commit=True, **kwargs):
k = model.group_key(name)
e = k.get()
if not e:
e = model.AuthGroup(
key=k,
created_by=ident_a,
created_ts=utils.utcnow())
e.record_revision(
modified_by=ident_a,
modified_ts=utils.utcnow(),
comment='Comment')
e.populate(**kwargs)
e.put()
if commit:
model.replicate_auth_db()
@ndb.transactional
def remove(name, commit=True):
e = model.group_key(name).get()
if e:
e.record_deletion(
modified_by=model.Identity.from_bytes('user:[email protected]'),
modified_ts=utils.utcnow(),
comment='Comment')
e.key.delete()
if commit:
model.replicate_auth_db()
modify('A', members=[])
modify('A', members=[ident_a], globs=[glob_a])
modify('B', members=[ident_b], globs=[glob_b])
modify('A', nested=['B'])
@ndb.transactional
def batch():
modify('B', commit=False, description='Blah')
remove('A', commit=True)
batch()
modify('B', members=[ident_a, ident_b], globs=[glob_a, glob_b])
# Final state.
self.assertIsNone(model.group_key('A').get())
self.assertEqual({
'auth_db_rev': 6,
'auth_db_prev_rev': 5,
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'Blah',
'globs': [
model.IdentityGlob(kind='user', pattern='*@a.com'),
model.IdentityGlob(kind='user', pattern='*@b.com'),
],
'members': [
model.Identity(kind='user', name='[email protected]'),
model.Identity(kind='user', name='[email protected]'),
],
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'nested': [],
'owners': u'administrators',
}, model.group_key('B').get().to_dict())
# Copies in the history.
cpy = lambda name, rev: ndb.Key(
'Rev', rev, 'AuthGroupHistory', name, parent=model.root_key())
self.assertEqual({
cpy('A', 1): {
'auth_db_rev': 1,
'auth_db_prev_rev': None,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'',
'globs': [],
'members': [],
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'nested': [],
'owners': u'administrators',
},
cpy('A', 2): {
'auth_db_rev': 2,
'auth_db_prev_rev': 1,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'',
'globs': [glob_a],
'members': [ident_a],
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'nested': [],
'owners': u'administrators',
},
cpy('B', 3): {
'auth_db_rev': 3,
'auth_db_prev_rev': None,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'',
'globs': [glob_b],
'members': [ident_b],
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'nested': [],
'owners': u'administrators',
},
cpy('A', 4): {
'auth_db_rev': 4,
'auth_db_prev_rev': 2,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'',
'globs': [glob_a],
'members': [ident_a],
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'nested': [u'B'],
'owners': u'administrators',
},
# Batch revision.
cpy('A', 5): {
'auth_db_rev': 5,
'auth_db_prev_rev': 4,
'auth_db_app_version': u'v1a',
'auth_db_deleted': True,
'auth_db_change_comment': u'Comment',
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'',
'globs': [glob_a],
'members': [ident_a],
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'nested': [u'B'],
'owners': u'administrators',
},
cpy('B', 5): {
'auth_db_rev': 5,
'auth_db_prev_rev': 3,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'Blah',
'globs': [glob_b],
'members': [ident_b],
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'nested': [],
'owners': u'administrators',
},
# /end of batch revision
cpy('B', 6): {
'auth_db_rev': 6,
'auth_db_prev_rev': 5,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'Blah',
'globs': [glob_a, glob_b],
'members': [ident_a, ident_b],
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'nested': [],
'owners': u'administrators',
},
}, self.grab_log(model.AuthGroup))
def test_ip_whitelist_log(self):
@ndb.transactional
def modify(name, **kwargs):
k = model.ip_whitelist_key(name)
e = k.get()
if not e:
e = model.AuthIPWhitelist(
key=k,
created_by=model.Identity.from_bytes('user:[email protected]'),
created_ts=utils.utcnow())
e.record_revision(
modified_by=model.Identity.from_bytes('user:[email protected]'),
modified_ts=utils.utcnow(),
comment='Comment')
e.populate(**kwargs)
e.put()
model.replicate_auth_db()
@ndb.transactional
def remove(name):
e = model.ip_whitelist_key(name).get()
if e:
e.record_deletion(
modified_by=model.Identity.from_bytes('user:[email protected]'),
modified_ts=utils.utcnow(),
comment='Comment')
e.key.delete()
model.replicate_auth_db()
    # Very similar to test_groups_log, so fewer test cases are needed.
modify('A', subnets=['127.0.0.1/32'])
modify('A', description='Blah')
modify('A', subnets=['1.0.0.0/32'])
remove('A')
# Copies in the history.
cpy = lambda name, rev: ndb.Key(
'Rev', rev, 'AuthIPWhitelistHistory', name, parent=model.root_key())
self.assertEqual({
cpy('A', 1): {
'auth_db_rev': 1,
'auth_db_prev_rev': None,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'',
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'subnets': [u'127.0.0.1/32'],
},
cpy('A', 2): {
'auth_db_rev': 2,
'auth_db_prev_rev': 1,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'Blah',
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'subnets': [u'127.0.0.1/32'],
},
cpy('A', 3): {
'auth_db_rev': 3,
'auth_db_prev_rev': 2,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'Blah',
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'subnets': [u'1.0.0.0/32'],
},
cpy('A', 4): {
'auth_db_rev': 4,
'auth_db_prev_rev': 3,
'auth_db_app_version': u'v1a',
'auth_db_deleted': True,
'auth_db_change_comment': u'Comment',
'created_by': model.Identity(kind='user', name='[email protected]'),
'created_ts': datetime.datetime(2015, 1, 1, 1, 1),
'description': u'Blah',
'modified_by': model.Identity(kind='user', name='[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
'subnets': [u'1.0.0.0/32'],
},
}, self.grab_log(model.AuthIPWhitelist))
def test_ip_whitelist_assignment_log(self):
    # AuthIPWhitelistAssignments is special: it uses a LocalStructuredProperty.
@ndb.transactional
def modify(assignments):
key = model.ip_whitelist_assignments_key()
e = key.get() or model.AuthIPWhitelistAssignments(key=key)
e.record_revision(
modified_by=model.Identity.from_bytes('user:[email protected]'),
modified_ts=datetime.datetime(2015, 1, 1, 1, 1),
comment='Comment')
e.assignments = assignments
e.put()
model.replicate_auth_db()
Assignment = model.AuthIPWhitelistAssignments.Assignment
modify([])
modify([
Assignment(
identity=model.Identity.from_bytes('user:[email protected]'),
ip_whitelist='bots',
comment='Blah'),
])
modify([])
cpy = lambda rev: ndb.Key(
'Rev', rev, 'AuthIPWhitelistAssignmentsHistory', 'default',
parent=model.root_key())
self.assertEqual({
cpy(1): {
'assignments': [],
'auth_db_rev': 1,
'auth_db_prev_rev': None,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
cpy(2): {
'assignments': [{
'comment': u'Blah',
'created_by': None,
'created_ts': None,
'identity': model.Identity(kind='user', name='[email protected]'),
'ip_whitelist': u'bots',
}],
'auth_db_rev': 2,
'auth_db_prev_rev': 1,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
cpy(3): {
'assignments': [],
'auth_db_rev': 3,
'auth_db_prev_rev': 2,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
}, self.grab_log(model.AuthIPWhitelistAssignments))
def test_realms_globals_log(self):
@ndb.transactional
def modify(permissions):
key = model.realms_globals_key()
e = key.get() or model.AuthRealmsGlobals(key=key)
e.record_revision(
modified_by=model.Identity.from_bytes('user:[email protected]'),
modified_ts=datetime.datetime(2015, 1, 1, 1, 1),
comment='Comment')
e.permissions = permissions
e.put()
model.replicate_auth_db()
modify([])
modify([realms_pb2.Permission(name='luci.dev.p1')])
modify([])
cpy = lambda rev: ndb.Key(
'Rev', rev, 'AuthRealmsGlobalsHistory', 'globals',
parent=model.root_key())
self.assertEqual({
cpy(1): {
'permissions': [],
'auth_db_rev': 1,
'auth_db_prev_rev': None,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
cpy(2): {
'permissions': [realms_pb2.Permission(name='luci.dev.p1')],
'auth_db_rev': 2,
'auth_db_prev_rev': 1,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
cpy(3): {
'permissions': [],
'auth_db_rev': 3,
'auth_db_prev_rev': 2,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
}, self.grab_log(model.AuthRealmsGlobals))
def test_project_realms_log(self):
PROJECT_ID = 'pid'
@ndb.transactional
def modify(realms, config_rev, perms_rev):
key = model.project_realms_key(PROJECT_ID)
e = key.get() or model.AuthProjectRealms(key=key)
if realms:
e.realms = realms
e.config_rev = config_rev
e.perms_rev = perms_rev
e.record_revision(
modified_by=model.Identity.from_bytes('user:[email protected]'),
modified_ts=datetime.datetime(2015, 1, 1, 1, 1),
comment='Comment')
e.put()
else:
e.record_deletion(
modified_by=model.Identity.from_bytes('user:[email protected]'),
modified_ts=datetime.datetime(2015, 1, 1, 1, 1),
comment='Comment')
e.key.delete()
model.replicate_auth_db()
realms_v1 = realms_pb2.Realms(permissions=[{'name': 'p1'}])
realms_v2 = realms_pb2.Realms(permissions=[{'name': 'p2'}])
modify(realms_v1, 'rev1', 'rev1')
modify(realms_v2, 'rev2', 'rev2')
modify(None, None, None) # delete
cpy = lambda rev: ndb.Key(
'Rev', rev, 'AuthProjectRealmsHistory', PROJECT_ID,
parent=model.root_key())
self.assertEqual({
cpy(1): {
'realms': realms_v1,
'config_rev': u'rev1',
'perms_rev': u'rev1',
'auth_db_rev': 1,
'auth_db_prev_rev': None,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
cpy(2): {
'realms': realms_v2,
'config_rev': u'rev2',
'perms_rev': u'rev2',
'auth_db_rev': 2,
'auth_db_prev_rev': 1,
'auth_db_app_version': u'v1a',
'auth_db_deleted': False,
'auth_db_change_comment': u'Comment',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
cpy(3): {
'realms': realms_v2,
'config_rev': u'rev2',
'perms_rev': u'rev2',
'auth_db_rev': 3,
'auth_db_prev_rev': 2,
'auth_db_app_version': u'v1a',
'auth_db_deleted': True,
'auth_db_change_comment': u'Comment',
'modified_by': model.Identity.from_bytes('user:[email protected]'),
'modified_ts': datetime.datetime(2015, 1, 1, 1, 1),
},
}, self.grab_log(model.AuthProjectRealms))
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List
from unittest import mock
from google.api_core.retry import Retry
from airflow.providers.google.cloud.operators.vertex_ai.auto_ml import (
CreateAutoMLForecastingTrainingJobOperator,
CreateAutoMLImageTrainingJobOperator,
CreateAutoMLTabularTrainingJobOperator,
CreateAutoMLTextTrainingJobOperator,
CreateAutoMLVideoTrainingJobOperator,
DeleteAutoMLTrainingJobOperator,
ListAutoMLTrainingJobOperator,
)
from airflow.providers.google.cloud.operators.vertex_ai.custom_job import (
CreateCustomContainerTrainingJobOperator,
CreateCustomPythonPackageTrainingJobOperator,
CreateCustomTrainingJobOperator,
DeleteCustomTrainingJobOperator,
ListCustomTrainingJobOperator,
)
from airflow.providers.google.cloud.operators.vertex_ai.dataset import (
CreateDatasetOperator,
DeleteDatasetOperator,
ExportDataOperator,
ImportDataOperator,
ListDatasetsOperator,
UpdateDatasetOperator,
)
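# Shared constants used to parametrize the Vertex AI operator tests below.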
VERTEX_AI_PATH = "airflow.providers.google.cloud.operators.vertex_ai.{}"
TIMEOUT = 120
RETRY = mock.MagicMock(Retry)
METADATA = [("key", "value")]
TASK_ID = "test_task_id"
GCP_PROJECT = "test-project"
GCP_LOCATION = "test-location"
GCP_CONN_ID = "test-conn"
DELEGATE_TO = "test-delegate-to"
IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
STAGING_BUCKET = "gs://test-vertex-ai-bucket"
DISPLAY_NAME = "display_name_1" # Create random display name
DISPLAY_NAME_2 = "display_nmae_2"
ARGS = ["--tfds", "tf_flowers:3.*.*"]
CONTAINER_URI = "gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest"
REPLICA_COUNT = 1
MACHINE_TYPE = "n1-standard-4"
ACCELERATOR_TYPE = "ACCELERATOR_TYPE_UNSPECIFIED"
ACCELERATOR_COUNT = 0
TRAINING_FRACTION_SPLIT = 0.7
TEST_FRACTION_SPLIT = 0.15
VALIDATION_FRACTION_SPLIT = 0.15
COMMAND_2 = ['echo', 'Hello World']
TEST_API_ENDPOINT: str = "test-api-endpoint"
TEST_PIPELINE_JOB: str = "test-pipeline-job"
TEST_TRAINING_PIPELINE: str = "test-training-pipeline"
TEST_PIPELINE_JOB_ID: str = "test-pipeline-job-id"
PYTHON_PACKAGE = "/files/trainer-0.1.tar.gz"
PYTHON_PACKAGE_CMDARGS = "test-python-cmd"
PYTHON_PACKAGE_GCS_URI = "gs://test-vertex-ai-bucket/trainer-0.1.tar.gz"
PYTHON_MODULE_NAME = "trainer.task"
TRAINING_PIPELINE_ID = "test-training-pipeline-id"
CUSTOM_JOB_ID = "test-custom-job-id"
TEST_DATASET = {
"display_name": "test-dataset-name",
"metadata_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml",
"metadata": "test-image-dataset",
}
TEST_DATASET_ID = "test-dataset-id"
TEST_EXPORT_CONFIG = {
"annotationsFilter": "test-filter",
"gcs_destination": {"output_uri_prefix": "airflow-system-tests-data"},
}
TEST_IMPORT_CONFIG = [
{
"data_item_labels": {
"test-labels-name": "test-labels-value",
},
"import_schema_uri": "test-shema-uri",
"gcs_source": {"uris": ['test-string']},
},
{},
]
TEST_UPDATE_MASK = "test-update-mask"
TEST_TRAINING_TARGET_COLUMN = "target"
TEST_TRAINING_TIME_COLUMN = "time"
TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN = "time_series_identifier"
TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS: List[str] = []
TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS: List[str] = []
TEST_TRAINING_FORECAST_HORIZON = 10
TEST_TRAINING_DATA_GRANULARITY_UNIT = "day"
TEST_TRAINING_DATA_GRANULARITY_COUNT = 1
class TestVertexAICreateCustomContainerTrainingJobOperator:
@mock.patch(VERTEX_AI_PATH.format("custom_job.CustomJobHook"))
def test_execute(self, mock_hook):
op = CreateCustomContainerTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
staging_bucket=STAGING_BUCKET,
display_name=DISPLAY_NAME,
args=ARGS,
container_uri=CONTAINER_URI,
model_serving_container_image_uri=CONTAINER_URI,
command=COMMAND_2,
model_display_name=DISPLAY_NAME_2,
replica_count=REPLICA_COUNT,
machine_type=MACHINE_TYPE,
accelerator_type=ACCELERATOR_TYPE,
accelerator_count=ACCELERATOR_COUNT,
training_fraction_split=TRAINING_FRACTION_SPLIT,
validation_fraction_split=VALIDATION_FRACTION_SPLIT,
test_fraction_split=TEST_FRACTION_SPLIT,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.create_custom_container_training_job.assert_called_once_with(
staging_bucket=STAGING_BUCKET,
display_name=DISPLAY_NAME,
args=ARGS,
container_uri=CONTAINER_URI,
model_serving_container_image_uri=CONTAINER_URI,
command=COMMAND_2,
dataset=None,
model_display_name=DISPLAY_NAME_2,
replica_count=REPLICA_COUNT,
machine_type=MACHINE_TYPE,
accelerator_type=ACCELERATOR_TYPE,
accelerator_count=ACCELERATOR_COUNT,
training_fraction_split=TRAINING_FRACTION_SPLIT,
validation_fraction_split=VALIDATION_FRACTION_SPLIT,
test_fraction_split=TEST_FRACTION_SPLIT,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model_serving_container_predict_route=None,
model_serving_container_health_route=None,
model_serving_container_command=None,
model_serving_container_args=None,
model_serving_container_environment_variables=None,
model_serving_container_ports=None,
model_description=None,
model_instance_schema_uri=None,
model_parameters_schema_uri=None,
model_prediction_schema_uri=None,
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
# RUN
annotation_schema_uri=None,
model_labels=None,
base_output_dir=None,
service_account=None,
network=None,
bigquery_destination=None,
environment_variables=None,
boot_disk_type='pd-ssd',
boot_disk_size_gb=100,
training_filter_split=None,
validation_filter_split=None,
test_filter_split=None,
predefined_split_column_name=None,
timestamp_split_column_name=None,
tensorboard=None,
sync=True,
)
class TestVertexAICreateCustomPythonPackageTrainingJobOperator:
@mock.patch(VERTEX_AI_PATH.format("custom_job.CustomJobHook"))
def test_execute(self, mock_hook):
op = CreateCustomPythonPackageTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
staging_bucket=STAGING_BUCKET,
display_name=DISPLAY_NAME,
python_package_gcs_uri=PYTHON_PACKAGE_GCS_URI,
python_module_name=PYTHON_MODULE_NAME,
container_uri=CONTAINER_URI,
args=ARGS,
model_serving_container_image_uri=CONTAINER_URI,
model_display_name=DISPLAY_NAME_2,
replica_count=REPLICA_COUNT,
machine_type=MACHINE_TYPE,
accelerator_type=ACCELERATOR_TYPE,
accelerator_count=ACCELERATOR_COUNT,
training_fraction_split=TRAINING_FRACTION_SPLIT,
validation_fraction_split=VALIDATION_FRACTION_SPLIT,
test_fraction_split=TEST_FRACTION_SPLIT,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.create_custom_python_package_training_job.assert_called_once_with(
staging_bucket=STAGING_BUCKET,
display_name=DISPLAY_NAME,
args=ARGS,
container_uri=CONTAINER_URI,
model_serving_container_image_uri=CONTAINER_URI,
python_package_gcs_uri=PYTHON_PACKAGE_GCS_URI,
python_module_name=PYTHON_MODULE_NAME,
dataset=None,
model_display_name=DISPLAY_NAME_2,
replica_count=REPLICA_COUNT,
machine_type=MACHINE_TYPE,
accelerator_type=ACCELERATOR_TYPE,
accelerator_count=ACCELERATOR_COUNT,
training_fraction_split=TRAINING_FRACTION_SPLIT,
validation_fraction_split=VALIDATION_FRACTION_SPLIT,
test_fraction_split=TEST_FRACTION_SPLIT,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model_serving_container_predict_route=None,
model_serving_container_health_route=None,
model_serving_container_command=None,
model_serving_container_args=None,
model_serving_container_environment_variables=None,
model_serving_container_ports=None,
model_description=None,
model_instance_schema_uri=None,
model_parameters_schema_uri=None,
model_prediction_schema_uri=None,
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
# RUN
annotation_schema_uri=None,
model_labels=None,
base_output_dir=None,
service_account=None,
network=None,
bigquery_destination=None,
environment_variables=None,
boot_disk_type='pd-ssd',
boot_disk_size_gb=100,
training_filter_split=None,
validation_filter_split=None,
test_filter_split=None,
predefined_split_column_name=None,
timestamp_split_column_name=None,
tensorboard=None,
sync=True,
)
class TestVertexAICreateCustomTrainingJobOperator:
@mock.patch(VERTEX_AI_PATH.format("custom_job.CustomJobHook"))
def test_execute(self, mock_hook):
op = CreateCustomTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
staging_bucket=STAGING_BUCKET,
display_name=DISPLAY_NAME,
script_path=PYTHON_PACKAGE,
args=PYTHON_PACKAGE_CMDARGS,
container_uri=CONTAINER_URI,
model_serving_container_image_uri=CONTAINER_URI,
requirements=[],
replica_count=1,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.create_custom_training_job.assert_called_once_with(
staging_bucket=STAGING_BUCKET,
display_name=DISPLAY_NAME,
args=PYTHON_PACKAGE_CMDARGS,
container_uri=CONTAINER_URI,
model_serving_container_image_uri=CONTAINER_URI,
script_path=PYTHON_PACKAGE,
requirements=[],
dataset=None,
model_display_name=None,
replica_count=REPLICA_COUNT,
machine_type=MACHINE_TYPE,
accelerator_type=ACCELERATOR_TYPE,
accelerator_count=ACCELERATOR_COUNT,
training_fraction_split=None,
validation_fraction_split=None,
test_fraction_split=None,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model_serving_container_predict_route=None,
model_serving_container_health_route=None,
model_serving_container_command=None,
model_serving_container_args=None,
model_serving_container_environment_variables=None,
model_serving_container_ports=None,
model_description=None,
model_instance_schema_uri=None,
model_parameters_schema_uri=None,
model_prediction_schema_uri=None,
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
# RUN
annotation_schema_uri=None,
model_labels=None,
base_output_dir=None,
service_account=None,
network=None,
bigquery_destination=None,
environment_variables=None,
boot_disk_type='pd-ssd',
boot_disk_size_gb=100,
training_filter_split=None,
validation_filter_split=None,
test_filter_split=None,
predefined_split_column_name=None,
timestamp_split_column_name=None,
tensorboard=None,
sync=True,
)
class TestVertexAIDeleteCustomTrainingJobOperator:
@mock.patch(VERTEX_AI_PATH.format("custom_job.CustomJobHook"))
def test_execute(self, mock_hook):
op = DeleteCustomTrainingJobOperator(
task_id=TASK_ID,
training_pipeline_id=TRAINING_PIPELINE_ID,
custom_job_id=CUSTOM_JOB_ID,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context={})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.delete_training_pipeline.assert_called_once_with(
training_pipeline=TRAINING_PIPELINE_ID,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
mock_hook.return_value.delete_custom_job.assert_called_once_with(
custom_job=CUSTOM_JOB_ID,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestVertexAIListCustomTrainingJobOperator:
@mock.patch(VERTEX_AI_PATH.format("custom_job.CustomJobHook"))
def test_execute(self, mock_hook):
page_token = "page_token"
page_size = 42
filter = "filter"
read_mask = "read_mask"
op = ListCustomTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
page_size=page_size,
page_token=page_token,
filter=filter,
read_mask=read_mask,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.list_training_pipelines.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
page_size=page_size,
page_token=page_token,
filter=filter,
read_mask=read_mask,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestVertexAICreateDatasetOperator:
@mock.patch(VERTEX_AI_PATH.format("dataset.Dataset.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("dataset.DatasetHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = CreateDatasetOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
dataset=TEST_DATASET,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.create_dataset.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
dataset=TEST_DATASET,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestVertexAIDeleteDatasetOperator:
@mock.patch(VERTEX_AI_PATH.format("dataset.Dataset.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("dataset.DatasetHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = DeleteDatasetOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
dataset_id=TEST_DATASET_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.delete_dataset.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
dataset=TEST_DATASET_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestVertexAIExportDataOperator:
@mock.patch(VERTEX_AI_PATH.format("dataset.Dataset.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("dataset.DatasetHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = ExportDataOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
dataset_id=TEST_DATASET_ID,
export_config=TEST_EXPORT_CONFIG,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.export_data.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
dataset=TEST_DATASET_ID,
export_config=TEST_EXPORT_CONFIG,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestVertexAIImportDataOperator:
@mock.patch(VERTEX_AI_PATH.format("dataset.Dataset.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("dataset.DatasetHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = ImportDataOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
dataset_id=TEST_DATASET_ID,
import_configs=TEST_IMPORT_CONFIG,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.import_data.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
dataset=TEST_DATASET_ID,
import_configs=TEST_IMPORT_CONFIG,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestVertexAIListDatasetsOperator:
@mock.patch(VERTEX_AI_PATH.format("dataset.Dataset.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("dataset.DatasetHook"))
def test_execute(self, mock_hook, to_dict_mock):
page_token = "page_token"
page_size = 42
filter = "filter"
read_mask = "read_mask"
order_by = "order_by"
op = ListDatasetsOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
filter=filter,
page_size=page_size,
page_token=page_token,
read_mask=read_mask,
order_by=order_by,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.list_datasets.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
filter=filter,
page_size=page_size,
page_token=page_token,
read_mask=read_mask,
order_by=order_by,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestVertexAIUpdateDatasetOperator:
@mock.patch(VERTEX_AI_PATH.format("dataset.Dataset.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("dataset.DatasetHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = UpdateDatasetOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
project_id=GCP_PROJECT,
region=GCP_LOCATION,
dataset_id=TEST_DATASET_ID,
dataset=TEST_DATASET,
update_mask=TEST_UPDATE_MASK,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.update_dataset.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
dataset_id=TEST_DATASET_ID,
dataset=TEST_DATASET,
update_mask=TEST_UPDATE_MASK,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestVertexAICreateAutoMLForecastingTrainingJobOperator:
@mock.patch("google.cloud.aiplatform.datasets.TimeSeriesDataset")
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute(self, mock_hook, mock_dataset):
op = CreateAutoMLForecastingTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
display_name=DISPLAY_NAME,
dataset_id=TEST_DATASET_ID,
target_column=TEST_TRAINING_TARGET_COLUMN,
time_column=TEST_TRAINING_TIME_COLUMN,
time_series_identifier_column=TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
unavailable_at_forecast_columns=TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
available_at_forecast_columns=TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
forecast_horizon=TEST_TRAINING_FORECAST_HORIZON,
data_granularity_unit=TEST_TRAINING_DATA_GRANULARITY_UNIT,
data_granularity_count=TEST_TRAINING_DATA_GRANULARITY_COUNT,
sync=True,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_dataset.assert_called_once_with(dataset_name=TEST_DATASET_ID)
mock_hook.return_value.create_auto_ml_forecasting_training_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
display_name=DISPLAY_NAME,
dataset=mock_dataset.return_value,
target_column=TEST_TRAINING_TARGET_COLUMN,
time_column=TEST_TRAINING_TIME_COLUMN,
time_series_identifier_column=TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
unavailable_at_forecast_columns=TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
available_at_forecast_columns=TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
forecast_horizon=TEST_TRAINING_FORECAST_HORIZON,
data_granularity_unit=TEST_TRAINING_DATA_GRANULARITY_UNIT,
data_granularity_count=TEST_TRAINING_DATA_GRANULARITY_COUNT,
optimization_objective=None,
column_specs=None,
column_transformations=None,
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
training_fraction_split=None,
validation_fraction_split=None,
test_fraction_split=None,
predefined_split_column_name=None,
weight_column=None,
time_series_attribute_columns=None,
context_window=None,
export_evaluated_data_items=False,
export_evaluated_data_items_bigquery_destination_uri=None,
export_evaluated_data_items_override_destination=False,
quantiles=None,
validation_options=None,
budget_milli_node_hours=1000,
model_display_name=None,
model_labels=None,
sync=True,
)
class TestVertexAICreateAutoMLImageTrainingJobOperator:
@mock.patch("google.cloud.aiplatform.datasets.ImageDataset")
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute(self, mock_hook, mock_dataset):
op = CreateAutoMLImageTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
display_name=DISPLAY_NAME,
dataset_id=TEST_DATASET_ID,
prediction_type="classification",
multi_label=False,
model_type="CLOUD",
sync=True,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_dataset.assert_called_once_with(dataset_name=TEST_DATASET_ID)
mock_hook.return_value.create_auto_ml_image_training_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
display_name=DISPLAY_NAME,
dataset=mock_dataset.return_value,
prediction_type="classification",
multi_label=False,
model_type="CLOUD",
base_model=None,
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
training_fraction_split=None,
validation_fraction_split=None,
test_fraction_split=None,
training_filter_split=None,
validation_filter_split=None,
test_filter_split=None,
budget_milli_node_hours=None,
model_display_name=None,
model_labels=None,
disable_early_stopping=False,
sync=True,
)
class TestVertexAICreateAutoMLTabularTrainingJobOperator:
@mock.patch("google.cloud.aiplatform.datasets.TabularDataset")
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute(self, mock_hook, mock_dataset):
op = CreateAutoMLTabularTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
display_name=DISPLAY_NAME,
dataset_id=TEST_DATASET_ID,
target_column=None,
optimization_prediction_type=None,
sync=True,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_dataset.assert_called_once_with(dataset_name=TEST_DATASET_ID)
mock_hook.return_value.create_auto_ml_tabular_training_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
display_name=DISPLAY_NAME,
dataset=mock_dataset.return_value,
target_column=None,
optimization_prediction_type=None,
optimization_objective=None,
column_specs=None,
column_transformations=None,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
training_fraction_split=None,
validation_fraction_split=None,
test_fraction_split=None,
predefined_split_column_name=None,
timestamp_split_column_name=None,
weight_column=None,
budget_milli_node_hours=1000,
model_display_name=None,
model_labels=None,
disable_early_stopping=False,
export_evaluated_data_items=False,
export_evaluated_data_items_bigquery_destination_uri=None,
export_evaluated_data_items_override_destination=False,
sync=True,
)
class TestVertexAICreateAutoMLTextTrainingJobOperator:
@mock.patch("google.cloud.aiplatform.datasets.TextDataset")
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute(self, mock_hook, mock_dataset):
op = CreateAutoMLTextTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
display_name=DISPLAY_NAME,
dataset_id=TEST_DATASET_ID,
prediction_type=None,
multi_label=False,
sentiment_max=10,
sync=True,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_dataset.assert_called_once_with(dataset_name=TEST_DATASET_ID)
mock_hook.return_value.create_auto_ml_text_training_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
display_name=DISPLAY_NAME,
dataset=mock_dataset.return_value,
prediction_type=None,
multi_label=False,
sentiment_max=10,
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
training_fraction_split=None,
validation_fraction_split=None,
test_fraction_split=None,
training_filter_split=None,
validation_filter_split=None,
test_filter_split=None,
model_display_name=None,
model_labels=None,
sync=True,
)
class TestVertexAICreateAutoMLVideoTrainingJobOperator:
@mock.patch("google.cloud.aiplatform.datasets.VideoDataset")
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute(self, mock_hook, mock_dataset):
op = CreateAutoMLVideoTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
display_name=DISPLAY_NAME,
dataset_id=TEST_DATASET_ID,
prediction_type="classification",
model_type="CLOUD",
sync=True,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_dataset.assert_called_once_with(dataset_name=TEST_DATASET_ID)
mock_hook.return_value.create_auto_ml_video_training_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
display_name=DISPLAY_NAME,
dataset=mock_dataset.return_value,
prediction_type="classification",
model_type="CLOUD",
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
training_fraction_split=None,
test_fraction_split=None,
training_filter_split=None,
test_filter_split=None,
model_display_name=None,
model_labels=None,
sync=True,
)
class TestVertexAIDeleteAutoMLTrainingJobOperator:
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute(self, mock_hook):
op = DeleteAutoMLTrainingJobOperator(
task_id=TASK_ID,
training_pipeline_id=TRAINING_PIPELINE_ID,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context={})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.delete_training_pipeline.assert_called_once_with(
training_pipeline=TRAINING_PIPELINE_ID,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestVertexAIListAutoMLTrainingJobOperator:
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute(self, mock_hook):
page_token = "page_token"
page_size = 42
filter = "filter"
read_mask = "read_mask"
op = ListAutoMLTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
page_size=page_size,
page_token=page_token,
filter=filter,
read_mask=read_mask,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={'ti': mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook.return_value.list_training_pipelines.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
page_size=page_size,
page_token=page_token,
filter=filter,
read_mask=read_mask,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
|
|
import os
from PyQt4 import QtCore, QtGui
from dependence_repo_manager.ui.ui_repo_manager_window import Ui_repoManagerWindow
import dependence_repo_manager.core.logger as logger
from dependence_repo_manager.core.exceptions import LockException
from dependence_repo_manager.core.dialogs import SettingsDialog, RefreshAllDialog, AddPackageDialog
class MainWindow(QtGui.QMainWindow, Ui_repoManagerWindow):
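    """Main window of the repo manager GUI: wires the generated UI to the manager, models and logger."""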
def __init__(self, manager_instance):
QtGui.QMainWindow.__init__(self)
#view
self.setupUi(self)
#model
self.manager = manager_instance
self.packageModel = QtGui.QStandardItemModel(self.packageTreeView)
self.packageTreeView.setModel(self.packageModel)
self.entryInfoModel = QtGui.QStandardItemModel(self.entryInfoTableView)
self.entryInfoTableView.setModel(self.entryInfoModel)
        #configure the logger with its default configuration
logger.LOGGER.config()
##########################################
#signals and slots
#connect logger signal to mainWindow slot
self.connect(logger.LOGGER, logger.LOGGER.updateLog, self.update_log)
self.connect(self.actionSettings, QtCore.SIGNAL("triggered(bool)"), self.settings)
self.connect(self.actionConnectSTAF, QtCore.SIGNAL("triggered(bool)"), self.start_STAF)
self.connect(self.actionRefresh, QtCore.SIGNAL("triggered(bool)"), self.refresh)
self.connect(self.packageTreeView, QtCore.SIGNAL("customContextMenuRequested(QPoint)"), self.package_view_right_clicked)
self.connect(self.packageTreeView, QtCore.SIGNAL("clicked(QModelIndex)"), self.package_view_left_clicked)
self.connect(self.actionLockPackage, QtCore.SIGNAL("triggered(bool)"), self.lock_package)
self.connect(self.actionUnlockPackage, QtCore.SIGNAL("triggered(bool)"), self.unlock_package)
self.connect(self.actionAddPackage, QtCore.SIGNAL("triggered(bool)"), self.add_package)
self.connect(self.actionRemovePackage, QtCore.SIGNAL("triggered(bool)"), self.remove_package)
self.connect(self.actionAddFile, QtCore.SIGNAL("triggered(bool)"), self.add_file)
self.connect(self.actionRemoveFile, QtCore.SIGNAL("triggered(bool)"), self.remove_file)
self.connect(self.actionChangeFile, QtCore.SIGNAL("triggered(bool)"), self.change_file)
self.connect(self.actionAddDirectory, QtCore.SIGNAL("triggered(bool)"), self.add_directory)
self.connect(self.actionRemoveDirectory, QtCore.SIGNAL("triggered(bool)"), self.remove_directory)
self.connect(self.actionChangeDirectory, QtCore.SIGNAL("triggered(bool)"), self.change_directory)
self.connect(self.actionUndo, QtCore.SIGNAL("triggered(bool)"), self.undo)
self.connect(self.actionCommit, QtCore.SIGNAL("triggered(bool)"), self.commit)
        #when STAF status changes, we should handle it
self.connect(self.manager, self.manager.staf_status_change, self.handle_staf_status_change)
def update_log(self, record):
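        '''Append a formatted log record to the logger view, colour-coded by severity.'''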
levelno = record.levelno
if levelno >= logger.level_name("CRITICAL"):
color_str = '<div style="color:red">%s</div>' # red
elif levelno >= logger.level_name("ERROR"):
color_str = '<div style="color:red">%s</div>' # red
elif levelno >= logger.level_name("WARN"):
color_str = '<div style="color:orange">%s</div>' # orange
elif levelno >= logger.level_name("INFO"):
color_str = '<div style="color:black">%s</div>' # black
elif levelno >= logger.level_name("DEBUG"):
color_str = '<div style="color:gray">%s</div>' # gray
else:
color_str = '<div style="color:black">%s</div>' # black
msg = color_str % ("[ %s ][ %s:%s ] %s" % (logger.level_name(levelno), record.filename, record.lineno, record.getMessage()))
self.loggerEdit.append(msg)
def settings(self):
settings_dialog = SettingsDialog(self)
settings_dialog.exec_()
def handle_staf_status_change(self, staf_status):
        '''
        Handle the STAF status change signal and enable or disable UI actions accordingly.
        '''
logger.LOGGER.debug("Handle STAF status change")
if not staf_status:
            #leave the start STAF button enabled, in case STAF stops unexpectedly
self.actionConnectSTAF.setEnabled(True)
elif staf_status & 0b01000000:
#enable start STAF button, user can start local STAF process manually
self.actionConnectSTAF.setEnabled(True)
elif staf_status & 0b10000000:
            #disable start STAF button, user cannot start STAF process since STAF is not configured correctly
self.actionConnectSTAF.setDisabled(True)
if not staf_status:
#add communicators for repo server and packages
self.manager.repo_server.add_communicator()
elif staf_status & 0b11000000:
#remove communicators for repo server and packages
self.manager.repo_server.remove_communicator()
self.staf_status = staf_status
self.refresh_ui()
def start_STAF(self):
self.manager.start_staf()
def refresh(self):
refresh_dialog = RefreshAllDialog(self)
refresh_dialog.exec_()
def refresh_ui(self):
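        '''Rebuild the package tree from the repo server model, restoring expanded nodes and scroll position.'''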
package_icon = QtGui.QIcon()
package_icon.addPixmap(QtGui.QPixmap(":icons/icons/package.png"))
package_add_icon = QtGui.QIcon()
package_add_icon.addPixmap(QtGui.QPixmap(":icons/icons/package_add.png"))
package_remove_icon = QtGui.QIcon()
package_remove_icon.addPixmap(QtGui.QPixmap(":icons/icons/package_remove.png"))
package_lock_icon = QtGui.QIcon()
package_lock_icon.addPixmap(QtGui.QPixmap(":icons/icons/package_locked.png"))
file_icon = QtGui.QIcon()
file_icon.addPixmap(QtGui.QPixmap(":icons/icons/file.png"))
file_add_icon = QtGui.QIcon()
file_add_icon.addPixmap(QtGui.QPixmap(":icons/icons/file_add.png"))
file_remove_icon = QtGui.QIcon()
file_remove_icon.addPixmap(QtGui.QPixmap(":icons/icons/file_remove.png"))
file_modified_icon = QtGui.QIcon()
file_modified_icon.addPixmap(QtGui.QPixmap(":icons/icons/file_modified.png"))
folder_icon = QtGui.QIcon()
folder_icon.addPixmap(QtGui.QPixmap(":icons/icons/folder.png"))
folder_add_icon = QtGui.QIcon()
folder_add_icon.addPixmap(QtGui.QPixmap(":icons/icons/folder_add.png"))
folder_remove_icon = QtGui.QIcon()
folder_remove_icon.addPixmap(QtGui.QPixmap(":icons/icons/folder_remove.png"))
folder_modified_icon = QtGui.QIcon()
folder_modified_icon.addPixmap(QtGui.QPixmap(":icons/icons/folder_modified.png"))
        #remember which package nodes are expanded
        #so they can be restored after the model is rebuilt
expanded_packages = []
root_package_item = self.packageModel.item(0)
def get_expanded_packages(package_item):
package_item_index = self.packageModel.indexFromItem(package_item)
if self.packageTreeView.isExpanded(package_item_index):
package_item_data = package_item.data().toPyObject()
expanded_packages.append(package_item_data.package_index)
for i in range(package_item.rowCount()):
item = package_item.child(i)
if item.hasChildren():
get_expanded_packages(item)
        if root_package_item is not None:
get_expanded_packages(root_package_item)
#logger.LOGGER.debug("Expanded package items: %s" % repr(expanded_packages))
#keep scroll bar location
horizontal_value = self.packageTreeView.horizontalScrollBar().value()
vertical_value = self.packageTreeView.verticalScrollBar().value()
self.packageModel.clear()
root_package = self.manager.repo_server.root_package
        if root_package is not None:
root_package_item = QtGui.QStandardItem(package_icon, QtCore.QString("%0").arg(root_package.name))
root_package_item.setData(QtCore.QVariant(root_package))
self.packageModel.appendRow(root_package_item)
def built_package_model(package, package_item):
#restore expanded node
if package.package_index in expanded_packages:
package_item_index = self.packageModel.indexFromItem(package_item)
self.packageTreeView.expand(package_item_index)
                #list the package contents when the STAF status is normal
if not self.staf_status:
for content_name, content in package.all_contents.items():
if content.type == "F":
if content.action == "normal":
content_icon = file_icon
elif content.action == "add":
content_icon = file_add_icon
elif content.action == "delete":
content_icon = file_remove_icon
elif content.action == "modify":
content_icon = file_modified_icon
elif content.type == "D":
if content.action == "normal":
content_icon = folder_icon
elif content.action == "add":
content_icon = folder_add_icon
elif content.action == "delete":
content_icon = folder_remove_icon
elif content.action == "modify":
content_icon = folder_modified_icon
content_item = QtGui.QStandardItem(content_icon, QtCore.QString("%0").arg(content_name))
content_item.setData(QtCore.QVariant(content))
package_item.appendRow(content_item)
if len(package.sub_packages) == 0:
return
for sub_package_name, sub_package in package.sub_packages.items():
if sub_package.status == "commit":
icon = package_icon
elif sub_package.status == "add":
icon = package_add_icon
elif sub_package.status == "delete":
icon = package_remove_icon
sub_package_item = QtGui.QStandardItem(icon, QtCore.QString("%0").arg(sub_package_name))
sub_package_item.setData(QtCore.QVariant(sub_package))
#list child packages
package_item.appendRow(sub_package_item)
                    #recurse into child packages
built_package_model(sub_package, sub_package_item)
built_package_model(root_package, root_package_item)
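        #restore the scroll bar positions saved before the rebuild (the range is set first so the saved values are not clamped)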
self.packageTreeView.horizontalScrollBar().setRange(0, horizontal_value)
self.packageTreeView.verticalScrollBar().setRange(0, vertical_value)
self.packageTreeView.horizontalScrollBar().setValue(horizontal_value)
self.packageTreeView.verticalScrollBar().setValue(vertical_value)
def package_view_left_clicked(self, index):
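        '''Show the attributes of the clicked package, file or directory in the entry info table.'''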
item = self.packageModel.itemFromIndex(index)
        if item is not None:
self.entryInfoModel.clear()
item_object = item.data().toPyObject()
if item_object.type == "P":
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("Package Index"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.package_index))
self.entryInfoModel.appendRow([name_item, value_item])
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("Status"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.status))
self.entryInfoModel.appendRow([name_item, value_item])
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("Compatibility"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.compatibility))
self.entryInfoModel.appendRow([name_item, value_item])
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("DUT path"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.dut_path))
self.entryInfoModel.appendRow([name_item, value_item])
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("Need install?"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.need_install))
self.entryInfoModel.appendRow([name_item, value_item])
if item_object.need_install == "TRUE":
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("Action"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.action))
self.entryInfoModel.appendRow([name_item, value_item])
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("32bit install path"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.install_path_for_32bit))
self.entryInfoModel.appendRow([name_item, value_item])
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("64bit install path"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.install_path_for_64bit))
self.entryInfoModel.appendRow([name_item, value_item])
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("License key"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.license_key))
self.entryInfoModel.appendRow([name_item, value_item])
elif item_object.type == "F" or item_object.type == "D":
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("Name"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.name))
self.entryInfoModel.appendRow([name_item, value_item])
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("Type"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.type))
self.entryInfoModel.appendRow([name_item, value_item])
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("Status"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.action))
self.entryInfoModel.appendRow([name_item, value_item])
name_item = QtGui.QStandardItem(QtCore.QString("%0").arg("Package"))
value_item = QtGui.QStandardItem(QtCore.QString("%0").arg(item_object.parent.package_index))
self.entryInfoModel.appendRow([name_item, value_item])
def package_view_right_clicked(self, point):
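        '''Pop up a context menu for the clicked entry, enabling only the actions its lock and change state allows.'''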
index = self.packageTreeView.indexAt(point)
item = self.packageModel.itemFromIndex(index)
if item is None:
return
else:
item_object = item.data().toPyObject()
if item_object.type == "P":
context_menu_for_package = QtGui.QMenu()
context_menu_for_package.addAction(self.actionLockPackage)
context_menu_for_package.addAction(self.actionUnlockPackage)
context_menu_for_package.addSeparator()
context_menu_for_package.addAction(self.actionRemovePackage)
context_menu_for_package.addSeparator()
context_menu_for_package.addAction(self.actionAddPackage)
context_menu_for_package.addAction(self.actionAddFile)
context_menu_for_package.addAction(self.actionAddDirectory)
context_menu_for_package.addSeparator()
context_menu_for_package.addAction(self.actionUndo)
context_menu_for_package.addSeparator()
context_menu_for_package.addAction(self.actionCommit)
if item_object.isLocked():
self.actionLockPackage.setDisabled(True)
self.actionUnlockPackage.setEnabled(True)
                    if item_object.parent is not None and item_object.parent.isLocked():
self.actionRemovePackage.setEnabled(True)
else:
self.actionRemovePackage.setDisabled(True)
self.actionAddPackage.setEnabled(True)
self.actionAddFile.setEnabled(True)
self.actionAddDirectory.setEnabled(True)
self.actionUndo.setEnabled(True)
self.actionCommit.setEnabled(True)
else:
self.actionLockPackage.setEnabled(True)
self.actionUnlockPackage.setDisabled(True)
self.actionRemovePackage.setDisabled(True)
self.actionAddPackage.setDisabled(True)
self.actionAddFile.setDisabled(True)
self.actionAddDirectory.setDisabled(True)
self.actionUndo.setDisabled(True)
self.actionCommit.setDisabled(True)
context_menu_for_package.exec_(self.packageTreeView.mapToGlobal(point))
elif item_object.type == "F":
context_menu_for_file = QtGui.QMenu()
context_menu_for_file.addAction(self.actionRemoveFile)
context_menu_for_file.addSeparator()
context_menu_for_file.addAction(self.actionUndo)
if item_object.parent.isLocked():
self.actionRemoveFile.setEnabled(True)
else:
self.actionRemoveFile.setDisabled(True)
if item_object.action != "normal":
self.actionUndo.setEnabled(True)
else:
self.actionUndo.setDisabled(True)
context_menu_for_file.exec_(self.packageTreeView.mapToGlobal(point))
elif item_object.type == "D":
context_menu_for_directory = QtGui.QMenu()
context_menu_for_directory.addAction(self.actionRemoveDirectory)
context_menu_for_directory.addSeparator()
context_menu_for_directory.addAction(self.actionUndo)
if item_object.parent.isLocked():
self.actionRemoveDirectory.setEnabled(True)
else:
self.actionRemoveDirectory.setDisabled(True)
if item_object.action != "normal":
self.actionUndo.setEnabled(True)
else:
self.actionUndo.setDisabled(True)
context_menu_for_directory.exec_(self.packageTreeView.mapToGlobal(point))
def lock_package(self):
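        '''Lock every selected package, ignoring packages that cannot be locked.'''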
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index)
item_object = item.data().toPyObject()
try:
item_object.lock()
except LockException:
pass
self.refresh_ui()
def unlock_package(self):
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index)
item_object = item.data().toPyObject()
try:
item_object.unlock()
except LockException:
pass
self.refresh_ui()
def add_package(self):
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index)
item_object = item.data().toPyObject()
add_dialog = AddPackageDialog(self, item_object)
add_dialog.exec_()
self.refresh_ui()
def remove_package(self):
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index)
item_object = item.data().toPyObject()
            if item_object.parent is not None:
item_object.parent.delete_package(item_object)
self.refresh_ui()
def add_file(self):
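        '''Ask for a file on disk and add it to each selected package.'''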
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index)
item_object = item.data().toPyObject()
file_path = str(QtGui.QFileDialog.getOpenFileName(self, "Add File to dependence repo"))
if os.path.isfile(file_path):
item_object.add_file(file_path)
self.refresh_ui()
def remove_file(self):
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index)
item_object = item.data().toPyObject()
package_object = item.parent().data().toPyObject()
file_name = item_object.name
package_object.delete_file(file_name)
self.refresh_ui()
def change_file(self):
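        '''Pick a replacement file and add it to the parent package of the selected entry.'''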
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index).parent()
item_object = item.data().toPyObject()
            file_path = str(QtGui.QFileDialog.getOpenFileName(self, "Change File in dependence repo"))
if os.path.isfile(file_path):
item_object.add_file(file_path)
self.refresh_ui()
def add_directory(self):
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index)
item_object = item.data().toPyObject()
directory_path = str(QtGui.QFileDialog.getExistingDirectory(self, "Add Directory to dependence repo"))
if os.path.isdir(directory_path):
item_object.add_directory(directory_path)
self.refresh_ui()
def remove_directory(self):
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index)
item_object = item.data().toPyObject()
package_object = item.parent().data().toPyObject()
directory_name = item_object.name
package_object.delete_directory(directory_name)
self.refresh_ui()
def change_directory(self):
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index).parent()
item_object = item.data().toPyObject()
            directory_path = str(QtGui.QFileDialog.getExistingDirectory(self, "Change Directory in dependence repo"))
if os.path.isdir(directory_path):
item_object.add_directory(directory_path)
self.refresh_ui()
def undo(self):
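        '''Revert pending changes for the selected packages, files or directories.'''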
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index)
item_object = item.data().toPyObject()
logger.LOGGER.debug("Undo changes for entry: %s" % item_object.name)
if item_object.type == "P":
item_object.undo_all()
else:
item_object.parent.undo_change(item_object.name)
self.refresh_ui()
def commit(self):
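        '''Commit the pending changes of every selected package.'''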
for selected_index in self.packageTreeView.selectedIndexes():
item = self.packageModel.itemFromIndex(selected_index)
item_object = item.data().toPyObject()
item_object.commit_changes()
self.refresh_ui()