repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = null) |
---|---|---|---|---|
jrgdiz/cardwalker | refs/heads/master | grammar/mana/decl.py | 1 | from pyparsing import *
import act
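# Forward() creates placeholder rules so mutually recursive grammar pieces can be
# referenced before they are defined; setParseAction() attaches the matching
# callbacks from act.py, which run whenever the corresponding rule matches.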
colorname = Forward().setParseAction(act.colorname)
noncolorname = Forward().setParseAction(act.noncolorname)
colorfeature = Forward().setParseAction(act.colorfeature)
color = Forward()
manasymbol = Forward().setParseAction(act.manasymbol)
tapsymbol = Forward().setParseAction(act.tapsymbol)
untapsymbol = Forward().setParseAction(act.untapsymbol)
snowsymbol = Forward().setParseAction(act.snowsymbol)
phyletter = Forward()
physymbol = Forward().setParseAction(act.physymbol)
chybsymbol = Forward().setParseAction(act.chybsymbol)
hybsymbol = Forward().setParseAction(act.hybsymbol)
numcost = Forward().setParseAction(act.numcost)
xcost = Forward().setParseAction(act.xcost)
singlecost = Forward()
manapayment = Forward().setParseAction(act.manapayment) |
jr55662003/My_Rosalind | refs/heads/master | LEXF.py | 2 | '''
Given: A collection of at most 10 symbols defining an ordered alphabet,
and a positive integer n .
Return: All strings of length n that can be formed from the alphabet, ordered lexicographically.
'''
from itertools import product
def lexi_kmers(symbol, n):
lexi = list(product(symbol, repeat = n))
new_lexi = []
for x in lexi:
temp = ""
for i in range(0, n):
temp += x[i]
new_lexi.append(temp)
temp = ""
return new_lexi
if __name__ == '__main__':
with open(input("file input: ")) as f:
symbol = "".join(f.readline().split())
n = int(f.readline())
lexi = lexi_kmers(symbol, n)
with open("output_lexf.txt", "w") as o:
for l in lexi:
o.write(l + "\n")
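# Illustration (hypothetical input): for an alphabet line "T A G C" and n = 2,
# lexi_kmers("TAGC", 2) yields ['TT', 'TA', 'TG', 'TC', 'AT', 'AA', ...]; since
# itertools.product follows the order of `symbol`, the output is lexicographic
# with respect to the given alphabet ordering.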
|
zhouzhenghui/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/test/test_reprlib.py | 56 | """
Test cases for the repr module
Nick Mathewson
"""
import sys
import os
import shutil
import unittest
from test.support import run_unittest
from reprlib import repr as r # Don't shadow builtin repr
from reprlib import Repr
from reprlib import recursive_repr
def nestedTuple(nesting):
t = ()
for i in range(nesting):
t = (t,)
return t
class ReprTests(unittest.TestCase):
def test_string(self):
eq = self.assertEqual
eq(r("abc"), "'abc'")
eq(r("abcdefghijklmnop"),"'abcdefghijklmnop'")
s = "a"*30+"b"*30
expected = repr(s)[:13] + "..." + repr(s)[-14:]
eq(r(s), expected)
eq(r("\"'"), repr("\"'"))
s = "\""*30+"'"*100
expected = repr(s)[:13] + "..." + repr(s)[-14:]
eq(r(s), expected)
def test_tuple(self):
eq = self.assertEqual
eq(r((1,)), "(1,)")
t3 = (1, 2, 3)
eq(r(t3), "(1, 2, 3)")
r2 = Repr()
r2.maxtuple = 2
expected = repr(t3)[:-2] + "...)"
eq(r2.repr(t3), expected)
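        # Repr() instances expose per-type limits such as maxtuple, maxlist and
        # maxdict; the module-level reprlib.repr used as r() above relies on the
        # default limits, which test_container exercises next.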
def test_container(self):
from array import array
from collections import deque
eq = self.assertEqual
# Tuples give up after 6 elements
eq(r(()), "()")
eq(r((1,)), "(1,)")
eq(r((1, 2, 3)), "(1, 2, 3)")
eq(r((1, 2, 3, 4, 5, 6)), "(1, 2, 3, 4, 5, 6)")
eq(r((1, 2, 3, 4, 5, 6, 7)), "(1, 2, 3, 4, 5, 6, ...)")
# Lists give up after 6 as well
eq(r([]), "[]")
eq(r([1]), "[1]")
eq(r([1, 2, 3]), "[1, 2, 3]")
eq(r([1, 2, 3, 4, 5, 6]), "[1, 2, 3, 4, 5, 6]")
eq(r([1, 2, 3, 4, 5, 6, 7]), "[1, 2, 3, 4, 5, 6, ...]")
# Sets give up after 6 as well
eq(r(set([])), "set([])")
eq(r(set([1])), "set([1])")
eq(r(set([1, 2, 3])), "set([1, 2, 3])")
eq(r(set([1, 2, 3, 4, 5, 6])), "set([1, 2, 3, 4, 5, 6])")
eq(r(set([1, 2, 3, 4, 5, 6, 7])), "set([1, 2, 3, 4, 5, 6, ...])")
# Frozensets give up after 6 as well
eq(r(frozenset([])), "frozenset([])")
eq(r(frozenset([1])), "frozenset([1])")
eq(r(frozenset([1, 2, 3])), "frozenset([1, 2, 3])")
eq(r(frozenset([1, 2, 3, 4, 5, 6])), "frozenset([1, 2, 3, 4, 5, 6])")
eq(r(frozenset([1, 2, 3, 4, 5, 6, 7])), "frozenset([1, 2, 3, 4, 5, 6, ...])")
# collections.deque after 6
eq(r(deque([1, 2, 3, 4, 5, 6, 7])), "deque([1, 2, 3, 4, 5, 6, ...])")
# Dictionaries give up after 4.
eq(r({}), "{}")
d = {'alice': 1, 'bob': 2, 'charles': 3, 'dave': 4}
eq(r(d), "{'alice': 1, 'bob': 2, 'charles': 3, 'dave': 4}")
d['arthur'] = 1
eq(r(d), "{'alice': 1, 'arthur': 1, 'bob': 2, 'charles': 3, ...}")
# array.array after 5.
eq(r(array('i')), "array('i', [])")
eq(r(array('i', [1])), "array('i', [1])")
eq(r(array('i', [1, 2])), "array('i', [1, 2])")
eq(r(array('i', [1, 2, 3])), "array('i', [1, 2, 3])")
eq(r(array('i', [1, 2, 3, 4])), "array('i', [1, 2, 3, 4])")
eq(r(array('i', [1, 2, 3, 4, 5])), "array('i', [1, 2, 3, 4, 5])")
eq(r(array('i', [1, 2, 3, 4, 5, 6])),
"array('i', [1, 2, 3, 4, 5, ...])")
def test_numbers(self):
eq = self.assertEqual
eq(r(123), repr(123))
eq(r(123), repr(123))
eq(r(1.0/3), repr(1.0/3))
n = 10**100
expected = repr(n)[:18] + "..." + repr(n)[-19:]
eq(r(n), expected)
def test_instance(self):
eq = self.assertEqual
i1 = ClassWithRepr("a")
eq(r(i1), repr(i1))
i2 = ClassWithRepr("x"*1000)
expected = repr(i2)[:13] + "..." + repr(i2)[-14:]
eq(r(i2), expected)
i3 = ClassWithFailingRepr()
eq(r(i3), ("<ClassWithFailingRepr instance at %x>"%id(i3)))
s = r(ClassWithFailingRepr)
self.assertTrue(s.startswith("<class "))
self.assertTrue(s.endswith(">"))
self.assertIn(s.find("..."), [12, 13])
def test_lambda(self):
self.assertTrue(repr(lambda x: x).startswith(
"<function <lambda"))
# XXX anonymous functions? see func_repr
def test_builtin_function(self):
eq = self.assertEqual
# Functions
eq(repr(hash), '<built-in function hash>')
# Methods
self.assertTrue(repr(''.split).startswith(
'<built-in method split of str object at 0x'))
def test_range(self):
eq = self.assertEqual
eq(repr(range(1)), 'range(0, 1)')
eq(repr(range(1, 2)), 'range(1, 2)')
eq(repr(range(1, 4, 3)), 'range(1, 4, 3)')
def test_nesting(self):
eq = self.assertEqual
# everything is meant to give up after 6 levels.
eq(r([[[[[[[]]]]]]]), "[[[[[[[]]]]]]]")
eq(r([[[[[[[[]]]]]]]]), "[[[[[[[...]]]]]]]")
eq(r(nestedTuple(6)), "(((((((),),),),),),)")
eq(r(nestedTuple(7)), "(((((((...),),),),),),)")
eq(r({ nestedTuple(5) : nestedTuple(5) }),
"{((((((),),),),),): ((((((),),),),),)}")
eq(r({ nestedTuple(6) : nestedTuple(6) }),
"{((((((...),),),),),): ((((((...),),),),),)}")
eq(r([[[[[[{}]]]]]]), "[[[[[[{}]]]]]]")
eq(r([[[[[[[{}]]]]]]]), "[[[[[[[...]]]]]]]")
def test_cell(self):
# XXX Hmm? How to get at a cell object?
pass
def test_descriptors(self):
eq = self.assertEqual
# method descriptors
eq(repr(dict.items), "<method 'items' of 'dict' objects>")
# XXX member descriptors
# XXX attribute descriptors
# XXX slot descriptors
# static and class methods
class C:
def foo(cls): pass
x = staticmethod(C.foo)
self.assertTrue(repr(x).startswith('<staticmethod object at 0x'))
x = classmethod(C.foo)
self.assertTrue(repr(x).startswith('<classmethod object at 0x'))
def test_unsortable(self):
# Repr.repr() used to call sorted() on sets, frozensets and dicts
# without taking into account that not all objects are comparable
x = set([1j, 2j, 3j])
y = frozenset(x)
z = {1j: 1, 2j: 2}
r(x)
r(y)
r(z)
def touch(path, text=''):
fp = open(path, 'w')
fp.write(text)
fp.close()
class LongReprTest(unittest.TestCase):
def setUp(self):
longname = 'areallylongpackageandmodulenametotestreprtruncation'
self.pkgname = os.path.join(longname)
self.subpkgname = os.path.join(longname, longname)
# Make the package and subpackage
shutil.rmtree(self.pkgname, ignore_errors=True)
os.mkdir(self.pkgname)
touch(os.path.join(self.pkgname, '__init__.py'))
shutil.rmtree(self.subpkgname, ignore_errors=True)
os.mkdir(self.subpkgname)
touch(os.path.join(self.subpkgname, '__init__.py'))
# Remember where we are
self.here = os.getcwd()
sys.path.insert(0, self.here)
def tearDown(self):
actions = []
for dirpath, dirnames, filenames in os.walk(self.pkgname):
for name in dirnames + filenames:
actions.append(os.path.join(dirpath, name))
actions.append(self.pkgname)
actions.sort()
actions.reverse()
for p in actions:
if os.path.isdir(p):
os.rmdir(p)
else:
os.remove(p)
del sys.path[0]
def test_module(self):
eq = self.assertEqual
touch(os.path.join(self.subpkgname, self.pkgname + '.py'))
from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import areallylongpackageandmodulenametotestreprtruncation
eq(repr(areallylongpackageandmodulenametotestreprtruncation),
"<module '%s' from '%s'>" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__))
eq(repr(sys), "<module 'sys' (built-in)>")
def test_type(self):
eq = self.assertEqual
touch(os.path.join(self.subpkgname, 'foo.py'), '''\
class foo(object):
pass
''')
from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import foo
eq(repr(foo.foo),
"<class '%s.foo'>" % foo.__name__)
def test_object(self):
# XXX Test the repr of a type with a really long tp_name but with no
# tp_repr. WIBNI we had ::Inline? :)
pass
def test_class(self):
touch(os.path.join(self.subpkgname, 'bar.py'), '''\
class bar:
pass
''')
from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import bar
# Module name may be prefixed with "test.", depending on how run.
self.assertEqual(repr(bar.bar), "<class '%s.bar'>" % bar.__name__)
def test_instance(self):
touch(os.path.join(self.subpkgname, 'baz.py'), '''\
class baz:
pass
''')
from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import baz
ibaz = baz.baz()
self.assertTrue(repr(ibaz).startswith(
"<%s.baz object at 0x" % baz.__name__))
def test_method(self):
eq = self.assertEqual
touch(os.path.join(self.subpkgname, 'qux.py'), '''\
class aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:
def amethod(self): pass
''')
from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import qux
# Unbound methods first
self.assertTrue(repr(qux.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.amethod).startswith(
'<function amethod'))
# Bound method next
iqux = qux.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa()
self.assertTrue(repr(iqux.amethod).startswith(
'<bound method aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.amethod of <%s.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa object at 0x' \
% (qux.__name__,) ))
def test_builtin_function(self):
# XXX test built-in functions and methods with really long names
pass
class ClassWithRepr:
def __init__(self, s):
self.s = s
def __repr__(self):
return "ClassWithRepr(%r)" % self.s
class ClassWithFailingRepr:
def __repr__(self):
raise Exception("This should be caught by Repr.repr_instance")
class MyContainer:
'Helper class for TestRecursiveRepr'
def __init__(self, values):
self.values = list(values)
def append(self, value):
self.values.append(value)
@recursive_repr()
def __repr__(self):
return '<' + ', '.join(map(str, self.values)) + '>'
class MyContainer2(MyContainer):
@recursive_repr('+++')
def __repr__(self):
return '<' + ', '.join(map(str, self.values)) + '>'
class TestRecursiveRepr(unittest.TestCase):
def test_recursive_repr(self):
m = MyContainer(list('abcde'))
m.append(m)
m.append('x')
m.append(m)
self.assertEqual(repr(m), '<a, b, c, d, e, ..., x, ...>')
m = MyContainer2(list('abcde'))
m.append(m)
m.append('x')
m.append(m)
self.assertEqual(repr(m), '<a, b, c, d, e, +++, x, +++>')
def test_main():
run_unittest(ReprTests)
run_unittest(LongReprTest)
run_unittest(TestRecursiveRepr)
if __name__ == "__main__":
test_main()
|
Aravinthu/odoo | refs/heads/master | addons/website_sale/__init__.py | 1315 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import controllers
from . import models
|
dev-lord/estruturas-dados-com-python3 | refs/heads/master | aula04/desempacotamento.py | 1 | """
Supplementary material: https://pythonhelp.wordpress.com/2013/01/10/desempacotamento-de-tupla/
"""
lista = [1, 2, 3, 4]
print('tipo: {0} | valor: {1}'.format(type(lista), lista))
# unpacking the list and assigning its items to variables
a, b, c, d = lista
print('tipo: {0} | a: {1}'.format(type(a), a))
print('tipo: {0} | b: {1}'.format(type(b), b))
print('tipo: {0} | c: {1}'.format(type(c), c))
print('tipo: {0} | d: {1}'.format(type(d), d))
# using underscore _ in the unpacking to ignore the third item of the list
x, y, _, z = lista
print('tipo: {0} | x: {1}'.format(type(x), x))
print('tipo: {0} | y: {1}'.format(type(y), y))
print('tipo: {0} | z: {1}'.format(type(z), z))
# unpacking only the first and last elements
u, _, _, p = lista
print('tipo: {0} | u: {1}'.format(type(u), u))
print('tipo: {0} | p: {1}'.format(type(p), p))
# unpacking strings
nome = "Robson"
primeira_letra, _, _, _, penultima_letra, ultima_letra = nome
print('tipo: {0} | nome: {1}'.format(type(nome), nome))
print('tipo: {0} | primeira letra: {1}'.format(type(primeira_letra), primeira_letra))
print('tipo: {0} | penultima letra: {1}'.format(type(penultima_letra), penultima_letra))
print('tipo: {0} | ultima letra: {1}'.format(type(ultima_letra), ultima_letra))
# unpacking a function call with multiple return values
def funcao(x: int, y: int):
return x ** 2, y ** 2
print(funcao(2, 4))
print(type(funcao(2, 4)))  # <-- returning multiple values from a function actually produces a single tuple containing them
# unpacking the function's return value into separate variables
retorno1, retorno2 = funcao(5, 6)
print(retorno1)
print(retorno2)
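# Note: the "multiple return" above is really a single tuple (x ** 2, y ** 2);
# the assignment retorno1, retorno2 = funcao(5, 6) is just tuple unpacking again.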
|
KaranToor/MA450 | refs/heads/master | google-cloud-sdk/platform/gsutil/third_party/boto/boto/fps/exception.py | 239 | from boto.exception import BotoServerError
class ResponseErrorFactory(BotoServerError):
def __new__(cls, *args, **kw):
error = BotoServerError(*args, **kw)
newclass = globals().get(error.error_code, ResponseError)
obj = newclass.__new__(newclass, *args, **kw)
obj.__dict__.update(error.__dict__)
return obj
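# Sketch of the dispatch above (illustrative, not part of boto): constructing
# ResponseErrorFactory(status, reason, body) first builds a BotoServerError, then
# looks up error.error_code in this module's globals(); if a class of that name
# exists (e.g. InvalidParams below) it is instantiated instead, otherwise the
# generic ResponseError is returned.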
class ResponseError(BotoServerError):
"""Undefined response error.
"""
retry = False
def __repr__(self):
return '{0}({1}, {2},\n\t{3})'.format(self.__class__.__name__,
self.status, self.reason,
self.error_message)
def __str__(self):
return 'FPS Response Error: {0.status} {0.__class__.__name__} {1}\n' \
'{2}\n' \
'{0.error_message}'.format(self,
self.retry and '(Retriable)' or '',
self.__doc__.strip())
class RetriableResponseError(ResponseError):
retry = True
class AccessFailure(RetriableResponseError):
"""Account cannot be accessed.
"""
class AccountClosed(RetriableResponseError):
"""Account is not active.
"""
class AccountLimitsExceeded(RetriableResponseError):
"""The spending or receiving limit on the account is exceeded.
"""
class AmountOutOfRange(ResponseError):
"""The transaction amount is more than the allowed range.
"""
class AuthFailure(RetriableResponseError):
"""AWS was not able to validate the provided access credentials.
"""
class ConcurrentModification(RetriableResponseError):
"""A retriable error can happen when two processes try to modify the
same data at the same time.
"""
class DuplicateRequest(ResponseError):
"""A different request associated with this caller reference already
exists.
"""
class InactiveInstrument(ResponseError):
"""Payment instrument is inactive.
"""
class IncompatibleTokens(ResponseError):
"""The transaction could not be completed because the tokens have
incompatible payment instructions.
"""
class InstrumentAccessDenied(ResponseError):
"""The external calling application is not the recipient for this
postpaid or prepaid instrument.
"""
class InstrumentExpired(ResponseError):
"""The prepaid or the postpaid instrument has expired.
"""
class InsufficientBalance(RetriableResponseError):
"""The sender, caller, or recipient's account balance has
insufficient funds to complete the transaction.
"""
class InternalError(RetriableResponseError):
"""A retriable error that happens due to some transient problem in
the system.
"""
class InvalidAccountState(RetriableResponseError):
"""The account is either suspended or closed.
"""
class InvalidAccountState_Caller(RetriableResponseError):
"""The developer account cannot participate in the transaction.
"""
class InvalidAccountState_Recipient(RetriableResponseError):
"""Recipient account cannot participate in the transaction.
"""
class InvalidAccountState_Sender(RetriableResponseError):
"""Sender account cannot participate in the transaction.
"""
class InvalidCallerReference(ResponseError):
"""The Caller Reference does not have a token associated with it.
"""
class InvalidClientTokenId(ResponseError):
"""The AWS Access Key Id you provided does not exist in our records.
"""
class InvalidDateRange(ResponseError):
"""The end date specified is before the start date or the start date
is in the future.
"""
class InvalidParams(ResponseError):
"""One or more parameters in the request is invalid.
"""
class InvalidPaymentInstrument(ResponseError):
"""The payment method used in the transaction is invalid.
"""
class InvalidPaymentMethod(ResponseError):
"""Specify correct payment method.
"""
class InvalidRecipientForCCTransaction(ResponseError):
"""This account cannot receive credit card payments.
"""
class InvalidSenderRoleForAccountType(ResponseError):
"""This token cannot be used for this operation.
"""
class InvalidTokenId(ResponseError):
"""You did not install the token that you are trying to cancel.
"""
class InvalidTokenId_Recipient(ResponseError):
"""The recipient token specified is either invalid or canceled.
"""
class InvalidTokenId_Sender(ResponseError):
"""The sender token specified is either invalid or canceled or the
token is not active.
"""
class InvalidTokenType(ResponseError):
"""An invalid operation was performed on the token, for example,
getting the token usage information on a single use token.
"""
class InvalidTransactionId(ResponseError):
"""The specified transaction could not be found or the caller did not
execute the transaction or this is not a Pay or Reserve call.
"""
class InvalidTransactionState(ResponseError):
"""The transaction is not complete, or it has temporarily failed.
"""
class NotMarketplaceApp(RetriableResponseError):
"""This is not an marketplace application or the caller does not
match either the sender or the recipient.
"""
class OriginalTransactionFailed(ResponseError):
"""The original transaction has failed.
"""
class OriginalTransactionIncomplete(RetriableResponseError):
"""The original transaction is still in progress.
"""
class PaymentInstrumentNotCC(ResponseError):
"""The payment method specified in the transaction is not a credit
card. You can only use a credit card for this transaction.
"""
class PaymentMethodNotDefined(ResponseError):
"""Payment method is not defined in the transaction.
"""
class PrepaidFundingLimitExceeded(RetriableResponseError):
"""An attempt has been made to fund the prepaid instrument
at a level greater than its recharge limit.
"""
class RefundAmountExceeded(ResponseError):
"""The refund amount is more than the refundable amount.
"""
class SameSenderAndRecipient(ResponseError):
"""The sender and receiver are identical, which is not allowed.
"""
class SameTokenIdUsedMultipleTimes(ResponseError):
"""This token is already used in earlier transactions.
"""
class SenderNotOriginalRecipient(ResponseError):
"""The sender in the refund transaction is not
the recipient of the original transaction.
"""
class SettleAmountGreaterThanDebt(ResponseError):
"""The amount being settled or written off is
greater than the current debt.
"""
class SettleAmountGreaterThanReserveAmount(ResponseError):
"""The amount being settled is greater than the reserved amount.
"""
class SignatureDoesNotMatch(ResponseError):
"""The request signature calculated by Amazon does not match the
signature you provided.
"""
class TokenAccessDenied(ResponseError):
"""Permission to cancel the token is denied.
"""
class TokenNotActive(ResponseError):
"""The token is canceled.
"""
class TokenNotActive_Recipient(ResponseError):
"""The recipient token is canceled.
"""
class TokenNotActive_Sender(ResponseError):
"""The sender token is canceled.
"""
class TokenUsageError(ResponseError):
"""The token usage limit is exceeded.
"""
class TransactionDenied(ResponseError):
"""The transaction is not allowed.
"""
class TransactionFullyRefundedAlready(ResponseError):
"""The transaction has already been completely refunded.
"""
class TransactionTypeNotRefundable(ResponseError):
"""You cannot refund this transaction.
"""
class UnverifiedAccount_Recipient(ResponseError):
"""The recipient's account must have a verified bank account or a
credit card before this transaction can be initiated.
"""
class UnverifiedAccount_Sender(ResponseError):
"""The sender's account must have a verified U.S. credit card or
a verified U.S bank account before this transaction can be
initiated.
"""
class UnverifiedBankAccount(ResponseError):
"""A verified bank account should be used for this transaction.
"""
class UnverifiedEmailAddress_Caller(ResponseError):
"""The caller account must have a verified email address.
"""
class UnverifiedEmailAddress_Recipient(ResponseError):
"""The recipient account must have a verified
email address for receiving payments.
"""
class UnverifiedEmailAddress_Sender(ResponseError):
"""The sender account must have a verified
email address for this payment.
"""
|
tensorflow/addons | refs/heads/master | tensorflow_addons/activations/hardshrink.py | 1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow_addons.utils.types import Number, TensorLike
@tf.keras.utils.register_keras_serializable(package="Addons")
def hardshrink(x: TensorLike, lower: Number = -0.5, upper: Number = 0.5) -> tf.Tensor:
r"""Hard shrink function.
Computes hard shrink function:
$$
\mathrm{hardshrink}(x) =
\begin{cases}
x & \text{if } x < \text{lower} \\
x & \text{if } x > \text{upper} \\
0 & \text{otherwise}
\end{cases}.
$$
Usage:
>>> x = tf.constant([1.0, 0.0, 1.0])
>>> tfa.activations.hardshrink(x)
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([1., 0., 1.], dtype=float32)>
Args:
x: A `Tensor`. Must be one of the following types:
        `bfloat16`, `float16`, `float32`, `float64`.
lower: `float`, lower bound for setting values to zeros.
upper: `float`, upper bound for setting values to zeros.
Returns:
A `Tensor`. Has the same type as `x`.
"""
if lower > upper:
raise ValueError(
"The value of lower is {} and should"
" not be higher than the value "
"variable upper, which is {} .".format(lower, upper)
)
x = tf.convert_to_tensor(x)
mask_lower = x < lower
mask_upper = upper < x
mask = tf.logical_or(mask_lower, mask_upper)
mask = tf.cast(mask, x.dtype)
return x * mask
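# Quick illustration (not part of the upstream file): with the default bounds
# lower=-0.5 and upper=0.5, values inside [-0.5, 0.5] are zeroed while everything
# outside passes through, e.g. hardshrink(tf.constant([-1.0, 0.3, 1.0]))
# evaluates to [-1.0, 0.0, 1.0].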
|
wannaphongcom/flappy | refs/heads/master | samples/keyboard.py | 2 | #! /usr/bin/env python
# encoding: utf-8
import flappy
from flappy.display import Sprite
from flappy.events import Event, MouseEvent, KeyboardEvent
from flappy.ui import Keyboard
from time import time
WIDTH = 600
HEIGHT = 600
BALL_RADIUS = 40
GRAVITY = 200
THRUST = 5000
DAMP = 0.8
class KeyboardExample(Sprite):
def __init__(self):
super(KeyboardExample, self).__init__()
self.ball = Sprite()
self.ball.cacheAsBitmap = True
gfx = self.ball.graphics
gfx.beginFill(0x20a000)
gfx.drawCircle(0.0, 0.0, BALL_RADIUS)
gfx.endFill()
self.ball.x = WIDTH * 0.5
self.ball.y = HEIGHT * 0.5
self.vx, self.vy = 0.0, 0.0
self.ax, self.ay = 0.0, 0.0
self.addChild(self.ball)
self.addEventListener(Event.ADDED_TO_STAGE, self.on_stage)
self.addEventListener(Event.ENTER_FRAME, self.on_enter_frame)
self.addEventListener(MouseEvent.MOUSE_DOWN, self.on_mouse_down)
self.addEventListener(KeyboardEvent.KEY_DOWN, self.on_key_down)
self.old_time = time()
def on_key_down(self, event):
if event.keyCode == Keyboard.LEFT:
self.ax -= THRUST
if event.keyCode == Keyboard.RIGHT:
self.ax += THRUST
if event.keyCode == Keyboard.UP:
self.ay -= THRUST
if event.keyCode == Keyboard.DOWN:
self.ay += THRUST
def on_stage(self, event):
self.stage.color = 0x383830
self.stage.focus = self
def on_mouse_down(self, event):
self.stage.focus = self
def on_enter_frame(self, event):
new_time = time()
dt = new_time - self.old_time
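        # Explicit Euler integration with wall-clock dt: keyboard thrust plus
        # gravity update the velocity, the velocity updates the ball position,
        # and the boundary checks below reflect the velocity scaled by DAMP.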
self.vy += (GRAVITY + self.ay) * dt
self.vx += self.ax * dt
self.ball.x += self.vx * dt
self.ball.y += self.vy * dt
if (self.ball.x + BALL_RADIUS) > WIDTH:
self.vx = -self.vx * DAMP
self.ball.x = WIDTH - BALL_RADIUS
elif (self.ball.x - BALL_RADIUS) < 0.0:
self.vx = -self.vx * DAMP
self.ball.x = BALL_RADIUS
elif (self.ball.y + BALL_RADIUS) > HEIGHT:
self.vy = -self.vy * DAMP
self.ball.y = HEIGHT - BALL_RADIUS
elif (self.ball.y - BALL_RADIUS) < 0.0:
self.vy = -self.vy * DAMP
self.ball.y = BALL_RADIUS
self.ax, self.ay = 0.0, 0.0
self.old_time = new_time
if __name__ == '__main__':
flappy.start(KeyboardExample, width=WIDTH, height=HEIGHT, title=__file__) |
arokem/nipype | refs/heads/master | nipype/interfaces/spm/tests/test_auto_Normalize12.py | 9 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.spm.preprocess import Normalize12
def test_Normalize12_inputs():
input_map = dict(affine_regularization_type=dict(field='eoptions.affreg',
),
apply_to_files=dict(copyfile=True,
field='subj.resample',
),
bias_fwhm=dict(field='eoptions.biasfwhm',
),
bias_regularization=dict(field='eoptions.biasreg',
),
deformation_file=dict(copyfile=False,
field='subj.def',
mandatory=True,
xor=['image_to_align', 'tpm'],
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
image_to_align=dict(copyfile=True,
field='subj.vol',
mandatory=True,
xor=['deformation_file'],
),
jobtype=dict(usedefault=True,
),
matlab_cmd=dict(),
mfile=dict(usedefault=True,
),
paths=dict(),
sampling_distance=dict(field='eoptions.samp',
),
smoothness=dict(field='eoptions.fwhm',
),
tpm=dict(copyfile=False,
field='eoptions.tpm',
xor=['deformation_file'],
),
use_mcr=dict(),
use_v8struct=dict(min_ver='8',
usedefault=True,
),
warping_regularization=dict(field='eoptions.reg',
),
write_bounding_box=dict(field='woptions.bb',
),
write_interp=dict(field='woptions.interp',
),
write_voxel_sizes=dict(field='woptions.vox',
),
)
inputs = Normalize12.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Normalize12_outputs():
output_map = dict(deformation_field=dict(),
normalized_files=dict(),
normalized_image=dict(),
)
outputs = Normalize12.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
h0s/c3py | refs/heads/master | c3py/tooltip.py | 2 | from .chart_component import ChartComponentDict
class TooltipFormat(ChartComponentDict):
"""
Manipulate the format of the tooltip.
"""
def __init__(self):
super(TooltipFormat, self).__init__()
def set_title(self, title):
"""
Set the title of the tooltip. The variable 'x' can be used to refer to the x value at the tooltip.
Parameters
----------
title : str
The title of the tooltip.
Returns
-------
None
Example
-------
.. code-block:: python
chart.tooltip.tooltip_format.set_title('"value on x-axis = " + x')
"""
self.config['title'] = 'function(x) {{ return {title}; }}'.format(
title=title.replace('"', "'"),
)
class Tooltip(ChartComponentDict):
"""
Manipulate the chart's tooltip.
Attributes
----------
tooltip_format = c3py.tooltip.TooltipFormat
"""
def __init__(self):
super(Tooltip, self).__init__()
self.tooltip_format = TooltipFormat()
self.config = {
'show': 'true',
'grouped': 'true',
'format': self.tooltip_format.config,
}
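        # The config values are kept as JavaScript literals ('true'/'false' and,
        # for 'format', a function string built by TooltipFormat), presumably so
        # they can be emitted verbatim into the generated C3 configuration.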
def hide(self):
"""
Hide the tooltip on the chart.
Returns
-------
None
"""
self.config['show'] = 'false'
def ungroup(self):
"""
Ungroup the tooltips. When hovering at a point on a data series, only that series' tooltip will be shown,
instead of all series' tooltips.
Returns
-------
None
"""
self.config['grouped'] = 'false' |
DigitalSlideArchive/large_image | refs/heads/master | test/test_config.py | 2 | from large_image.config import getConfig, setConfig
def testConfigFunctions():
assert isinstance(getConfig(), dict)
setConfig('cache_backend', 'python')
assert getConfig('cache_backend') == 'python'
setConfig('cache_backend', 'memcached')
assert getConfig('cache_backend') == 'memcached'
setConfig('cache_backend', None)
assert getConfig('cache_backend') is None
assert getConfig('unknown', 'python') == 'python'
|
DataDog/stashboard | refs/heads/master | stashboard/contrib/dateutil/__init__.py | 253 | """
Copyright (c) 2003-2010 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
__version__ = "1.5"
|
gargleblaster/trading-with-python | refs/heads/master | historicDataDownloader/testData.py | 77 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 05 22:06:13 2012
@author: jev
"""
import numpy as np
from pandas import *
from matplotlib.pyplot import *
#df1 = DataFrame.from_csv('test1.csv').astype(np.dtype('f4'))
#df2 = DataFrame.from_csv('test2.csv').astype(np.dtype('f4'))
#df = DataFrame([df1,df2])
df = DataFrame.from_csv('test.csv').astype(np.dtype('f4'))
close('all')
clf()
ax1=subplot(2,1,1)
df[['high','low','WAP']].plot(grid=True,ax=gca())
subplot(2,1,2,sharex=ax1)
df[['count','volume']].plot(ax=gca()) |
40223202/test2-1 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/dummy/__init__.py | 693 | #
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
#brython fix me
#import array
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()
def start(self):
assert self._parent is current_process()
self._start_called = True
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
threading.Thread.start(self)
@property
def exitcode(self):
if self._start_called and not self.is_alive():
return 0
else:
return None
#
#
#
Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
children = current_process()._children
for p in list(children):
if not p.is_alive():
children.pop(p, None)
return list(children)
def freeze_support():
pass
#
#
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
#brython fix me
#def Array(typecode, sequence, lock=True):
# return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
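# Manager() below simply returns this module itself, so the module-level names
# (dict, list, Namespace, Value, ...) stand in for the proxy objects that a real
# multiprocessing Manager would provide.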
def Manager():
return sys.modules[__name__]
def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
from multiprocessing.pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
|
broferek/ansible | refs/heads/devel | test/units/modules/system/test_iptables.py | 18 | from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.modules.system import iptables
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
def get_bin_path(*args, **kwargs):
return "/sbin/iptables"
def get_iptables_version(iptables_path, module):
return "1.8.2"
class TestIptables(ModuleTestCase):
def setUp(self):
super(TestIptables, self).setUp()
self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path)
self.mock_get_bin_path.start()
self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone'
self.mock_get_iptables_version = patch.object(iptables, 'get_iptables_version', get_iptables_version)
self.mock_get_iptables_version.start()
self.addCleanup(self.mock_get_iptables_version.stop) # ensure that the patching is 'undone'
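        # The tests below share one pattern: basic.AnsibleModule.run_command is
        # patched, its (rc, stdout, stderr) results are scripted via return_value
        # or side_effect, and the exact iptables argv built by the module is
        # asserted afterwards.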
def test_without_required_parameters(self):
"""Failure must occurs when all parameters are missing"""
with self.assertRaises(AnsibleFailJson):
set_module_args({})
iptables.main()
def test_flush_table_without_chain(self):
"""Test flush without chain, flush the table"""
set_module_args({
'flush': True,
})
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.return_value = 0, '', '' # successful execution, no output
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args[0][0][0], '/sbin/iptables')
self.assertEqual(run_command.call_args[0][0][1], '-t')
self.assertEqual(run_command.call_args[0][0][2], 'filter')
self.assertEqual(run_command.call_args[0][0][3], '-F')
def test_flush_table_check_true(self):
"""Test flush without parameters and check == true"""
set_module_args({
'flush': True,
'_ansible_check_mode': True,
})
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.return_value = 0, '', '' # successful execution, no output
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 0)
# TODO ADD test flush table nat
# TODO ADD test flush with chain
# TODO ADD test flush with chain and table nat
def test_policy_table(self):
"""Test change policy of a chain"""
set_module_args({
'policy': 'ACCEPT',
'chain': 'INPUT',
})
commands_results = [
(0, 'Chain INPUT (policy DROP)\n', ''),
(0, '', '')
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 2)
# import pdb
# pdb.set_trace()
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-L',
'INPUT',
])
self.assertEqual(run_command.call_args_list[1][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-P',
'INPUT',
'ACCEPT',
])
def test_policy_table_no_change(self):
"""Test don't change policy of a chain if the policy is right"""
set_module_args({
'policy': 'ACCEPT',
'chain': 'INPUT',
})
commands_results = [
(0, 'Chain INPUT (policy ACCEPT)\n', ''),
(0, '', '')
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertFalse(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
# import pdb
# pdb.set_trace()
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-L',
'INPUT',
])
def test_policy_table_changed_false(self):
"""Test flush without parameters and change == false"""
set_module_args({
'policy': 'ACCEPT',
'chain': 'INPUT',
'_ansible_check_mode': True,
})
commands_results = [
(0, 'Chain INPUT (policy DROP)\n', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
# import pdb
# pdb.set_trace()
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-L',
'INPUT',
])
# TODO ADD test policy without chain fail
# TODO ADD test policy with chain don't exists
# TODO ADD test policy with wrong choice fail
def test_insert_rule_change_false(self):
"""Test flush without parameters"""
set_module_args({
'chain': 'OUTPUT',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'ACCEPT',
'action': 'insert',
'_ansible_check_mode': True,
})
commands_results = [
(1, '', ''),
(0, '', '')
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
# import pdb
# pdb.set_trace()
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'OUTPUT',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'ACCEPT'
])
def test_insert_rule(self):
"""Test flush without parameters"""
set_module_args({
'chain': 'OUTPUT',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'ACCEPT',
'action': 'insert'
})
commands_results = [
(1, '', ''),
(0, '', '')
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 2)
# import pdb
# pdb.set_trace()
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'OUTPUT',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'ACCEPT'
])
self.assertEqual(run_command.call_args_list[1][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-I',
'OUTPUT',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'ACCEPT'
])
def test_append_rule_check_mode(self):
"""Test append a redirection rule in check mode"""
set_module_args({
'chain': 'PREROUTING',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'REDIRECT',
'table': 'nat',
'to_destination': '5.5.5.5/32',
'protocol': 'udp',
'destination_port': '22',
'to_ports': '8600',
'_ansible_check_mode': True,
})
commands_results = [
(1, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-C',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'REDIRECT',
'--to-destination',
'5.5.5.5/32',
'--destination-port',
'22',
'--to-ports',
'8600'
])
def test_append_rule(self):
"""Test append a redirection rule"""
set_module_args({
'chain': 'PREROUTING',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'REDIRECT',
'table': 'nat',
'to_destination': '5.5.5.5/32',
'protocol': 'udp',
'destination_port': '22',
'to_ports': '8600'
})
commands_results = [
(1, '', ''),
(0, '', '')
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 2)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-C',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'REDIRECT',
'--to-destination',
'5.5.5.5/32',
'--destination-port',
'22',
'--to-ports',
'8600'
])
self.assertEqual(run_command.call_args_list[1][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-A',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'REDIRECT',
'--to-destination',
'5.5.5.5/32',
'--destination-port',
'22',
'--to-ports',
'8600'
])
def test_remove_rule(self):
"""Test flush without parameters"""
set_module_args({
'chain': 'PREROUTING',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'SNAT',
'table': 'nat',
'to_source': '5.5.5.5/32',
'protocol': 'udp',
'source_port': '22',
'to_ports': '8600',
'state': 'absent',
'in_interface': 'eth0',
'out_interface': 'eth1',
'comment': 'this is a comment'
})
commands_results = [
(0, '', ''),
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 2)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-C',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'SNAT',
'--to-source',
'5.5.5.5/32',
'-i',
'eth0',
'-o',
'eth1',
'--source-port',
'22',
'--to-ports',
'8600',
'-m',
'comment',
'--comment',
'this is a comment'
])
self.assertEqual(run_command.call_args_list[1][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-D',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'SNAT',
'--to-source',
'5.5.5.5/32',
'-i',
'eth0',
'-o',
'eth1',
'--source-port',
'22',
'--to-ports',
'8600',
'-m',
'comment',
'--comment',
'this is a comment'
])
def test_remove_rule_check_mode(self):
"""Test flush without parameters check mode"""
set_module_args({
'chain': 'PREROUTING',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'SNAT',
'table': 'nat',
'to_source': '5.5.5.5/32',
'protocol': 'udp',
'source_port': '22',
'to_ports': '8600',
'state': 'absent',
'in_interface': 'eth0',
'out_interface': 'eth1',
'comment': 'this is a comment',
'_ansible_check_mode': True,
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-C',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'SNAT',
'--to-source',
'5.5.5.5/32',
'-i',
'eth0',
'-o',
'eth1',
'--source-port',
'22',
'--to-ports',
'8600',
'-m',
'comment',
'--comment',
'this is a comment'
])
def test_insert_with_reject(self):
""" Using reject_with with a previously defined jump: REJECT results in two Jump statements #18988 """
set_module_args({
'chain': 'INPUT',
'protocol': 'tcp',
'reject_with': 'tcp-reset',
'ip_version': 'ipv4',
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'INPUT',
'-p',
'tcp',
'-j',
'REJECT',
'--reject-with',
'tcp-reset',
])
def test_insert_jump_reject_with_reject(self):
""" Using reject_with with a previously defined jump: REJECT results in two Jump statements #18988 """
set_module_args({
'chain': 'INPUT',
'protocol': 'tcp',
'jump': 'REJECT',
'reject_with': 'tcp-reset',
'ip_version': 'ipv4',
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'INPUT',
'-p',
'tcp',
'-j',
'REJECT',
'--reject-with',
'tcp-reset',
])
def test_jump_tee_gateway_negative(self):
""" Missing gateway when JUMP is set to TEE """
set_module_args({
'table': 'mangle',
'chain': 'PREROUTING',
'in_interface': 'eth0',
'protocol': 'udp',
'match': 'state',
'jump': 'TEE',
'ctstate': ['NEW'],
'destination_port': '9521',
'destination': '127.0.0.1'
})
with self.assertRaises(AnsibleFailJson) as e:
iptables.main()
self.assertTrue(e.exception.args[0]['failed'])
self.assertEqual(e.exception.args[0]['msg'], 'jump is TEE but all of the following are missing: gateway')
def test_jump_tee_gateway(self):
""" Using gateway when JUMP is set to TEE """
set_module_args({
'table': 'mangle',
'chain': 'PREROUTING',
'in_interface': 'eth0',
'protocol': 'udp',
'match': 'state',
'jump': 'TEE',
'ctstate': ['NEW'],
'destination_port': '9521',
'gateway': '192.168.10.1',
'destination': '127.0.0.1'
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t', 'mangle',
'-C', 'PREROUTING',
'-p', 'udp',
'-d', '127.0.0.1',
'-m', 'state',
'-j', 'TEE',
'--gateway', '192.168.10.1',
'-i', 'eth0',
'--destination-port', '9521',
'--state', 'NEW'
])
def test_tcp_flags(self):
""" Test various ways of inputting tcp_flags """
args = [
{
'chain': 'OUTPUT',
'protocol': 'tcp',
'jump': 'DROP',
'tcp_flags': 'flags=ALL flags_set="ACK,RST,SYN,FIN"'
},
{
'chain': 'OUTPUT',
'protocol': 'tcp',
'jump': 'DROP',
'tcp_flags': {
'flags': 'ALL',
'flags_set': 'ACK,RST,SYN,FIN'
}
},
{
'chain': 'OUTPUT',
'protocol': 'tcp',
'jump': 'DROP',
'tcp_flags': {
'flags': ['ALL'],
'flags_set': ['ACK', 'RST', 'SYN', 'FIN']
}
},
]
for item in args:
set_module_args(item)
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'OUTPUT',
'-p',
'tcp',
'--tcp-flags',
'ALL',
'ACK,RST,SYN,FIN',
'-j',
'DROP'
])
def test_log_level(self):
""" Test various ways of log level flag """
log_levels = ['0', '1', '2', '3', '4', '5', '6', '7',
'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug']
for log_lvl in log_levels:
set_module_args({
'chain': 'INPUT',
'jump': 'LOG',
'log_level': log_lvl,
'source': '1.2.3.4/32',
'log_prefix': '** DROP-this_ip **'
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t', 'filter',
'-C', 'INPUT',
'-s', '1.2.3.4/32',
'-j', 'LOG',
'--log-prefix', '** DROP-this_ip **',
'--log-level', log_lvl
])
def test_iprange(self):
""" Test iprange module with its flags src_range and dst_range """
set_module_args({
'chain': 'INPUT',
'match': ['iprange'],
'src_range': '192.168.1.100-192.168.1.199',
'jump': 'ACCEPT'
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'INPUT',
'-m',
'iprange',
'-j',
'ACCEPT',
'--src-range',
'192.168.1.100-192.168.1.199',
])
set_module_args({
'chain': 'INPUT',
'src_range': '192.168.1.100-192.168.1.199',
'dst_range': '10.0.0.50-10.0.0.100',
'jump': 'ACCEPT'
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'INPUT',
'-j',
'ACCEPT',
'-m',
'iprange',
'--src-range',
'192.168.1.100-192.168.1.199',
'--dst-range',
'10.0.0.50-10.0.0.100'
])
set_module_args({
'chain': 'INPUT',
'dst_range': '10.0.0.50-10.0.0.100',
'jump': 'ACCEPT'
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'INPUT',
'-j',
'ACCEPT',
'-m',
'iprange',
'--dst-range',
'10.0.0.50-10.0.0.100'
])
def test_insert_rule_with_wait(self):
"""Test flush without parameters"""
set_module_args({
'chain': 'OUTPUT',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'ACCEPT',
'action': 'insert',
'wait': '10'
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'OUTPUT',
'-w',
'10',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'ACCEPT'
])
|
sirMackk/ZeroNet | refs/heads/master | plugins/Newsfeed/__init__.py | 8 | import NewsfeedPlugin |
coder-james/mxnet | refs/heads/master | example/reinforcement-learning/dqn/dqn_demo.py | 15 | import mxnet as mx
import mxnet.ndarray as nd
import numpy
from base import Base
from operators import *
from atari_game import AtariGame
from utils import *
import logging
import argparse
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
mx.random.seed(100)
npy_rng = get_numpy_rng()
class DQNInitializer(mx.initializer.Xavier):
def _init_bias(self, _, arr):
arr[:] = .1
def main():
parser = argparse.ArgumentParser(description='Script to test the trained network on a game.')
parser.add_argument('-r', '--rom', required=False, type=str,
default=os.path.join('roms', 'breakout.bin'),
help='Path of the ROM File.')
parser.add_argument('-v', '--visualization', required=False, type=int, default=0,
help='Visualize the runs.')
parser.add_argument('--lr', required=False, type=float, default=0.01,
help='Learning rate of the AdaGrad optimizer')
parser.add_argument('--eps', required=False, type=float, default=0.01,
help='Eps of the AdaGrad optimizer')
parser.add_argument('--clip-gradient', required=False, type=float, default=None,
help='Clip threshold of the AdaGrad optimizer')
parser.add_argument('--double-q', required=False, type=bool, default=False,
help='Use Double DQN')
parser.add_argument('--wd', required=False, type=float, default=0.0,
help='Weight of the L2 Regularizer')
parser.add_argument('-c', '--ctx', required=False, type=str, default='gpu',
help='Running Context. E.g `-c gpu` or `-c gpu1` or `-c cpu`')
parser.add_argument('-d', '--dir-path', required=False, type=str, default='',
help='Saving directory of model files.')
parser.add_argument('--start-eps', required=False, type=float, default=1.0,
help='Eps of the epsilon-greedy policy at the beginning')
parser.add_argument('--replay-start-size', required=False, type=int, default=50000,
help='The step that the training starts')
parser.add_argument('--kvstore-update-period', required=False, type=int, default=1,
help='The period that the worker updates the parameters from the sever')
parser.add_argument('--kv-type', required=False, type=str, default=None,
help='type of kvstore, default will not use kvstore, could also be dist_async')
parser.add_argument('--optimizer', required=False, type=str, default="adagrad",
help='type of optimizer')
args = parser.parse_args()
if args.dir_path == '':
rom_name = os.path.splitext(os.path.basename(args.rom))[0]
args.dir_path = 'dqn-%s-lr%g' % (rom_name, args.lr)
replay_start_size = args.replay_start_size
max_start_nullops = 30
replay_memory_size = 1000000
history_length = 4
rows = 84
cols = 84
ctx = parse_ctx(args.ctx)
q_ctx = mx.Context(*ctx[0])
game = AtariGame(rom_path=args.rom, resize_mode='scale', replay_start_size=replay_start_size,
resized_rows=rows, resized_cols=cols, max_null_op=max_start_nullops,
replay_memory_size=replay_memory_size, display_screen=args.visualization,
history_length=history_length)
    ## Hyperparameters following the Nature DQN setup
freeze_interval = 10000
epoch_num = 200
steps_per_epoch = 250000
update_interval = 4
discount = 0.99
eps_start = args.start_eps
eps_min = 0.1
eps_decay = (eps_start - eps_min) / 1000000
eps_curr = eps_start
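    # Epsilon-greedy schedule: eps_curr is annealed linearly from eps_start down to
    # eps_min over 1,000,000 action-selection steps (eps_decay is subtracted each
    # time an action is chosen once the replay memory can be sampled).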
freeze_interval /= update_interval
minibatch_size = 32
action_num = len(game.action_set)
data_shapes = {'data': (minibatch_size, history_length) + (rows, cols),
'dqn_action': (minibatch_size,), 'dqn_reward': (minibatch_size,)}
dqn_sym = dqn_sym_nature(action_num)
qnet = Base(data_shapes=data_shapes, sym_gen=dqn_sym, name='QNet',
initializer=DQNInitializer(factor_type="in"),
ctx=q_ctx)
target_qnet = qnet.copy(name="TargetQNet", ctx=q_ctx)
use_easgd = False
optimizer = mx.optimizer.create(name=args.optimizer, learning_rate=args.lr, eps=args.eps,
clip_gradient=args.clip_gradient,
rescale_grad=1.0, wd=args.wd)
updater = mx.optimizer.get_updater(optimizer)
qnet.print_stat()
target_qnet.print_stat()
# Begin Playing Game
training_steps = 0
total_steps = 0
for epoch in range(epoch_num):
# Run Epoch
steps_left = steps_per_epoch
episode = 0
epoch_reward = 0
start = time.time()
game.start()
while steps_left > 0:
# Running New Episode
episode += 1
episode_loss = 0.0
episode_q_value = 0.0
episode_update_step = 0
episode_action_step = 0
time_episode_start = time.time()
game.begin_episode(steps_left)
while not game.episode_terminate:
# 1. We need to choose a new action based on the current game status
if game.state_enabled and game.replay_memory.sample_enabled:
do_exploration = (npy_rng.rand() < eps_curr)
eps_curr = max(eps_curr - eps_decay, eps_min)
if do_exploration:
action = npy_rng.randint(action_num)
else:
                        # TODO: We could in fact play multiple game instances simultaneously and choose an action for each
                        # by stacking their current_state() outputs and predicting for the whole batch at once.
                        # Currently we have to wait after calling calc_score(.), which makes the program slow.
                        # TODO: Profile the speed of this part!
current_state = game.current_state()
state = nd.array(current_state.reshape((1,) + current_state.shape),
ctx=q_ctx) / float(255.0)
qval_npy = qnet.forward(is_train=False, data=state)[0].asnumpy()
action = numpy.argmax(qval_npy)
episode_q_value += qval_npy[0, action]
episode_action_step += 1
else:
action = npy_rng.randint(action_num)
                # 2. Play the game for a single mega-step (inside the game, the action may be repeated several times)
game.play(action)
total_steps += 1
# 3. Update our Q network if we can start sampling from the replay memory
# Also, we update every `update_interval`
if total_steps % update_interval == 0 and game.replay_memory.sample_enabled:
# 3.1 Draw sample from the replay_memory
training_steps += 1
episode_update_step += 1
states, actions, rewards, next_states, terminate_flags \
= game.replay_memory.sample(batch_size=minibatch_size)
states = nd.array(states, ctx=q_ctx) / float(255.0)
next_states = nd.array(next_states, ctx=q_ctx) / float(255.0)
actions = nd.array(actions, ctx=q_ctx)
rewards = nd.array(rewards, ctx=q_ctx)
terminate_flags = nd.array(terminate_flags, ctx=q_ctx)
# 3.2 Use the target network to compute the scores and
# get the corresponding target rewards
if not args.double_q:
target_qval = target_qnet.forward(is_train=False, data=next_states)[0]
target_rewards = rewards + nd.choose_element_0index(target_qval,
nd.argmax_channel(target_qval))\
* (1.0 - terminate_flags) * discount
else:
target_qval = target_qnet.forward(is_train=False, data=next_states)[0]
qval = qnet.forward(is_train=False, data=next_states)[0]
target_rewards = rewards + nd.choose_element_0index(target_qval,
nd.argmax_channel(qval))\
* (1.0 - terminate_flags) * discount
outputs = qnet.forward(is_train=True,
data=states,
dqn_action=actions,
dqn_reward=target_rewards)
qnet.backward()
qnet.update(updater=updater)
# 3.3 Calculate Loss
diff = nd.abs(nd.choose_element_0index(outputs[0], actions) - target_rewards)
quadratic_part = nd.clip(diff, -1, 1)
loss = 0.5 * nd.sum(nd.square(quadratic_part)).asnumpy()[0] +\
nd.sum(diff - quadratic_part).asnumpy()[0]
episode_loss += loss
                    # 3.4 Update the target network every freeze_interval
if training_steps % freeze_interval == 0:
qnet.copy_params_to(target_qnet)
steps_left -= game.episode_step
time_episode_end = time.time()
# Update the statistics
epoch_reward += game.episode_reward
info_str = "Epoch:%d, Episode:%d, Steps Left:%d/%d, Reward:%f, fps:%f, Exploration:%f" \
% (epoch, episode, steps_left, steps_per_epoch, game.episode_reward,
game.episode_step / (time_episode_end - time_episode_start), eps_curr)
if episode_update_step > 0:
info_str += ", Avg Loss:%f/%d" % (episode_loss / episode_update_step,
episode_update_step)
if episode_action_step > 0:
info_str += ", Avg Q Value:%f/%d" % (episode_q_value / episode_action_step,
episode_action_step)
if episode % 100 == 0:
logging.info(info_str)
end = time.time()
fps = steps_per_epoch / (end - start)
qnet.save_params(dir_path=args.dir_path, epoch=epoch)
logging.info("Epoch:%d, FPS:%f, Avg Reward: %f/%d"
% (epoch, fps, epoch_reward / float(episode), episode))
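# Illustrative sketch (editor addition, not part of the original script): a
# plain-numpy mirror of the TD-target computation in the training loop above.
# Standard DQN bootstraps from max_a Q_target(s', a); double DQN picks the
# greedy action with the online network and evaluates it with the target
# network. The argument names here are hypothetical.
def compute_td_targets(rewards, terminate_flags, target_qval, online_qval=None,
                       discount=0.99):
    import numpy as np
    rewards = np.asarray(rewards, dtype=np.float64)
    target_qval = np.asarray(target_qval, dtype=np.float64)
    cont = 1.0 - np.asarray(terminate_flags, dtype=np.float64)
    if online_qval is None:
        # Standard DQN target: r + discount * max_a Q_target(s', a)
        bootstrap = target_qval.max(axis=1)
    else:
        # Double DQN target: action chosen by the online network, scored by
        # the target network.
        greedy = np.asarray(online_qval).argmax(axis=1)
        bootstrap = target_qval[np.arange(target_qval.shape[0]), greedy]
    return rewards + discount * cont * bootstrap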
if __name__ == '__main__':
main()
|
jhseu/tensorflow | refs/heads/master | tensorflow/python/training/session_manager_test.py | 8 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SessionManager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_manager
class SessionManagerTest(test.TestCase):
def testPrepareSessionSucceeds(self):
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_op=variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.VariableV1(p, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with ops.Graph().as_default():
v = variables.VariableV1([125], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
def testPrepareSessionSucceedsWithLocalInitFeedDict(self):
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.VariableV1(p, name="v",
collections=[ops.GraphKeys.LOCAL_VARIABLES])
sm = session_manager.SessionManager(
local_init_op=v.initializer,
local_init_feed_dict={p: [1.0, 2.0, 3.0]},
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
@test_util.run_v1_only("b/120545219")
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
# Renames the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = variables.VariableV1([6.0, 7.0, 8.0], name="v")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegexp(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
      # This should succeed as there's a checkpoint.
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
def _test_recovered_variable(self,
checkpoint_dir=None,
checkpoint_filename_with_path=None):
# Create a new Graph and SessionManager and recover from a checkpoint.
with ops.Graph().as_default():
v = variables.VariableV1(2, name="v")
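      # The checkpoint created by the calling test stores v == 1, so a
      # successful recovery must overwrite this initial value of 2 (asserted
      # below via sess.run(v)).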
with session_lib.Session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"",
saver=saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
@test_util.run_v1_only("b/120545219")
def testRecoverSession(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
self._test_recovered_variable(checkpoint_dir=checkpoint_dir)
self._test_recovered_variable(
checkpoint_filename_with_path=checkpoint_management.latest_checkpoint(
checkpoint_dir))
# Cannot set both checkpoint_dir and checkpoint_filename_with_path.
with self.assertRaises(ValueError):
self._test_recovered_variable(
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_management.latest_checkpoint(
checkpoint_dir))
@test_util.run_v1_only("b/120545219")
def testWaitForSessionReturnsNoneAfterTimeout(self):
with ops.Graph().as_default():
variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
def testInitWithNoneLocalInitOpError(self):
# Creating a SessionManager with a None local_init_op but
# non-None ready_for_local_init_op raises ValueError
with self.assertRaisesRegexp(
ValueError, "If you pass a ready_for_local_init_op "
"you must also pass a local_init_op "):
session_manager.SessionManager(
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=None)
@test_util.run_v1_only("b/120545219")
def testRecoverSessionWithReadyForLocalInitOp(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(),
"recover_session_ready_for_local_init")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.VariableV1(2, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
@test_util.run_v1_only("b/120545219")
def testRecoverSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
# We use ready_for_local_init_op=report_uninitialized_variables(),
# which causes recover_session to not run local_init_op, and to return
# initialized=False
# Create a checkpoint.
checkpoint_dir = os.path.join(
self.get_temp_dir(),
"recover_session_ready_for_local_init_fails_to_ready_local")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.VariableV1(2, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(),
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
@test_util.run_v1_only("b/120545219")
def testRecoverSessionNoChkptStillRunsLocalInitOp(self):
# This test checks for backwards compatibility.
# In particular, we continue to ensure that recover_session will execute
# local_init_op exactly once, regardless of whether the session was
# successfully recovered.
with ops.Graph().as_default():
w = variables.VariableV1(
1,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
# Try to recover session from None
sess, initialized = sm2.recover_session(
"", saver=None, checkpoint_dir=None)
      # Succeeds because recover_session still runs local_init_op
self.assertFalse(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(w))
@test_util.run_v1_only("b/120545219")
def testRecoverSessionFailsStillRunsLocalInitOp(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(
self.get_temp_dir(),
"recover_session_ready_for_local_init_fails_stil_run")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.VariableV1(2, name="v")
w = variables.VariableV1(
1,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"",
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=False)
self.assertFalse(initialized)
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(w))
@test_util.run_v1_only("b/120545219")
def testWaitForSessionLocalInit(self):
server = server_lib.Server.create_local_server()
with ops.Graph().as_default() as graph:
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
# Initialize v but not w
s = session_lib.Session(server.target, graph=graph)
s.run(v.initializer)
sess = sm.wait_for_session(server.target, max_wait_secs=3)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
def testWaitForSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
with ops.Graph().as_default() as graph:
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(),
local_init_op=w.initializer)
with self.assertRaises(errors_impl.DeadlineExceededError):
# Time-out because w fails to be initialized,
# because of overly restrictive ready_for_local_init_op
sm.wait_for_session("", max_wait_secs=3)
@test_util.run_v1_only("b/120545219")
def testWaitForSessionInsufficientReadyForLocalInitCheck(self):
with ops.Graph().as_default() as graph:
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Session was not ready after waiting.*"):
sm.wait_for_session("", max_wait_secs=3)
@test_util.run_v1_only("b/120545219")
def testPrepareSessionWithReadyForLocalInitOp(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
x = variables.VariableV1(
3 * v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
self.assertEqual(False, variables.is_variable_initialized(x).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=[w.initializer, x.initializer])
sess = sm2.prepare_session("", init_op=v.initializer)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
self.assertEquals(3, sess.run(x))
@test_util.run_v1_only("b/120545219")
def testPrepareSessionWithPartialInitOp(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
x = variables.VariableV1(
3 * v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x")
# TODO(b/70206927): Use ResourceVariables once they are handled properly.
v_res = variables.VariableV1(1, name="v_res")
w_res = variables.VariableV1(
v_res,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w_res")
x_res = variables.VariableV1(
3 * v_res,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x_res")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
self.assertEqual(False, variables.is_variable_initialized(x).eval())
self.assertEqual(False, variables.is_variable_initialized(v_res).eval())
self.assertEqual(False, variables.is_variable_initialized(w_res).eval())
self.assertEqual(False, variables.is_variable_initialized(x_res).eval())
sm2 = session_manager.SessionManager(local_init_op=[
w.initializer, x.initializer, w_res.initializer, x_res.initializer
])
sess = sm2.prepare_session("", init_op=None)
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x:0")).eval(session=sess))
self.assertEquals(1, sess.run(w))
self.assertEquals(3, sess.run(x))
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v_res:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w_res:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x_res:0")).eval(session=sess))
self.assertEquals(1, sess.run(w_res))
self.assertEquals(3, sess.run(x_res))
@test_util.run_v1_only("b/120545219")
def testPrepareSessionWithCyclicInitializer(self):
# Regression test. Previously Variable._build_initializer_expr would enter
# into an infinite recursion when the variable's initial_value involved
# cyclic dependencies.
with ops.Graph().as_default():
i = control_flow_ops.while_loop(lambda i: i < 1, lambda i: i + 1, [0])
v = variables.VariableV1(array_ops.identity(i), name="v")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session("", init_op=v.initializer)
self.assertEqual(1, sess.run(v))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
def testPrepareSessionDidNotInitLocalVariable(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
with self.assertRaisesRegexp(
RuntimeError, "Init operations did not make model ready.*"):
sm2.prepare_session("", init_op=v.initializer)
def testPrepareSessionDidNotInitLocalVariableList(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
with self.assertRaisesRegexp(RuntimeError,
"Init operations did not make model ready"):
sm2.prepare_session("", init_op=[v.initializer])
def testPrepareSessionWithReadyNotReadyForLocal(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
with self.assertRaisesRegexp(
RuntimeError,
"Init operations did not make model ready for local_init"):
sm2.prepare_session("", init_op=None)
@test_util.run_v1_only("b/120545219")
def testPrepareSessionWithInsufficientReadyForLocalInitCheck(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Init operations did not make model ready.*"):
sm2.prepare_session("", init_op=None)
class ObsoleteSessionManagerTest(test.TestCase):
def testPrepareSessionSucceeds(self):
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"", init_op=variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.VariableV1(p, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with ops.Graph().as_default():
v = variables.VariableV1([125], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"", init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
@test_util.run_v1_only("b/120545219")
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
# Renames the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = variables.VariableV1([6.0, 7.0, 8.0], name="v")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegexp(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
      # This should succeed as there's a checkpoint.
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
@test_util.run_v1_only("b/120545219")
def testRecoverSession(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.VariableV1(2, name="v")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
@test_util.run_v1_only("b/120545219")
def testWaitForSessionReturnsNoneAfterTimeout(self):
with ops.Graph().as_default():
variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
if __name__ == "__main__":
test.main()
|
0x000000FF/yocto-edison-meta | refs/heads/master | meta-intel-edison-distro/recipes-mostfun/avr-isp/files/avr_isp/intelHex.py | 3 | """
Module to read intel hex files into binary data blobs.
IntelHex files are commonly used to distribute firmware
See: http://en.wikipedia.org/wiki/Intel_HEX
"""
__copyright__ = "Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License"
import io
def readHex(filename):
"""
	Read and verify an Intel HEX file. Return the data as a list of bytes.
"""
data = []
extraAddr = 0
f = io.open(filename, "r")
for line in f:
line = line.strip()
if len(line) < 1:
continue
if line[0] != ':':
raise Exception("Hex file has a line not starting with ':'")
recLen = int(line[1:3], 16)
addr = int(line[3:7], 16) + extraAddr
recType = int(line[7:9], 16)
if len(line) != recLen * 2 + 11:
raise Exception("Error in hex file: " + line)
checkSum = 0
for i in xrange(0, recLen + 5):
checkSum += int(line[i*2+1:i*2+3], 16)
checkSum &= 0xFF
if checkSum != 0:
raise Exception("Checksum error in hex file: " + line)
if recType == 0:#Data record
while len(data) < addr + recLen:
data.append(0)
for i in xrange(0, recLen):
data[addr + i] = int(line[i*2+9:i*2+11], 16)
elif recType == 1: #End Of File record
pass
elif recType == 2: #Extended Segment Address Record
extraAddr = int(line[9:13], 16) * 16
else:
print(recType, recLen, addr, checkSum, line)
f.close()
return data
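# Illustrative usage sketch (editor addition, not part of the original module).
# The file name below is hypothetical; readHex() returns the firmware as a
# flat, zero-filled list of byte values, as implemented above.
if __name__ == "__main__":
    firmware = readHex("firmware.hex")  # hypothetical path to an Intel HEX file
    print("Read %d bytes of firmware data" % len(firmware))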
|
yoki/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/commit.py | 124 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import sys
from webkitpy.common.checkout.scm import AuthenticationError, AmbiguousCommitError
from webkitpy.common.config import urls
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.user import User
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class Commit(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.non_interactive,
]
def _commit_warning(self, error):
        return ('There are %s local commits (and possibly changes in the working directory). '
'Everything will be committed as a single commit. '
'To avoid this prompt, set "git config webkit-patch.commit-should-always-squash true".' % (
error.num_local_commits))
def _check_test_expectations(self, changed_files):
test_expectations_files = [filename for filename in changed_files if filename.endswith('TestExpectations')]
if not test_expectations_files:
return
args = ["--diff-files"]
args.extend(test_expectations_files)
try:
self._tool.executive.run_and_throw_if_fail(self._tool.deprecated_port().check_webkit_style_command() + args, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
if self._options.non_interactive:
raise
if not self._tool.user.confirm("Are you sure you want to continue?", default="n"):
self._exit(1)
def run(self, state):
self._commit_message = self._tool.checkout().commit_message_for_this_commit(self._options.git_commit).message()
if len(self._commit_message) < 10:
raise Exception("Attempted to commit with a commit message shorter than 10 characters. Either your patch is missing a ChangeLog or webkit-patch may have a bug.")
self._check_test_expectations(self._changed_files(state))
self._state = state
username = None
password = None
force_squash = self._options.non_interactive
num_tries = 0
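        # Attempt the commit up to three times so that AuthenticationError
        # (re-prompting for credentials) and AmbiguousCommitError (confirming
        # a squash) can be handled before giving up.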
while num_tries < 3:
num_tries += 1
try:
scm = self._tool.scm()
commit_text = scm.commit_with_message(self._commit_message, git_commit=self._options.git_commit, username=username, password=password, force_squash=force_squash, changed_files=self._changed_files(state))
svn_revision = scm.svn_revision_from_commit_text(commit_text)
_log.info("Committed r%s: <%s>" % (svn_revision, urls.view_revision_url(svn_revision)))
self._state["commit_text"] = commit_text
                break
except AmbiguousCommitError, e:
if self._tool.user.confirm(self._commit_warning(e)):
force_squash = True
else:
# This will correctly interrupt the rest of the commit process.
raise ScriptError(message="Did not commit")
except AuthenticationError, e:
if self._options.non_interactive:
raise ScriptError(message="Authentication required")
username = self._tool.user.prompt("%s login: " % e.server_host, repeat=5)
if not username:
raise ScriptError("You need to specify the username on %s to perform the commit as." % e.server_host)
if e.prompt_for_password:
password = self._tool.user.prompt_password("%s password for %s: " % (e.server_host, username), repeat=5)
if not password:
raise ScriptError("You need to specify the password for %s on %s to perform the commit." % (username, e.server_host))
|
fernandezcuesta/ansible | refs/heads/devel | lib/ansible/modules/network/illumos/ipadm_addrprop.py | 8 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipadm_addrprop
short_description: Manage IP address properties on Solaris/illumos systems.
description:
- Modify IP address properties on Solaris/illumos systems.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
addrobj:
description:
- Specifies the address object we want to manage.
required: true
aliases: [nic, interface]
property:
description:
- Specifies the name of the address property we want to manage.
required: true
aliases: [name]
value:
description:
- Specifies the value we want to set for the address property.
required: false
temporary:
description:
- Specifies that the address property value is temporary.
Temporary values do not persist across reboots.
required: false
default: false
state:
description:
- Set or reset the property value.
required: false
default: present
choices: [ "present", "absent", "reset" ]
'''
EXAMPLES = '''
- name: Mark address on addrobj as deprecated
  ipadm_addrprop: property=deprecated value=on addrobj=e1000g0/v6
- name: Set network prefix length for addrobj
  ipadm_addrprop: addrobj=bge0/v4 name=prefixlen value=26
'''
RETURN = '''
property:
description: property name
returned: always
type: string
sample: deprecated
addrobj:
description: address object name
returned: always
type: string
sample: bge0/v4
state:
description: state of the target
returned: always
type: string
sample: present
temporary:
description: specifies if operation will persist across reboots
returned: always
type: boolean
sample: True
value:
description: property value
returned: when value is provided
type: string
sample: 26
'''
from ansible.module_utils.basic import AnsibleModule
class AddrProp(object):
def __init__(self, module):
self.module = module
self.addrobj = module.params['addrobj']
self.property = module.params['property']
self.value = module.params['value']
self.temporary = module.params['temporary']
self.state = module.params['state']
def property_exists(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('show-addrprop')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.addrobj)
(rc, _, _) = self.module.run_command(cmd)
if rc == 0:
return True
else:
self.module.fail_json(msg='Unknown property "%s" on addrobj %s' %
(self.property, self.addrobj),
property=self.property,
addrobj=self.addrobj)
def property_is_modified(self):
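        # Note: returns True when the current value equals the ipadm-reported
        # default, i.e. the property has not been changed from its default;
        # main() resets a property only when this returns False.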
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('show-addrprop')
cmd.append('-c')
cmd.append('-o')
cmd.append('current,default')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.addrobj)
(rc, out, _) = self.module.run_command(cmd)
out = out.rstrip()
(value, default) = out.split(':')
if rc == 0 and value == default:
return True
else:
return False
def property_is_set(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('show-addrprop')
cmd.append('-c')
cmd.append('-o')
cmd.append('current')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.addrobj)
(rc, out, _) = self.module.run_command(cmd)
out = out.rstrip()
if rc == 0 and self.value == out:
return True
else:
return False
def set_property(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('set-addrprop')
if self.temporary:
cmd.append('-t')
cmd.append('-p')
cmd.append(self.property + '=' + self.value)
cmd.append(self.addrobj)
return self.module.run_command(cmd)
def reset_property(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('reset-addrprop')
if self.temporary:
cmd.append('-t')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.addrobj)
return self.module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
            addrobj=dict(required=True, default=None, aliases=['nic', 'interface']),
property=dict(required=True, aliases=['name']),
value=dict(required=False),
temporary=dict(default=False, type='bool'),
state=dict(
default='present', choices=['absent', 'present', 'reset']),
),
supports_check_mode=True
)
addrprop = AddrProp(module)
rc = None
out = ''
err = ''
result = {}
result['property'] = addrprop.property
result['addrobj'] = addrprop.addrobj
result['state'] = addrprop.state
result['temporary'] = addrprop.temporary
if addrprop.value:
result['value'] = addrprop.value
if addrprop.state == 'absent' or addrprop.state == 'reset':
if addrprop.property_exists():
if not addrprop.property_is_modified():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = addrprop.reset_property()
if rc != 0:
module.fail_json(property=addrprop.property,
addrobj=addrprop.addrobj,
msg=err,
rc=rc)
elif addrprop.state == 'present':
if addrprop.value is None:
module.fail_json(msg='Value is mandatory with state "present"')
if addrprop.property_exists():
if not addrprop.property_is_set():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = addrprop.set_property()
if rc != 0:
module.fail_json(property=addrprop.property,
addrobj=addrprop.addrobj,
msg=err,
rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
|
t3dev/odoo | refs/heads/master | odoo/addons/test_pylint/tests/__init__.py | 20 | from . import test_pylint
|
rubikloud/scikit-learn | refs/heads/0.17.1-RUBIKLOUD | sklearn/metrics/cluster/bicluster.py | 359 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
    result[i, j] is the similarity, computed with the given ``similarity``
    function, of a's bicluster i and b's bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
    * Hochreiter, Bodenhofer, et al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
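# Illustrative usage sketch (editor addition, not part of the original module).
# The indicator arrays are made up; comparing a set of biclusters with itself
# should yield a consensus score of 1.0.
if __name__ == "__main__":
    rows = np.array([[True, True, False, False],
                     [False, False, True, True]])
    cols = np.array([[True, False],
                     [False, True]])
    print(consensus_score((rows, cols), (rows, cols)))  # -> 1.0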
|
simo-tuomisto/portfolio | refs/heads/master | Statistical Methods 2014 - Home exam/Code/exam_p02.py | 1 | import numpy as np
import string
import random
import matplotlib.pyplot as mpl
class Passenger:
def __init__(self, rowNumber, seatLetter):
self.rowNumber = float(rowNumber)
self.seatLetter = seatLetter
self.hasWaited = False
def checkPosition(self, position):
return position == self.rowNumber
def __repr__(self):
return 'Passenger %d%s' % (int(self.rowNumber), self.seatLetter)
class Plane:
def __init__(self, rows, aisleTime=20, seatTime=0, walkSpeed=0.5):
self.rows = rows
self.aisle = dict()
self.seated = dict()
self.aisleTime = aisleTime
self.seatTime = seatTime
self.walkSpeed = walkSpeed
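        # The aisle (plus the queue outside the cabin, positions below row 1)
        # is discretized in steps of walkSpeed from -15 up to the last row;
        # reversing the array lets movePassengers() advance the passengers
        # nearest their rows first in a single sweep.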
self.positions = np.arange(-15.0,rows+walkSpeed,walkSpeed)[::-1]
self.wait = np.zeros_like(self.positions)
self.time = 0
self.times = np.zeros(rows)
def movePassengers(self):
for index in np.arange(0,self.positions.shape[0]):
position = self.positions[index]
if self.wait[index] > 0:
self.wait[index] -= 1
if self.wait[index] > 0:
continue
if position in self.aisle:
passenger = self.aisle[position]
if passenger.checkPosition(position):
if not passenger.hasWaited:
if position not in self.seated:
self.seated[position] = []
self.seated[position].append(passenger.seatLetter)
self.wait[index] = self.aisleTime
if (passenger.seatLetter == 'A' and 'C' in self.seated[position]) or (passenger.seatLetter == 'F' and 'D' in self.seated[position]):
self.wait[index] += self.seatTime
passenger.hasWaited = True
else:
self.times[position-1] = self.times[position-1] + self.time - passenger.time
del self.aisle[position]
elif position+self.walkSpeed not in self.aisle:
self.aisle[position+self.walkSpeed] = self.aisle.pop(position)
self.time += 1
def addPassenger(self, passenger):
if self.positions[-1] not in self.aisle:
passenger.time = self.time
self.aisle[self.positions[-1]] = passenger
return True
else:
return False
class Simulator:
def __init__(self, rows=25, aisleTime=10, seatTime=0):
self.rows = rows
self.aisleTime = aisleTime
self.seatTime = seatTime
def getPassengers(self):
passengers = dict()
for seating in ['A','C','D','F']:
passlist = []
for row in np.arange(1,self.rows+1):
passlist.append(Passenger(row, seating))
passengers[seating] = passlist
return passengers
def simulateOutsideIn(self, verbose=0):
passengers = self.getPassengers()
passengers1 = passengers['A'] + passengers['F']
passengers2 = passengers['C'] + passengers['D']
passengers = random.sample(passengers1,2*self.rows) + random.sample(passengers2,2*self.rows)
return self.simulate(passengers,verbose)
def simulateRandom(self, verbose=0):
passengers = self.getPassengers()
passengers = passengers['A'] + passengers['C'] + passengers['D'] + passengers['F']
passengers = random.sample(passengers,4*self.rows)
return self.simulate(passengers,verbose)
def simulate(self,passengers,verbose):
plane = Plane(self.rows, self.aisleTime, self.seatTime)
for passenger in passengers:
added = False
while not added:
added = plane.addPassenger(passenger)
if verbose >= 2:
self.printSituation(plane)
plane.movePassengers()
while len(plane.aisle) > 0:
if verbose >= 2:
self.printSituation(plane)
plane.movePassengers()
if verbose >= 2:
self.printSituation(plane)
if verbose >= 1:
print plane.seated
#self.printSituation(plane)
return plane.times, plane.time
def printSituation(self, plane):
pos = plane.positions
aisle = []
for key in sorted(plane.aisle.keys()):
aisle.append('%s : %s' % (str(key) , repr(plane.aisle[key])))
aisle = '{ ' + string.join(aisle,' , ') + ' }'
wait = []
for position, waittime in zip(pos[plane.wait > 0], plane.wait[plane.wait > 0]):
wait.append('%s : %s' % (str(position) , str(waittime)))
wait = '{ ' + string.join(wait,' , ') + ' }'
print plane.time,aisle,wait,plane.seated
if __name__=="__main__":
sim = Simulator(rows=25,seatTime=10,aisleTime=20)
nsamples = 50
# Times contains individual passenger times
times1 = []
# Time contains overall time taken
time1 = []
for i in range(1,nsamples+1):
print 'Simulating Outside-In %d' % i
times,time = sim.simulateOutsideIn(verbose=0)
times1.append(times)
time1.append(time)
times1 = np.asfarray(times1)
times_avg1 = np.average(times1,axis=0)
time_avg1 = np.average(time1,axis=0)
time_std1 = np.std(time1,axis=0)
times2 = []
time2 = []
for i in range(1,nsamples+1):
print 'Simulating Random %d' % i
times,time = sim.simulateRandom(verbose=0)
times2.append(times)
time2.append(time)
times2 = np.asfarray(times2)
times_avg2 = np.average(times2,axis=0)
time_avg2 = np.average(time2,axis=0)
time_std2 = np.std(time2,axis=0)
print 'Outside-In boarding:'
print 'Average time taken:',time_avg1,' Std:',time_std1
print 'Random boarding:'
print 'Average time taken:',time_avg2,' Std:',time_std2
bins = np.linspace(np.amin(time1),np.amax(time2),25)
timehist1 = np.histogram(time1, bins=bins)[0]
timehist2 = np.histogram(time2, bins=bins)[0]
mpl.figure(facecolor='white', figsize=(12,9))
mpl.plot(bins[:-1], timehist1,'b^',label='Outside-In boarding')
mpl.plot(bins[:-1], timehist1,'b-')
mpl.plot(bins[:-1], timehist2,'r^',label='Random boarding')
mpl.plot(bins[:-1], timehist2,'r-')
mpl.xlabel('t')
mpl.ylabel('N')
mpl.legend(loc=4)
mpl.savefig('p02_distributions.pdf')
#mpl.show()
|
ilsindia/php-cf-buildtest | refs/heads/master | lib/yaml/dumper.py | 543 |
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
from emitter import *
from serializer import *
from representer import *
from resolver import *
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class Dumper(Emitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
|
MonicaHsu/truvaluation | refs/heads/master | venv/lib/python2.7/site-packages/werkzeug/datastructures.py | 314 | # -*- coding: utf-8 -*-
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import codecs
import mimetypes
from itertools import repeat
from werkzeug._internal import _missing, _empty_stream
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
PY2, text_type, integer_types, string_types, make_literal_wrapper
_locale_delim_re = re.compile(r'[_-]')
def is_immutable(self):
raise TypeError('%r objects are immutable' % self.__class__.__name__)
def iter_multi_items(mapping):
"""Iterates over the items of a mapping yielding keys and values
without dropping any from more complex structures.
"""
if isinstance(mapping, MultiDict):
for item in iteritems(mapping, multi=True):
yield item
elif isinstance(mapping, dict):
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
for value in value:
yield key, value
else:
yield key, value
else:
for item in mapping:
yield item
def native_itermethods(names):
if not PY2:
return lambda x: x
def setmethod(cls, name):
itermethod = getattr(cls, name)
setattr(cls, 'iter%s' % name, itermethod)
listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw))
listmethod.__doc__ = \
'Like :py:meth:`iter%s`, but returns a list.' % name
setattr(cls, name, listmethod)
def wrap(cls):
for name in names:
setmethod(cls, name)
return cls
return wrap
class ImmutableListMixin(object):
"""Makes a :class:`list` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(tuple(self))
return rv
def __reduce_ex__(self, protocol):
return type(self), (list(self),)
def __delitem__(self, key):
is_immutable(self)
def __delslice__(self, i, j):
is_immutable(self)
def __iadd__(self, other):
is_immutable(self)
__imul__ = __iadd__
def __setitem__(self, key, value):
is_immutable(self)
def __setslice__(self, i, j, value):
is_immutable(self)
def append(self, item):
is_immutable(self)
remove = append
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def reverse(self):
is_immutable(self)
def sort(self, cmp=None, key=None, reverse=None):
is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
"""An immutable :class:`list`.
.. versionadded:: 0.5
:private:
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
            list.__repr__(self),
)
class ImmutableDictMixin(object):
"""Makes a :class:`dict` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
@classmethod
def fromkeys(cls, keys, value=None):
instance = super(cls, cls).__new__(cls)
instance.__init__(zip(keys, repeat(value)))
return instance
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def _iter_hashitems(self):
return iteritems(self)
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
return rv
def setdefault(self, key, default=None):
is_immutable(self)
def update(self, *args, **kwargs):
is_immutable(self)
def pop(self, key, default=None):
is_immutable(self)
def popitem(self):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
def __delitem__(self, key):
is_immutable(self)
def clear(self):
is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
"""Makes a :class:`MultiDict` immutable.
.. versionadded:: 0.5
:private:
"""
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def _iter_hashitems(self):
return iteritems(self, multi=True)
def add(self, key, value):
is_immutable(self)
def popitemlist(self):
is_immutable(self)
def poplist(self, key):
is_immutable(self)
def setlist(self, key, new_list):
is_immutable(self)
def setlistdefault(self, key, default_list=None):
is_immutable(self)
class UpdateDictMixin(object):
"""Makes dicts call `self.on_update` on modifications.
.. versionadded:: 0.5
:private:
"""
on_update = None
def calls_update(name):
def oncall(self, *args, **kw):
rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
if self.on_update is not None:
self.on_update(self)
return rv
oncall.__name__ = name
return oncall
def setdefault(self, key, default=None):
modified = key not in self
rv = super(UpdateDictMixin, self).setdefault(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
def pop(self, key, default=_missing):
modified = key in self
if default is _missing:
rv = super(UpdateDictMixin, self).pop(key)
else:
rv = super(UpdateDictMixin, self).pop(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
__setitem__ = calls_update('__setitem__')
__delitem__ = calls_update('__delitem__')
clear = calls_update('clear')
popitem = calls_update('popitem')
update = calls_update('update')
del calls_update
class TypeConversionDict(dict):
"""Works like a regular dict but the :meth:`get` method can perform
type conversions. :class:`MultiDict` and :class:`CombinedMultiDict`
are subclasses of this class and provide the same feature.
.. versionadded:: 0.5
"""
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = TypeConversionDict(foo='42', bar='blub')
>>> d.get('foo', type=int)
42
>>> d.get('bar', -1, type=int)
-1
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self[key]
if type is not None:
rv = type(rv)
except (KeyError, ValueError):
rv = default
return rv
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
"""Works like a :class:`TypeConversionDict` but does not support
modifications.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return TypeConversionDict(self)
def __copy__(self):
return self
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class MultiDict(TypeConversionDict):
"""A :class:`MultiDict` is a dictionary subclass customized to deal with
multiple values for the same key which is for example used by the parsing
functions in the wrappers. This is necessary because some HTML form
elements pass multiple values for the same key.
:class:`MultiDict` implements all standard dictionary methods.
Internally, it saves all values for a key as a list, but the standard dict
access methods will only return the first value for a key. If you want to
gain access to the other values, too, you have to use the `list` methods as
explained below.
Basic Usage:
>>> d = MultiDict([('a', 'b'), ('a', 'c')])
>>> d
MultiDict([('a', 'b'), ('a', 'c')])
>>> d['a']
'b'
>>> d.getlist('a')
['b', 'c']
>>> 'a' in d
True
It behaves like a normal dict thus all dict functions will only return the
first value when multiple values for one key are found.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
A :class:`MultiDict` can be constructed from an iterable of
``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
onwards some keyword parameters.
:param mapping: the initial value for the :class:`MultiDict`. Either a
regular dict, an iterable of ``(key, value)`` tuples
or `None`.
"""
def __init__(self, mapping=None):
if isinstance(mapping, MultiDict):
dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
elif isinstance(mapping, dict):
tmp = {}
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
value = list(value)
else:
value = [value]
tmp[key] = value
dict.__init__(self, tmp)
else:
tmp = {}
for key, value in mapping or ():
tmp.setdefault(key, []).append(value)
dict.__init__(self, tmp)
def __getstate__(self):
return dict(self.lists())
def __setstate__(self, value):
dict.clear(self)
dict.update(self, value)
def __getitem__(self, key):
"""Return the first data value for this key;
raises KeyError if not found.
:param key: The key to be looked up.
:raise KeyError: if the key does not exist.
"""
if key in self:
return dict.__getitem__(self, key)[0]
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
"""Like :meth:`add` but removes an existing key first.
:param key: the key for the value.
:param value: the value to set.
"""
dict.__setitem__(self, key, [value])
def add(self, key, value):
"""Adds a new value for the key.
.. versionadded:: 0.6
:param key: the key for the value.
:param value: the value to add.
"""
dict.setdefault(self, key, []).append(value)
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just as `get`
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result
def setlist(self, key, new_list):
"""Remove the old values for a key and add new ones. Note that the list
you pass the values in will be shallow-copied before it is inserted in
the dictionary.
>>> d = MultiDict()
>>> d.setlist('foo', ['1', '2'])
>>> d['foo']
'1'
>>> d.getlist('foo')
['1', '2']
:param key: The key for which the values are set.
:param new_list: An iterable with the new values for the key. Old values
are removed first.
"""
dict.__setitem__(self, key, list(new_list))
def setdefault(self, key, default=None):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key not in self:
self[key] = default
else:
default = self[key]
return default
def setlistdefault(self, key, default_list=None):
"""Like `setdefault` but sets multiple values. The list returned
is not a copy, but the list that is actually used internally. This
means that you can put new values into the dict by appending items
to the list:
>>> d = MultiDict({"foo": 1})
>>> d.setlistdefault("foo").extend([2, 3])
>>> d.getlist("foo")
[1, 2, 3]
:param key: The key to be looked up.
        :param default_list: An iterable of default values.  It is either
                             copied (in case it was a list) or converted
                             into a list before it is returned.
:return: a :class:`list`
"""
if key not in self:
default_list = list(default_list or ())
dict.__setitem__(self, key, default_list)
else:
default_list = dict.__getitem__(self, key)
return default_list
def items(self, multi=False):
"""Return an iterator of ``(key, value)`` pairs.
:param multi: If set to `True` the iterator returned will have a pair
for each value of each key. Otherwise it will only
contain pairs for the first value of each key.
"""
for key, values in iteritems(dict, self):
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
def lists(self):
"""Return a list of ``(key, values)`` pairs, where values is the list
of all values associated with the key."""
for key, values in iteritems(dict, self):
yield key, list(values)
def keys(self):
return iterkeys(dict, self)
__iter__ = keys
def values(self):
"""Returns an iterator of the first value on every key's value list."""
for values in itervalues(dict, self):
yield values[0]
def listvalues(self):
"""Return an iterator of all values associated with a key. Zipping
:meth:`keys` and this is the same as calling :meth:`lists`:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> zip(d.keys(), d.listvalues()) == d.lists()
True
"""
return itervalues(dict, self)
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self)
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first value for each key.
:return: a :class:`dict`
"""
if flat:
return dict(iteritems(self))
return dict(self.lists())
def update(self, other_dict):
"""update() extends rather than replaces existing key lists."""
for key, value in iter_multi_items(other_dict):
MultiDict.add(self, key, value)
def pop(self, key, default=_missing):
"""Pop the first item for a list on the dict. Afterwards the
key is removed from the dict, so additional values are discarded:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> d.pop("foo")
1
>>> "foo" in d
False
:param key: the key to pop.
:param default: if provided the value to return if the key was
not in the dictionary.
"""
try:
return dict.pop(self, key)[0]
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
def popitem(self):
"""Pop an item from the dict."""
try:
item = dict.popitem(self)
return (item[0], item[1][0])
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def poplist(self, key):
"""Pop the list for a key from the dict. If the key is not in the dict
an empty list is returned.
.. versionchanged:: 0.5
           If the key no longer exists a list is returned instead of
           raising an error.
"""
return dict.pop(self, key, [])
def popitemlist(self):
"""Pop a ``(key, list)`` tuple from the dict."""
try:
return dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def __copy__(self):
return self.copy()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True)))
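# Illustrative sketch (doctest style, names and values are arbitrary):
# ``update`` extends the value lists of a MultiDict instead of replacing
# them, and ``to_dict`` flattens the result.
#
#     >>> d = MultiDict([('a', 'b'), ('a', 'c')])
#     >>> d.update({'a': 'd'})
#     >>> d.getlist('a')
#     ['b', 'c', 'd']
#     >>> d.to_dict()
#     {'a': 'b'}
#     >>> d.to_dict(flat=False)
#     {'a': ['b', 'c', 'd']}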
class _omd_bucket(object):
"""Wraps values in the :class:`OrderedMultiDict`. This makes it
possible to keep an order over multiple different keys. It requires
a lot of extra memory and slows down access a lot, but makes it
possible to access elements in O(1) and iterate in O(n).
"""
__slots__ = ('prev', 'key', 'value', 'next')
def __init__(self, omd, key, value):
self.prev = omd._last_bucket
self.key = key
self.value = value
self.next = None
if omd._first_bucket is None:
omd._first_bucket = self
if omd._last_bucket is not None:
omd._last_bucket.next = self
omd._last_bucket = self
def unlink(self, omd):
if self.prev:
self.prev.next = self.next
if self.next:
self.next.prev = self.prev
if omd._first_bucket is self:
omd._first_bucket = self.next
if omd._last_bucket is self:
omd._last_bucket = self.prev
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class OrderedMultiDict(MultiDict):
"""Works like a regular :class:`MultiDict` but preserves the
order of the fields. To convert the ordered multi dict into a
list you can use the :meth:`items` method and pass it ``multi=True``.
In general an :class:`OrderedMultiDict` is an order of magnitude
slower than a :class:`MultiDict`.
.. admonition:: note
Due to a limitation in Python you cannot convert an ordered
multi dict into a regular dict by using ``dict(multidict)``.
Instead you have to use the :meth:`to_dict` method, otherwise
the internal bucket objects are exposed.
"""
def __init__(self, mapping=None):
dict.__init__(self)
self._first_bucket = self._last_bucket = None
if mapping is not None:
OrderedMultiDict.update(self, mapping)
def __eq__(self, other):
if not isinstance(other, MultiDict):
return NotImplemented
if isinstance(other, OrderedMultiDict):
iter1 = iteritems(self, multi=True)
iter2 = iteritems(other, multi=True)
try:
for k1, v1 in iter1:
k2, v2 = next(iter2)
if k1 != k2 or v1 != v2:
return False
except StopIteration:
return False
try:
next(iter2)
except StopIteration:
return True
return False
if len(self) != len(other):
return False
for key, values in iterlists(self):
if other.getlist(key) != values:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def __getstate__(self):
return list(iteritems(self, multi=True))
def __setstate__(self, values):
dict.clear(self)
for key, value in values:
self.add(key, value)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)[0].value
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
self.poplist(key)
self.add(key, value)
def __delitem__(self, key):
self.pop(key)
def keys(self):
return (key for key, value in iteritems(self))
__iter__ = keys
def values(self):
return (value for key, value in iteritems(self))
def items(self, multi=False):
ptr = self._first_bucket
if multi:
while ptr is not None:
yield ptr.key, ptr.value
ptr = ptr.next
else:
returned_keys = set()
while ptr is not None:
if ptr.key not in returned_keys:
returned_keys.add(ptr.key)
yield ptr.key, ptr.value
ptr = ptr.next
def lists(self):
returned_keys = set()
ptr = self._first_bucket
while ptr is not None:
if ptr.key not in returned_keys:
yield ptr.key, self.getlist(ptr.key)
returned_keys.add(ptr.key)
ptr = ptr.next
def listvalues(self):
for key, values in iterlists(self):
yield values
def add(self, key, value):
dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
def getlist(self, key, type=None):
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return [x.value for x in rv]
result = []
for item in rv:
try:
result.append(type(item.value))
except ValueError:
pass
return result
def setlist(self, key, new_list):
self.poplist(key)
for value in new_list:
self.add(key, value)
def setlistdefault(self, key, default_list=None):
raise TypeError('setlistdefault is unsupported for '
'ordered multi dicts')
def update(self, mapping):
for key, value in iter_multi_items(mapping):
OrderedMultiDict.add(self, key, value)
def poplist(self, key):
buckets = dict.pop(self, key, ())
for bucket in buckets:
bucket.unlink(self)
return [x.value for x in buckets]
def pop(self, key, default=_missing):
try:
buckets = dict.pop(self, key)
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return buckets[0].value
def popitem(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, buckets[0].value
def popitemlist(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, [x.value for x in buckets]
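# Illustrative sketch (doctest style, names and values are arbitrary):
# insertion order is preserved across different keys, which a plain
# MultiDict does not guarantee.
#
#     >>> d = OrderedMultiDict()
#     >>> d.add('foo', 'bar')
#     >>> d.add('other', 'blub')
#     >>> d.add('foo', 'baz')
#     >>> list(d.items(multi=True))
#     [('foo', 'bar'), ('other', 'blub'), ('foo', 'baz')]
#     >>> d.getlist('foo')
#     ['bar', 'baz']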
def _options_header_vkw(value, kw):
return dump_options_header(value, dict((k.replace('_', '-'), v)
for k, v in kw.items()))
def _unicodify_header_value(value):
if isinstance(value, bytes):
value = value.decode('latin-1')
if not isinstance(value, text_type):
value = text_type(value)
return value
@native_itermethods(['keys', 'values', 'items'])
class Headers(object):
"""An object that stores some headers. It has a dict-like interface
but is ordered and can store the same keys multiple times.
This data structure is useful if you want a nicer way to handle WSGI
headers which are stored as tuples in a list.
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
and will render a page for a ``400 BAD REQUEST`` if caught in a
catch-all for HTTP exceptions.
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
To create a new :class:`Headers` object pass it a list or dict of headers
which are used as default values. This does not reuse the list passed
to the constructor for internal usage.
:param defaults: The list of default values for the :class:`Headers`.
.. versionchanged:: 0.9
This data structure now stores unicode values similar to how the
multi dicts do it. The main difference is that bytes can be set as
well which will automatically be latin1 decoded.
.. versionchanged:: 0.9
The :meth:`linked` function was removed without replacement as it
was an API that does not support the changes to the encoding model.
"""
def __init__(self, defaults=None):
self._list = []
if defaults is not None:
if isinstance(defaults, (list, Headers)):
self._list.extend(defaults)
else:
self.extend(defaults)
def __getitem__(self, key, _get_mode=False):
if not _get_mode:
if isinstance(key, integer_types):
return self._list[key]
elif isinstance(key, slice):
return self.__class__(self._list[key])
if not isinstance(key, string_types):
raise exceptions.BadRequestKeyError(key)
ikey = key.lower()
for k, v in self._list:
if k.lower() == ikey:
return v
# micro optimization: if we are in get mode we will catch that
# exception one stack level down so we can raise a standard
# key error instead of our special one.
if _get_mode:
raise KeyError()
raise exceptions.BadRequestKeyError(key)
def __eq__(self, other):
return other.__class__ is self.__class__ and \
set(other._list) == set(self._list)
def __ne__(self, other):
return not self.__eq__(other)
def get(self, key, default=None, type=None, as_bytes=False):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = Headers([('Content-Length', '42')])
>>> d.get('Content-Length', type=int)
42
If a headers object is bound you must not add unicode strings
because no encoding takes place.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
:param as_bytes: return bytes instead of unicode strings.
"""
try:
rv = self.__getitem__(key, _get_mode=True)
except KeyError:
return default
if as_bytes:
rv = rv.encode('latin1')
if type is None:
return rv
try:
return type(rv)
except ValueError:
return default
def getlist(self, key, type=None, as_bytes=False):
"""Return the list of items for a given key. If that key is not in the
:class:`Headers`, the return value will be an empty list. Just as
:meth:`get` :meth:`getlist` accepts a `type` parameter. All items will
be converted with the callable defined there.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
:param as_bytes: return bytes instead of unicode strings.
"""
ikey = key.lower()
result = []
for k, v in self:
if k.lower() == ikey:
if as_bytes:
v = v.encode('latin1')
if type is not None:
try:
v = type(v)
except ValueError:
continue
result.append(v)
return result
def get_all(self, name):
"""Return a list of all the values for the named field.
This method is compatible with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.get_all` method.
"""
return self.getlist(name)
def items(self, lower=False):
for key, value in self:
if lower:
key = key.lower()
yield key, value
def keys(self, lower=False):
for key, _ in iteritems(self, lower):
yield key
def values(self):
for _, value in iteritems(self):
yield value
def extend(self, iterable):
"""Extend the headers with a dict or an iterable yielding keys and
values.
"""
if isinstance(iterable, dict):
for key, value in iteritems(iterable):
if isinstance(value, (tuple, list)):
for v in value:
self.add(key, v)
else:
self.add(key, value)
else:
for key, value in iterable:
self.add(key, value)
def __delitem__(self, key, _index_operation=True):
if _index_operation and isinstance(key, (integer_types, slice)):
del self._list[key]
return
key = key.lower()
new = []
for k, v in self._list:
if k.lower() != key:
new.append((k, v))
self._list[:] = new
def remove(self, key):
"""Remove a key.
:param key: The key to be removed.
"""
return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
"""Removes and returns a key or index.
:param key: The key to be popped. If this is an integer the item at
that position is removed, if it's a string the value for
that key is. If the key is omitted or `None` the last
item is removed.
:return: an item.
"""
if key is None:
return self._list.pop()
if isinstance(key, integer_types):
return self._list.pop(key)
try:
rv = self[key]
self.remove(key)
except KeyError:
if default is not _missing:
return default
raise
return rv
def popitem(self):
"""Removes a key or index and returns a (key, value) item."""
return self.pop()
def __contains__(self, key):
"""Check if a key is present."""
try:
self.__getitem__(key, _get_mode=True)
except KeyError:
return False
return True
has_key = __contains__
def __iter__(self):
"""Yield ``(key, value)`` tuples."""
return iter(self._list)
def __len__(self):
return len(self._list)
def add(self, _key, _value, **kw):
"""Add a new header tuple to the list.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes::
>>> d = Headers()
>>> d.add('Content-Type', 'text/plain')
>>> d.add('Content-Disposition', 'attachment', filename='foo.png')
The keyword argument dumping uses :func:`dump_options_header`
behind the scenes.
.. versionadded:: 0.4.1
keyword arguments were added for :mod:`wsgiref` compatibility.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
self._list.append((_key, _value))
def _validate_value(self, value):
if not isinstance(value, text_type):
raise TypeError('Value should be unicode.')
if u'\n' in value or u'\r' in value:
raise ValueError('Detected newline in header value. This is '
'a potential security problem')
def add_header(self, _key, _value, **_kw):
"""Add a new header tuple to the list.
An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.add_header` method.
"""
self.add(_key, _value, **_kw)
def clear(self):
"""Clears all headers."""
del self._list[:]
def set(self, _key, _value, **kw):
"""Remove all header tuples for `key` and add a new one. The newly
added key either appears at the end of the list if there was no
entry or replaces the first one.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes. See :meth:`add` for
more information.
.. versionchanged:: 0.6.1
:meth:`set` now accepts the same arguments as :meth:`add`.
:param key: The key to be inserted.
:param value: The value to be inserted.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
if not self._list:
self._list.append((_key, _value))
return
listiter = iter(self._list)
ikey = _key.lower()
for idx, (old_key, old_value) in enumerate(listiter):
if old_key.lower() == ikey:
                # replace first occurrence
self._list[idx] = (_key, _value)
break
else:
self._list.append((_key, _value))
return
self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey]
def setdefault(self, key, value):
"""Returns the value for the key if it is in the dict, otherwise it
        returns `value` and sets that value for `key`.
        :param key: The key to be looked up.
        :param value: The value to be set for `key` and returned if the
                      key is not in the dict.
"""
if key in self:
return self[key]
self.set(key, value)
return value
def __setitem__(self, key, value):
"""Like :meth:`set` but also supports index/slice based setting."""
if isinstance(key, (slice, integer_types)):
if isinstance(key, integer_types):
value = [value]
value = [(k, _unicodify_header_value(v)) for (k, v) in value]
[self._validate_value(v) for (k, v) in value]
if isinstance(key, integer_types):
self._list[key] = value[0]
else:
self._list[key] = value
else:
self.set(key, value)
def to_list(self, charset='iso-8859-1'):
"""Convert the headers into a list suitable for WSGI."""
from warnings import warn
warn(DeprecationWarning('Method removed, use to_wsgi_list instead'),
stacklevel=2)
return self.to_wsgi_list()
def to_wsgi_list(self):
"""Convert the headers into a list suitable for WSGI.
The values are byte strings in Python 2 converted to latin1 and unicode
strings in Python 3 for the WSGI server to encode.
:return: list
"""
if PY2:
return [(k, v.encode('latin1')) for k, v in self]
return list(self)
def copy(self):
return self.__class__(self._list)
def __copy__(self):
return self.copy()
def __str__(self):
"""Returns formatted headers suitable for HTTP transmission."""
strs = []
for key, value in self.to_wsgi_list():
strs.append('%s: %s' % (key, value))
strs.append('\r\n')
return '\r\n'.join(strs)
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
list(self)
)
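# Illustrative sketch (doctest style, names and values are arbitrary):
# header lookups are case insensitive and ``set`` replaces all existing
# values for a key.  Comparisons are used below because values are stored
# as text (unicode on Python 2).
#
#     >>> h = Headers()
#     >>> h.add('Content-Type', 'text/plain')
#     >>> h.add('Set-Cookie', 'a=1')
#     >>> h.add('Set-Cookie', 'b=2')
#     >>> h.getlist('set-cookie') == ['a=1', 'b=2']
#     True
#     >>> h.set('Content-Type', 'text/html')
#     >>> h['content-type'] == 'text/html'
#     True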
class ImmutableHeadersMixin(object):
"""Makes a :class:`Headers` immutable. We do not mark them as
    hashable though since the only use case for this data structure
    in Werkzeug is a view on a mutable structure.
.. versionadded:: 0.5
:private:
"""
def __delitem__(self, key):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
set = __setitem__
def add(self, item):
is_immutable(self)
remove = add_header = add
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def popitem(self):
is_immutable(self)
def setdefault(self, key, default):
is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
"""Read only version of the headers from a WSGI environment. This
provides the same interface as `Headers` and is constructed from
a WSGI environment.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
HTTP exceptions.
"""
def __init__(self, environ):
self.environ = environ
def __eq__(self, other):
return self.environ is other.environ
def __getitem__(self, key, _get_mode=False):
# _get_mode is a no-op for this class as there is no index but
# used because get() calls it.
key = key.upper().replace('-', '_')
if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
return _unicodify_header_value(self.environ[key])
return _unicodify_header_value(self.environ['HTTP_' + key])
def __len__(self):
# the iter is necessary because otherwise list calls our
# len which would call list again and so forth.
return len(list(iter(self)))
def __iter__(self):
for key, value in iteritems(self.environ):
if key.startswith('HTTP_') and key not in \
('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
yield (key[5:].replace('_', '-').title(),
_unicodify_header_value(value))
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield (key.replace('_', '-').title(),
_unicodify_header_value(value))
def copy(self):
raise TypeError('cannot create %r copies' % self.__class__.__name__)
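# Illustrative sketch (doctest style; the environ below is a hypothetical
# minimal WSGI environment): ``HTTP_*`` keys and the two ``CONTENT_*`` keys
# are exposed under their normally spelled header names.
#
#     >>> environ = {'CONTENT_TYPE': 'text/html',
#     ...            'HTTP_X_FORWARDED_FOR': '127.0.0.1'}
#     >>> headers = EnvironHeaders(environ)
#     >>> headers['X-Forwarded-For'] == '127.0.0.1'
#     True
#     >>> sorted(k for k, v in headers)
#     ['Content-Type', 'X-Forwarded-For']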
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
"""A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
instances as sequence and it will combine the return values of all wrapped
dicts:
>>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
>>> post = MultiDict([('foo', 'bar')])
>>> get = MultiDict([('blub', 'blah')])
>>> combined = CombinedMultiDict([get, post])
>>> combined['foo']
'bar'
>>> combined['blub']
'blah'
    This works for all read operations and will raise a `TypeError` for
    methods that usually change data, which is not possible here.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
"""
def __reduce_ex__(self, protocol):
return type(self), (self.dicts,)
def __init__(self, dicts=None):
self.dicts = dicts or []
@classmethod
def fromkeys(cls):
raise TypeError('cannot create %r instances by fromkeys' %
cls.__name__)
def __getitem__(self, key):
for d in self.dicts:
if key in d:
return d[key]
raise exceptions.BadRequestKeyError(key)
def get(self, key, default=None, type=None):
for d in self.dicts:
if key in d:
if type is not None:
try:
return type(d[key])
except ValueError:
continue
return d[key]
return default
def getlist(self, key, type=None):
rv = []
for d in self.dicts:
rv.extend(d.getlist(key, type))
return rv
def keys(self):
rv = set()
for d in self.dicts:
rv.update(d.keys())
return iter(rv)
__iter__ = keys
def items(self, multi=False):
found = set()
for d in self.dicts:
for key, value in iteritems(d, multi):
if multi:
yield key, value
elif key not in found:
found.add(key)
yield key, value
def values(self):
for key, value in iteritems(self):
yield value
def lists(self):
rv = {}
for d in self.dicts:
for key, values in iterlists(d):
rv.setdefault(key, []).extend(values)
return iteritems(rv)
def listvalues(self):
return (x[1] for x in self.lists())
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self.dicts[:])
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first item for each key.
:return: a :class:`dict`
"""
rv = {}
for d in reversed(self.dicts):
rv.update(d.to_dict(flat))
return rv
def __len__(self):
return len(self.keys())
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
has_key = __contains__
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.dicts)
class FileMultiDict(MultiDict):
"""A special :class:`MultiDict` that has convenience methods to add
files to it. This is used for :class:`EnvironBuilder` and generally
useful for unittesting.
.. versionadded:: 0.5
"""
def add_file(self, name, file, filename=None, content_type=None):
"""Adds a new file to the dict. `file` can be a file name or
a :class:`file`-like or a :class:`FileStorage` object.
:param name: the name of the field.
:param file: a filename or :class:`file`-like object
:param filename: an optional filename
:param content_type: an optional content type
"""
if isinstance(file, FileStorage):
value = file
else:
if isinstance(file, string_types):
if filename is None:
filename = file
file = open(file, 'rb')
if filename and content_type is None:
content_type = mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
value = FileStorage(file, filename, name, content_type)
self.add(name, value)
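# Illustrative sketch (doctest style; the field and file names are
# hypothetical, and it assumes the FileStorage class used by this module is
# importable): a file-like object is wrapped and the content type is
# guessed from the filename.
#
#     >>> from io import BytesIO
#     >>> files = FileMultiDict()
#     >>> files.add_file('avatar', BytesIO(b'fake image data'),
#     ...                filename='avatar.png')
#     >>> files['avatar'].filename == 'avatar.png'
#     True
#     >>> files['avatar'].content_type == 'image/png'
#     True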
class ImmutableDict(ImmutableDictMixin, dict):
"""An immutable :class:`dict`.
.. versionadded:: 0.5
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
dict.__repr__(self),
)
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return dict(self)
def __copy__(self):
return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
"""An immutable :class:`MultiDict`.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return MultiDict(self)
def __copy__(self):
return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
"""An immutable :class:`OrderedMultiDict`.
.. versionadded:: 0.6
"""
def _iter_hashitems(self):
return enumerate(iteritems(self, multi=True))
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return OrderedMultiDict(self)
def __copy__(self):
return self
@native_itermethods(['values'])
class Accept(ImmutableList):
"""An :class:`Accept` object is just a list subclass for lists of
``(value, quality)`` tuples. It is automatically sorted by quality.
All :class:`Accept` objects work similar to a list but provide extra
functionality for working with the data. Containment checks are
normalized to the rules of that header:
>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
>>> a.best
'ISO-8859-1'
>>> 'iso-8859-1' in a
True
>>> 'UTF8' in a
True
>>> 'utf7' in a
False
To get the quality for an item you can use normal item lookup:
>>> print a['utf-8']
0.7
>>> a['utf7']
0
.. versionchanged:: 0.5
:class:`Accept` objects are forced immutable now.
"""
def __init__(self, values=()):
if values is None:
list.__init__(self)
self.provided = False
elif isinstance(values, Accept):
self.provided = values.provided
list.__init__(self, values)
else:
self.provided = True
values = [(a, b) for b, a in values]
values.sort()
values.reverse()
list.__init__(self, [(a, b) for b, a in values])
def _value_matches(self, value, item):
"""Check if a value matches a given accept item."""
return item == '*' or item.lower() == value.lower()
def __getitem__(self, key):
"""Besides index lookup (getting item n) you can also pass it a string
to get the quality for the item. If the item is not in the list, the
returned quality is ``0``.
"""
if isinstance(key, string_types):
return self.quality(key)
return list.__getitem__(self, key)
def quality(self, key):
"""Returns the quality of the key.
.. versionadded:: 0.6
In previous versions you had to use the item-lookup syntax
(eg: ``obj[key]`` instead of ``obj.quality(key)``)
"""
for item, quality in self:
if self._value_matches(key, item):
return quality
return 0
def __contains__(self, value):
for item, quality in self:
if self._value_matches(value, item):
return True
return False
def __repr__(self):
return '%s([%s])' % (
self.__class__.__name__,
', '.join('(%r, %s)' % (x, y) for x, y in self)
)
def index(self, key):
"""Get the position of an entry or raise :exc:`ValueError`.
:param key: The key to be looked up.
.. versionchanged:: 0.5
This used to raise :exc:`IndexError`, which was inconsistent
with the list API.
"""
if isinstance(key, string_types):
for idx, (item, quality) in enumerate(self):
if self._value_matches(key, item):
return idx
raise ValueError(key)
return list.index(self, key)
def find(self, key):
"""Get the position of an entry or return -1.
:param key: The key to be looked up.
"""
try:
return self.index(key)
except ValueError:
return -1
def values(self):
"""Iterate over all values."""
for item in self:
yield item[0]
def to_header(self):
"""Convert the header set into an HTTP header string."""
result = []
for value, quality in self:
if quality != 1:
value = '%s;q=%s' % (value, quality)
result.append(value)
return ','.join(result)
def __str__(self):
return self.to_header()
def best_match(self, matches, default=None):
"""Returns the best match from a list of possible matches based
on the quality of the client. If two items have the same quality,
the one is returned that comes first.
:param matches: a list of matches to check for
:param default: the value that is returned if none match
"""
best_quality = -1
result = default
for server_item in matches:
for client_item, quality in self:
if quality <= best_quality:
break
if self._value_matches(server_item, client_item):
best_quality = quality
result = server_item
return result
@property
def best(self):
"""The best match as value."""
if self:
return self[0][0]
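# Illustrative sketch (doctest style, values are arbitrary) for
# :meth:`best_match`: the highest quality wins, and the default is returned
# when nothing matches.
#
#     >>> a = Accept([('gzip', 1), ('identity', 0.5)])
#     >>> a.best_match(['identity', 'gzip'])
#     'gzip'
#     >>> a.best_match(['br'], default='identity')
#     'identity'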
class MIMEAccept(Accept):
"""Like :class:`Accept` but with special methods and behavior for
mimetypes.
"""
def _value_matches(self, value, item):
def _normalize(x):
x = x.lower()
return x == '*' and ('*', '*') or x.split('/', 1)
# this is from the application which is trusted. to avoid developer
# frustration we actually check these for valid values
if '/' not in value:
raise ValueError('invalid mimetype %r' % value)
value_type, value_subtype = _normalize(value)
if value_type == '*' and value_subtype != '*':
raise ValueError('invalid mimetype %r' % value)
if '/' not in item:
return False
item_type, item_subtype = _normalize(item)
if item_type == '*' and item_subtype != '*':
return False
return (
(item_type == item_subtype == '*' or
value_type == value_subtype == '*') or
(item_type == value_type and (item_subtype == '*' or
value_subtype == '*' or
item_subtype == value_subtype))
)
@property
def accept_html(self):
"""True if this object accepts HTML."""
return (
'text/html' in self or
'application/xhtml+xml' in self or
self.accept_xhtml
)
@property
def accept_xhtml(self):
"""True if this object accepts XHTML."""
return (
'application/xhtml+xml' in self or
'application/xml' in self
)
@property
def accept_json(self):
"""True if this object accepts JSON."""
return 'application/json' in self
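# Illustrative sketch (doctest style, values are arbitrary): wildcard
# subtypes match concrete mimetypes, and an exact match with a higher
# quality wins in :meth:`best_match`.
#
#     >>> accept = MIMEAccept([('text/html', 1), ('application/*', 0.5)])
#     >>> 'application/json' in accept
#     True
#     >>> accept.best_match(['application/json', 'text/html'])
#     'text/html'
#     >>> accept.accept_html
#     True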
class LanguageAccept(Accept):
"""Like :class:`Accept` but with normalization for languages."""
def _value_matches(self, value, item):
def _normalize(language):
return _locale_delim_re.split(language.lower())
return item == '*' or _normalize(value) == _normalize(item)
class CharsetAccept(Accept):
"""Like :class:`Accept` but with normalization for charsets."""
def _value_matches(self, value, item):
def _normalize(name):
try:
return codecs.lookup(name).name
except LookupError:
return name.lower()
return item == '*' or _normalize(value) == _normalize(item)
def cache_property(key, empty, type):
"""Return a new property object for a cache header. Useful if you
want to add support for a cache extension in a subclass."""
return property(lambda x: x._get_cache_value(key, empty, type),
lambda x, v: x._set_cache_value(key, v, type),
lambda x: x._del_cache_value(key),
'accessor for %r' % key)
class _CacheControl(UpdateDictMixin, dict):
"""Subclass of a dict that stores values for a Cache-Control header. It
has accessors for all the cache-control directives specified in RFC 2616.
The class does not differentiate between request and response directives.
    Because the cache-control directives in the HTTP header use dashes, the
    Python descriptors use underscores instead.
To get a header of the :class:`CacheControl` object again you can convert
the object into a string or call the :meth:`to_header` method. If you plan
to subclass it and add your own items have a look at the sourcecode for
that class.
.. versionchanged:: 0.4
Setting `no_cache` or `private` to boolean `True` will set the implicit
none-value which is ``*``:
>>> cc = ResponseCacheControl()
>>> cc.no_cache = True
>>> cc
<ResponseCacheControl 'no-cache'>
>>> cc.no_cache
'*'
>>> cc.no_cache = None
>>> cc
<ResponseCacheControl ''>
In versions before 0.5 the behavior documented here affected the now
no longer existing `CacheControl` class.
"""
no_cache = cache_property('no-cache', '*', None)
no_store = cache_property('no-store', None, bool)
max_age = cache_property('max-age', -1, int)
no_transform = cache_property('no-transform', None, None)
def __init__(self, values=(), on_update=None):
dict.__init__(self, values or ())
self.on_update = on_update
self.provided = values is not None
def _get_cache_value(self, key, empty, type):
"""Used internally by the accessor properties."""
if type is bool:
return key in self
if key in self:
value = self[key]
if value is None:
return empty
elif type is not None:
try:
value = type(value)
except ValueError:
pass
return value
def _set_cache_value(self, key, value, type):
"""Used internally by the accessor properties."""
if type is bool:
if value:
self[key] = None
else:
self.pop(key, None)
else:
if value is None:
self.pop(key)
elif value is True:
self[key] = None
else:
self[key] = value
def _del_cache_value(self, key):
"""Used internally by the accessor properties."""
if key in self:
del self[key]
def to_header(self):
"""Convert the stored values into a cache control header."""
return dump_header(self)
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
"""A cache control for requests. This is immutable and gives access
to all the request-relevant cache control headers.
To get a header of the :class:`RequestCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
max_stale = cache_property('max-stale', '*', int)
min_fresh = cache_property('min-fresh', '*', int)
no_transform = cache_property('no-transform', None, None)
only_if_cached = cache_property('only-if-cached', None, bool)
class ResponseCacheControl(_CacheControl):
"""A cache control for responses. Unlike :class:`RequestCacheControl`
this is mutable and gives access to response-relevant cache control
headers.
To get a header of the :class:`ResponseCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
public = cache_property('public', None, bool)
private = cache_property('private', '*', None)
must_revalidate = cache_property('must-revalidate', None, bool)
proxy_revalidate = cache_property('proxy-revalidate', None, bool)
s_maxage = cache_property('s-maxage', None, None)
# attach cache_property to the _CacheControl as staticmethod
# so that others can reuse it.
_CacheControl.cache_property = staticmethod(cache_property)
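# Illustrative sketch (doctest style, values are arbitrary): the descriptors
# created by ``cache_property`` translate attribute access into the
# underlying dict entries.
#
#     >>> cc = ResponseCacheControl()
#     >>> cc.max_age = 3600
#     >>> cc.must_revalidate = True
#     >>> cc.max_age
#     3600
#     >>> cc.must_revalidate
#     True
#     >>> sorted(cc.items())
#     [('max-age', 3600), ('must-revalidate', None)]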
class CallbackDict(UpdateDictMixin, dict):
"""A dict that calls a function passed every time something is changed.
The function is passed the dict instance.
"""
def __init__(self, initial=None, on_update=None):
dict.__init__(self, initial or ())
self.on_update = on_update
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
dict.__repr__(self)
)
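# Illustrative sketch (doctest style, names are arbitrary): every mutation
# triggers the ``on_update`` callback with the dict itself.
#
#     >>> changes = []
#     >>> d = CallbackDict({'a': 1}, on_update=lambda d: changes.append(dict(d)))
#     >>> d['b'] = 2
#     >>> d.pop('a')
#     1
#     >>> changes == [{'a': 1, 'b': 2}, {'b': 2}]
#     True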
class HeaderSet(object):
"""Similar to the :class:`ETags` class this implements a set-like structure.
Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
content-language headers.
If not constructed using the :func:`parse_set_header` function the
instantiation works like this:
>>> hs = HeaderSet(['foo', 'bar', 'baz'])
>>> hs
HeaderSet(['foo', 'bar', 'baz'])
"""
def __init__(self, headers=None, on_update=None):
self._headers = list(headers or ())
self._set = set([x.lower() for x in self._headers])
self.on_update = on_update
def add(self, header):
"""Add a new header to the set."""
self.update((header,))
def remove(self, header):
"""Remove a header from the set. This raises an :exc:`KeyError` if the
header is not in the set.
.. versionchanged:: 0.5
           In older versions an :exc:`IndexError` was raised instead of a
:exc:`KeyError` if the object was missing.
:param header: the header to be removed.
"""
key = header.lower()
if key not in self._set:
raise KeyError(header)
self._set.remove(key)
        for idx, item in enumerate(self._headers):
            if item.lower() == key:
del self._headers[idx]
break
if self.on_update is not None:
self.on_update(self)
def update(self, iterable):
"""Add all the headers from the iterable to the set.
:param iterable: updates the set with the items from the iterable.
"""
inserted_any = False
for header in iterable:
key = header.lower()
if key not in self._set:
self._headers.append(header)
self._set.add(key)
inserted_any = True
if inserted_any and self.on_update is not None:
self.on_update(self)
def discard(self, header):
"""Like :meth:`remove` but ignores errors.
:param header: the header to be discarded.
"""
try:
return self.remove(header)
except KeyError:
pass
def find(self, header):
"""Return the index of the header in the set or return -1 if not found.
:param header: the header to be looked up.
"""
header = header.lower()
for idx, item in enumerate(self._headers):
if item.lower() == header:
return idx
return -1
def index(self, header):
"""Return the index of the header in the set or raise an
:exc:`IndexError`.
:param header: the header to be looked up.
"""
rv = self.find(header)
if rv < 0:
raise IndexError(header)
return rv
def clear(self):
"""Clear the set."""
self._set.clear()
del self._headers[:]
if self.on_update is not None:
self.on_update(self)
def as_set(self, preserve_casing=False):
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers)
return set(self._set)
def to_header(self):
"""Convert the header set into an HTTP header string."""
return ', '.join(map(quote_header_value, self._headers))
def __getitem__(self, idx):
return self._headers[idx]
def __delitem__(self, idx):
rv = self._headers.pop(idx)
self._set.remove(rv.lower())
if self.on_update is not None:
self.on_update(self)
def __setitem__(self, idx, value):
old = self._headers[idx]
self._set.remove(old.lower())
self._headers[idx] = value
self._set.add(value.lower())
if self.on_update is not None:
self.on_update(self)
def __contains__(self, header):
return header.lower() in self._set
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._headers)
    def __nonzero__(self):
        return bool(self._set)
    __bool__ = __nonzero__
def __str__(self):
return self.to_header()
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self._headers
)
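# Illustrative sketch (doctest style, values are arbitrary; assumes the
# quote_header_value helper used by :meth:`to_header` is importable):
# membership checks are case insensitive while the original casing is kept
# for serialization.
#
#     >>> hs = HeaderSet(['gzip', 'deflate'])
#     >>> 'GZIP' in hs
#     True
#     >>> hs.add('br')
#     >>> hs.to_header()
#     'gzip, deflate, br'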
class ETags(object):
"""A set that can be used to check if one etag is present in a collection
of etags.
"""
def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
self._strong = frozenset(not star_tag and strong_etags or ())
self._weak = frozenset(weak_etags or ())
self.star_tag = star_tag
def as_set(self, include_weak=False):
"""Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set."""
rv = set(self._strong)
if include_weak:
rv.update(self._weak)
return rv
def is_weak(self, etag):
"""Check if an etag is weak."""
return etag in self._weak
def contains_weak(self, etag):
"""Check if an etag is part of the set including weak and strong tags."""
return self.is_weak(etag) or self.contains(etag)
def contains(self, etag):
"""Check if an etag is part of the set ignoring weak tags.
It is also possible to use the ``in`` operator.
"""
if self.star_tag:
return True
return etag in self._strong
def contains_raw(self, etag):
"""When passed a quoted tag it will check if this tag is part of the
set. If the tag is weak it is checked against weak and strong tags,
otherwise strong only."""
etag, weak = unquote_etag(etag)
if weak:
return self.contains_weak(etag)
return self.contains(etag)
def to_header(self):
"""Convert the etags set into a HTTP header string."""
if self.star_tag:
return '*'
return ', '.join(
['"%s"' % x for x in self._strong] +
['w/"%s"' % x for x in self._weak]
)
def __call__(self, etag=None, data=None, include_weak=False):
if [etag, data].count(None) != 1:
            raise TypeError('exactly one of etag or data must be provided')
if etag is None:
etag = generate_etag(data)
if include_weak:
if etag in self._weak:
return True
return etag in self._strong
    def __nonzero__(self):
        return bool(self.star_tag or self._strong or self._weak)
    __bool__ = __nonzero__
def __str__(self):
return self.to_header()
def __iter__(self):
return iter(self._strong)
def __contains__(self, etag):
return self.contains(etag)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
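# Illustrative sketch (doctest style, the etag values are arbitrary;
# normally an ETags instance is produced by a parsing helper rather than
# constructed by hand):
#
#     >>> etags = ETags(['abc'], weak_etags=['def'])
#     >>> 'abc' in etags
#     True
#     >>> etags.contains_weak('def')
#     True
#     >>> etags.to_header()
#     '"abc", w/"def"'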
class IfRange(object):
"""Very simple object that represents the `If-Range` header in parsed
    form.  It will have either an etag or a date, or neither, but never
    both.
.. versionadded:: 0.7
"""
def __init__(self, etag=None, date=None):
#: The etag parsed and unquoted. Ranges always operate on strong
#: etags so the weakness information is not necessary.
self.etag = etag
#: The date in parsed format or `None`.
self.date = date
def to_header(self):
"""Converts the object back into an HTTP header."""
if self.date is not None:
return http_date(self.date)
if self.etag is not None:
return quote_etag(self.etag)
return ''
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Range(object):
"""Represents a range header. All the methods are only supporting bytes
as unit. It does store multiple ranges but :meth:`range_for_length` will
only work if only one range is provided.
.. versionadded:: 0.7
"""
def __init__(self, units, ranges):
#: The units of this range. Usually "bytes".
self.units = units
#: A list of ``(begin, end)`` tuples for the range header provided.
#: The ranges are non-inclusive.
self.ranges = ranges
def range_for_length(self, length):
"""If the range is for bytes, the length is not None and there is
exactly one range and it is satisfiable it returns a ``(start, stop)``
tuple, otherwise `None`.
"""
if self.units != 'bytes' or length is None or len(self.ranges) != 1:
return None
start, end = self.ranges[0]
if end is None:
end = length
if start < 0:
start += length
if is_byte_range_valid(start, end, length):
return start, min(end, length)
def make_content_range(self, length):
"""Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
"""
rng = self.range_for_length(length)
if rng is not None:
return ContentRange(self.units, rng[0], rng[1], length)
def to_header(self):
"""Converts the object back into an HTTP header."""
ranges = []
for begin, end in self.ranges:
if end is None:
ranges.append(begin >= 0 and '%s-' % begin or str(begin))
else:
ranges.append('%s-%s' % (begin, end - 1))
return '%s=%s' % (self.units, ','.join(ranges))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
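# Illustrative sketch (doctest style, values are arbitrary; assumes the
# is_byte_range_valid helper used above is importable): ranges are stored
# non-inclusive, so the header end is rendered as ``end - 1``.
#
#     >>> rng = Range('bytes', [(0, 500)])
#     >>> rng.to_header()
#     'bytes=0-499'
#     >>> rng.range_for_length(200)
#     (0, 200)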
class ContentRange(object):
"""Represents the content range header.
.. versionadded:: 0.7
"""
def __init__(self, units, start, stop, length=None, on_update=None):
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self.on_update = on_update
self.set(start, stop, length, units)
def _callback_property(name):
def fget(self):
return getattr(self, name)
def fset(self, value):
setattr(self, name, value)
if self.on_update is not None:
self.on_update(self)
return property(fget, fset)
#: The units to use, usually "bytes"
units = _callback_property('_units')
#: The start point of the range or `None`.
start = _callback_property('_start')
#: The stop point of the range (non-inclusive) or `None`. Can only be
#: `None` if also start is `None`.
stop = _callback_property('_stop')
#: The length of the range or `None`.
length = _callback_property('_length')
def set(self, start, stop, length=None, units='bytes'):
"""Simple method to update the ranges."""
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self._units = units
self._start = start
self._stop = stop
self._length = length
if self.on_update is not None:
self.on_update(self)
def unset(self):
"""Sets the units to `None` which indicates that the header should
no longer be used.
"""
self.set(None, None, units=None)
def to_header(self):
if self.units is None:
return ''
if self.length is None:
length = '*'
else:
length = self.length
if self.start is None:
return '%s */%s' % (self.units, length)
return '%s %s-%s/%s' % (
self.units,
self.start,
self.stop - 1,
length
)
def __nonzero__(self):
return self.units is not None
__bool__ = __nonzero__
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
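# Illustrative sketch (doctest style, values are arbitrary): the stop value
# is non-inclusive here as well, and :meth:`unset` makes the object falsy.
#
#     >>> cr = ContentRange('bytes', 0, 500, 1000)
#     >>> cr.to_header()
#     'bytes 0-499/1000'
#     >>> cr.unset()
#     >>> bool(cr)
#     False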
class Authorization(ImmutableDictMixin, dict):
"""Represents an `Authorization` header sent by the client. You should
not create this kind of object yourself but use it when it's returned by
the `parse_authorization_header` function.
This object is a dict subclass and can be altered by setting dict items
but it should be considered immutable as it's returned by the client and
not meant for modifications.
.. versionchanged:: 0.5
This object became immutable.
"""
def __init__(self, auth_type, data=None):
dict.__init__(self, data or {})
self.type = auth_type
username = property(lambda x: x.get('username'), doc='''
        The username transmitted.  This is always set for both basic and
        digest auth.''')
password = property(lambda x: x.get('password'), doc='''
When the authentication type is basic this is the password
transmitted by the client, else `None`.''')
realm = property(lambda x: x.get('realm'), doc='''
This is the server realm sent back for HTTP digest auth.''')
nonce = property(lambda x: x.get('nonce'), doc='''
The nonce the server sent for digest auth, sent back by the client.
A nonce should be unique for every 401 response for HTTP digest
auth.''')
uri = property(lambda x: x.get('uri'), doc='''
The URI from Request-URI of the Request-Line; duplicated because
proxies are allowed to change the Request-Line in transit. HTTP
digest auth only.''')
nc = property(lambda x: x.get('nc'), doc='''
The nonce count value transmitted by clients if a qop-header is
also transmitted. HTTP digest auth only.''')
cnonce = property(lambda x: x.get('cnonce'), doc='''
If the server sent a qop-header in the ``WWW-Authenticate``
header, the client has to provide this value for HTTP digest auth.
See the RFC for more details.''')
response = property(lambda x: x.get('response'), doc='''
A string of 32 hex digits computed as defined in RFC 2617, which
proves that the user knows a password. Digest auth only.''')
opaque = property(lambda x: x.get('opaque'), doc='''
The opaque header from the server returned unchanged by the client.
It is recommended that this string be base64 or hexadecimal data.
Digest auth only.''')
@property
def qop(self):
"""Indicates what "quality of protection" the client has applied to
the message for HTTP digest auth."""
def on_update(header_set):
if not header_set and 'qop' in self:
del self['qop']
elif header_set:
self['qop'] = header_set.to_header()
return parse_set_header(self.get('qop'), on_update)
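# Illustrative sketch (added; not part of the original module): reading the
# credentials that ``parse_authorization_header`` returns for a basic-auth
# request.  The header value passed in is only an example input.
def _example_read_basic_auth(header_value):
    from werkzeug.http import parse_authorization_header
    auth = parse_authorization_header(header_value)
    if auth is not None and auth.type == 'basic':
        return auth.username, auth.password
    return None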
class WWWAuthenticate(UpdateDictMixin, dict):
"""Provides simple access to `WWW-Authenticate` headers."""
#: list of keys that require quoting in the generated header
_require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm'])
def __init__(self, auth_type=None, values=None, on_update=None):
dict.__init__(self, values or ())
if auth_type:
self['__auth_type__'] = auth_type
self.on_update = on_update
def set_basic(self, realm='authentication required'):
"""Clear the auth info and enable basic auth."""
dict.clear(self)
dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
if self.on_update:
self.on_update(self)
def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
algorithm=None, stale=False):
"""Clear the auth info and enable digest auth."""
d = {
'__auth_type__': 'digest',
'realm': realm,
'nonce': nonce,
'qop': dump_header(qop)
}
if stale:
d['stale'] = 'TRUE'
if opaque is not None:
d['opaque'] = opaque
if algorithm is not None:
d['algorithm'] = algorithm
dict.clear(self)
dict.update(self, d)
if self.on_update:
self.on_update(self)
def to_header(self):
"""Convert the stored values into a WWW-Authenticate header."""
d = dict(self)
auth_type = d.pop('__auth_type__', None) or 'basic'
return '%s %s' % (auth_type.title(), ', '.join([
'%s=%s' % (key, quote_header_value(value,
allow_token=key not in self._require_quoting))
for key, value in iteritems(d)
]))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
def auth_property(name, doc=None):
"""A static helper function for subclasses to add extra authentication
system properties onto a class::
class FooAuthenticate(WWWAuthenticate):
special_realm = auth_property('special_realm')
        For more information have a look at the source code to see how the
regular properties (:attr:`realm` etc.) are implemented.
"""
def _set_value(self, value):
if value is None:
self.pop(name, None)
else:
self[name] = str(value)
return property(lambda x: x.get(name), _set_value, doc=doc)
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self:
del self[name]
elif header_set:
self[name] = header_set.to_header()
return parse_set_header(self.get(name), on_update)
return property(fget, doc=doc)
type = auth_property('__auth_type__', doc='''
The type of the auth mechanism. HTTP currently specifies
`Basic` and `Digest`.''')
realm = auth_property('realm', doc='''
A string to be displayed to users so they know which username and
password to use. This string should contain at least the name of
the host performing the authentication and might additionally
indicate the collection of users who might have access.''')
domain = _set_property('domain', doc='''
A list of URIs that define the protection space. If a URI is an
absolute path, it is relative to the canonical root URL of the
server being accessed.''')
nonce = auth_property('nonce', doc='''
A server-specified data string which should be uniquely generated
each time a 401 response is made. It is recommended that this
string be base64 or hexadecimal data.''')
opaque = auth_property('opaque', doc='''
A string of data, specified by the server, which should be returned
by the client unchanged in the Authorization header of subsequent
requests with URIs in the same protection space. It is recommended
that this string be base64 or hexadecimal data.''')
algorithm = auth_property('algorithm', doc='''
A string indicating a pair of algorithms used to produce the digest
and a checksum. If this is not present it is assumed to be "MD5".
If the algorithm is not understood, the challenge should be ignored
(and a different one used, if there is more than one).''')
qop = _set_property('qop', doc='''
A set of quality-of-privacy directives such as auth and auth-int.''')
def _get_stale(self):
val = self.get('stale')
if val is not None:
return val.lower() == 'true'
def _set_stale(self, value):
if value is None:
self.pop('stale', None)
else:
self['stale'] = value and 'TRUE' or 'FALSE'
stale = property(_get_stale, _set_stale, doc='''
A flag, indicating that the previous request from the client was
rejected because the nonce value was stale.''')
del _get_stale, _set_stale
# make auth_property a staticmethod so that subclasses of
# `WWWAuthenticate` can use it for new properties.
auth_property = staticmethod(auth_property)
del _set_property
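# Illustrative sketch (added; not part of the original module): building a
# digest challenge for a 401 response.  The realm and nonce values are
# placeholders only.
def _example_digest_challenge():
    challenge = WWWAuthenticate()
    challenge.set_digest('example realm', nonce='0123456789abcdef')
    # Renders something like (key order may vary):
    #   Digest realm="example realm", nonce="0123456789abcdef", qop="auth"
    return challenge.to_header()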
class FileStorage(object):
"""The :class:`FileStorage` class is a thin wrapper over incoming files.
It is used by the request object to represent uploaded files. All the
attributes of the wrapper stream are proxied by the file storage so
it's possible to do ``storage.read()`` instead of the long form
``storage.stream.read()``.
"""
def __init__(self, stream=None, filename=None, name=None,
content_type=None, content_length=None,
headers=None):
self.name = name
self.stream = stream or _empty_stream
# if no filename is provided we can attempt to get the filename
# from the stream object passed. There we have to be careful to
# skip things like <fdopen>, <stderr> etc. Python marks these
        # special filenames with angle brackets.
if filename is None:
filename = getattr(stream, 'name', None)
s = make_literal_wrapper(filename)
if filename and filename[0] == s('<') and filename[-1] == s('>'):
filename = None
# On Python 3 we want to make sure the filename is always unicode.
# This might not be if the name attribute is bytes due to the
# file being opened from the bytes API.
if not PY2 and isinstance(filename, bytes):
filename = filename.decode(sys.getfilesystemencoding(),
'replace')
self.filename = filename
if headers is None:
headers = Headers()
self.headers = headers
if content_type is not None:
headers['Content-Type'] = content_type
if content_length is not None:
headers['Content-Length'] = str(content_length)
def _parse_content_type(self):
if not hasattr(self, '_parsed_content_type'):
self._parsed_content_type = \
parse_options_header(self.content_type)
@property
def content_type(self):
"""The content-type sent in the header. Usually not available"""
return self.headers.get('content-type')
@property
def content_length(self):
"""The content-length sent in the header. Usually not available"""
return int(self.headers.get('content-length') or 0)
@property
def mimetype(self):
"""Like :attr:`content_type` but without parameters (eg, without
charset, type etc.). For example if the content
type is ``text/html; charset=utf-8`` the mimetype would be
``'text/html'``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[0]
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[1]
def save(self, dst, buffer_size=16384):
"""Save the file to a destination path or file object. If the
destination is a file object you have to close it yourself after the
call. The buffer size is the number of bytes held in memory during
the copy process. It defaults to 16KB.
For secure file saving also have a look at :func:`secure_filename`.
:param dst: a filename or open file object the uploaded file
is saved to.
:param buffer_size: the size of the buffer. This works the same as
the `length` parameter of
:func:`shutil.copyfileobj`.
"""
from shutil import copyfileobj
close_dst = False
if isinstance(dst, string_types):
dst = open(dst, 'wb')
close_dst = True
try:
copyfileobj(self.stream, dst, buffer_size)
finally:
if close_dst:
dst.close()
def close(self):
"""Close the underlying file if possible."""
try:
self.stream.close()
except Exception:
pass
def __nonzero__(self):
return bool(self.filename)
def __getattr__(self, name):
return getattr(self.stream, name)
def __iter__(self):
return iter(self.readline, '')
def __repr__(self):
return '<%s: %r (%r)>' % (
self.__class__.__name__,
self.filename,
self.content_type
)
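# Illustrative sketch (added; not part of the original module): saving an
# uploaded file from a request object.  ``request.files`` and the 'file'
# field name are assumptions made for this example only.
def _example_save_upload(request, target_dir):
    import os
    uploaded = request.files['file']  # a FileStorage instance
    # save() copies the stream in 16KB chunks by default; pass buffer_size
    # to tune the copy.
    uploaded.save(os.path.join(target_dir, uploaded.filename))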
# circular dependencies
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
quote_header_value, parse_set_header, unquote_etag, quote_etag, \
parse_options_header, http_date, is_byte_range_valid
from werkzeug import exceptions
|
marc-sensenich/ansible | refs/heads/devel | test/units/plugins/test_plugins.py | 31 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from units.compat import unittest
from units.compat.builtins import BUILTINS
from units.compat.mock import mock_open, patch, MagicMock
from ansible.plugins.loader import MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE, PluginLoader
class TestErrors(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch.object(PluginLoader, '_get_paths')
def test_print_paths(self, mock_method):
mock_method.return_value = ['/path/one', '/path/two', '/path/three']
pl = PluginLoader('foo', 'foo', '', 'test_plugins')
paths = pl.print_paths()
expected_paths = os.pathsep.join(['/path/one', '/path/two', '/path/three'])
self.assertEqual(paths, expected_paths)
def test_plugins__get_package_paths_no_package(self):
pl = PluginLoader('test', '', 'test', 'test_plugin')
self.assertEqual(pl._get_package_paths(), [])
def test_plugins__get_package_paths_with_package(self):
# the _get_package_paths() call uses __import__ to load a
# python library, and then uses the __file__ attribute of
# the result for that to get the library path, so we mock
# that here and patch the builtin to use our mocked result
foo = MagicMock()
bar = MagicMock()
bam = MagicMock()
bam.__file__ = '/path/to/my/foo/bar/bam/__init__.py'
bar.bam = bam
foo.return_value.bar = bar
pl = PluginLoader('test', 'foo.bar.bam', 'test', 'test_plugin')
with patch('{0}.__import__'.format(BUILTINS), foo):
self.assertEqual(pl._get_package_paths(), ['/path/to/my/foo/bar/bam'])
def test_plugins__get_paths(self):
pl = PluginLoader('test', '', 'test', 'test_plugin')
pl._paths = ['/path/one', '/path/two']
self.assertEqual(pl._get_paths(), ['/path/one', '/path/two'])
# NOT YET WORKING
# def fake_glob(path):
# if path == 'test/*':
# return ['test/foo', 'test/bar', 'test/bam']
# elif path == 'test/*/*'
# m._paths = None
# mock_glob = MagicMock()
# mock_glob.return_value = []
# with patch('glob.glob', mock_glob):
# pass
def assertPluginLoaderConfigBecomes(self, arg, expected):
pl = PluginLoader('test', '', arg, 'test_plugin')
self.assertEqual(pl.config, expected)
def test_plugin__init_config_list(self):
config = ['/one', '/two']
self.assertPluginLoaderConfigBecomes(config, config)
def test_plugin__init_config_str(self):
self.assertPluginLoaderConfigBecomes('test', ['test'])
def test_plugin__init_config_none(self):
self.assertPluginLoaderConfigBecomes(None, [])
def test__load_module_source_no_duplicate_names(self):
'''
This test simulates importing 2 plugins with the same name,
and validating that the import is short circuited if a file with the same name
has already been imported
'''
fixture_path = os.path.join(os.path.dirname(__file__), 'loader_fixtures')
pl = PluginLoader('test', '', 'test', 'test_plugin')
one = pl._load_module_source('import_fixture', os.path.join(fixture_path, 'import_fixture.py'))
# This line wouldn't even succeed if we didn't short circuit on finding a duplicate name
two = pl._load_module_source('import_fixture', '/path/to/import_fixture.py')
self.assertEqual(one, two)
@patch('ansible.plugins.loader.glob')
@patch.object(PluginLoader, '_get_paths')
def test_all_no_duplicate_names(self, gp_mock, glob_mock):
'''
This test goes along with ``test__load_module_source_no_duplicate_names``
and ensures that we ignore duplicate imports on multiple paths
'''
fixture_path = os.path.join(os.path.dirname(__file__), 'loader_fixtures')
gp_mock.return_value = [
fixture_path,
'/path/to'
]
glob_mock.glob.side_effect = [
[os.path.join(fixture_path, 'import_fixture.py')],
['/path/to/import_fixture.py']
]
pl = PluginLoader('test', '', 'test', 'test_plugin')
# Aside from needing ``list()`` so we can do a len, ``PluginLoader.all`` returns a generator
# so ``list()`` actually causes ``PluginLoader.all`` to run.
plugins = list(pl.all())
self.assertEqual(len(plugins), 1)
self.assertIn(os.path.join(fixture_path, 'import_fixture.py'), pl._module_cache)
self.assertNotIn('/path/to/import_fixture.py', pl._module_cache)
|
asnorkin/sentiment_analysis | refs/heads/master | site/lib/python2.7/site-packages/numpy/lib/index_tricks.py | 66 | from __future__ import division, absolute_import, print_function
import sys
import math
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, ScalarType, array, alltrue, cumprod, arange
)
from numpy.core.numerictypes import find_common_type, issubdtype
from . import function_base
import numpy.matrixlib as matrix
from .function_base import diff
from numpy.core.multiarray import ravel_multi_index, unravel_index
from numpy.lib.stride_tricks import as_strided
makemat = matrix.matrix
__all__ = [
'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
'diag_indices', 'diag_indices_from'
]
def ix_(*args):
"""
Construct an open mesh from multiple sequences.
This function takes N 1-D sequences and returns N outputs with N
dimensions each, such that the shape is 1 in all but one dimension
and the dimension with the non-unit shape value cycles through all
N dimensions.
Using `ix_` one can quickly construct index arrays that will index
the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
Parameters
----------
args : 1-D sequences
Returns
-------
out : tuple of ndarrays
N arrays with N dimensions each, with N the number of input
sequences. Together these arrays form an open mesh.
See Also
--------
ogrid, mgrid, meshgrid
Examples
--------
>>> a = np.arange(10).reshape(2, 5)
>>> a
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> ixgrid = np.ix_([0,1], [2,4])
>>> ixgrid
(array([[0],
[1]]), array([[2, 4]]))
>>> ixgrid[0].shape, ixgrid[1].shape
((2, 1), (1, 2))
>>> a[ixgrid]
array([[2, 4],
[7, 9]])
"""
out = []
nd = len(args)
for k, new in enumerate(args):
new = asarray(new)
if new.ndim != 1:
raise ValueError("Cross index must be 1 dimensional")
if new.size == 0:
# Explicitly type empty arrays to avoid float default
new = new.astype(_nx.intp)
if issubdtype(new.dtype, _nx.bool_):
new, = new.nonzero()
new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
out.append(new)
return tuple(out)
class nd_grid(object):
"""
Construct a multi-dimensional "meshgrid".
``grid = nd_grid()`` creates an instance which will return a mesh-grid
when indexed. The dimension and number of the output arrays are equal
to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then the
integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
If instantiated with an argument of ``sparse=True``, the mesh-grid is
open (or not fleshed out) so that only one-dimension of each returned
argument is greater than 1.
Parameters
----------
sparse : bool, optional
Whether the grid is sparse or not. Default is False.
Notes
-----
Two instances of `nd_grid` are made available in the NumPy namespace,
`mgrid` and `ogrid`::
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
Users should use these pre-defined instances instead of using `nd_grid`
directly.
Examples
--------
>>> mgrid = np.lib.index_tricks.nd_grid()
>>> mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid = np.lib.index_tricks.nd_grid(sparse=True)
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
"""
def __init__(self, sparse=False):
self.sparse = sparse
def __getitem__(self, key):
try:
size = []
typ = int
for k in range(len(key)):
step = key[k].step
start = key[k].start
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size.append(int(abs(step)))
typ = float
else:
size.append(
int(math.ceil((key[k].stop - start)/(step*1.0))))
if (isinstance(step, float) or
isinstance(start, float) or
isinstance(key[k].stop, float)):
typ = float
if self.sparse:
nn = [_nx.arange(_x, dtype=_t)
for _x, _t in zip(size, (typ,)*len(size))]
else:
nn = _nx.indices(size, typ)
for k in range(len(size)):
step = key[k].step
start = key[k].start
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
step = int(abs(step))
if step != 1:
step = (key[k].stop - start)/float(step-1)
nn[k] = (nn[k]*step+start)
if self.sparse:
slobj = [_nx.newaxis]*len(size)
for k in range(len(size)):
slobj[k] = slice(None, None)
nn[k] = nn[k][slobj]
slobj[k] = _nx.newaxis
return nn
except (IndexError, TypeError):
step = key.step
stop = key.stop
start = key.start
if start is None:
start = 0
if isinstance(step, complex):
step = abs(step)
length = int(step)
if step != 1:
step = (key.stop-start)/float(step-1)
stop = key.stop + step
return _nx.arange(0, length, 1, float)*step + start
else:
return _nx.arange(start, stop, step)
def __getslice__(self, i, j):
return _nx.arange(i, j)
def __len__(self):
return 0
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
mgrid.__doc__ = None # set in numpy.add_newdocs
ogrid.__doc__ = None # set in numpy.add_newdocs
class AxisConcatenator(object):
"""
Translates slice objects to concatenation along an axis.
For detailed documentation on usage, see `r_`.
"""
def _retval(self, res):
if self.matrix:
oldndim = res.ndim
res = makemat(res)
if oldndim == 1 and self.col:
res = res.T
self.axis = self._axis
self.matrix = self._matrix
self.col = 0
return res
def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
self._axis = axis
self._matrix = matrix
self.axis = axis
self.matrix = matrix
self.col = 0
self.trans1d = trans1d
self.ndmin = ndmin
def __getitem__(self, key):
trans1d = self.trans1d
ndmin = self.ndmin
if isinstance(key, str):
frame = sys._getframe().f_back
mymat = matrix.bmat(key, frame.f_globals, frame.f_locals)
return mymat
if not isinstance(key, tuple):
key = (key,)
objs = []
scalars = []
arraytypes = []
scalartypes = []
for k in range(len(key)):
scalar = False
if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size = int(abs(step))
newobj = function_base.linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
newobj = array(newobj, copy=False, ndmin=ndmin)
if trans1d != -1:
newobj = newobj.swapaxes(-1, trans1d)
elif isinstance(key[k], str):
if k != 0:
raise ValueError("special directives must be the "
"first entry.")
key0 = key[0]
if key0 in 'rc':
self.matrix = True
self.col = (key0 == 'c')
continue
if ',' in key0:
vec = key0.split(',')
try:
self.axis, ndmin = \
[int(x) for x in vec[:2]]
if len(vec) == 3:
trans1d = int(vec[2])
continue
except:
raise ValueError("unknown special directive")
try:
self.axis = int(key[k])
continue
except (ValueError, TypeError):
raise ValueError("unknown special directive")
elif type(key[k]) in ScalarType:
newobj = array(key[k], ndmin=ndmin)
scalars.append(k)
scalar = True
scalartypes.append(newobj.dtype)
else:
newobj = key[k]
if ndmin > 1:
tempobj = array(newobj, copy=False, subok=True)
newobj = array(newobj, copy=False, subok=True,
ndmin=ndmin)
if trans1d != -1 and tempobj.ndim < ndmin:
k2 = ndmin-tempobj.ndim
if (trans1d < 0):
trans1d += k2 + 1
defaxes = list(range(ndmin))
k1 = trans1d
axes = defaxes[:k1] + defaxes[k2:] + \
defaxes[k1:k2]
newobj = newobj.transpose(axes)
del tempobj
objs.append(newobj)
if not scalar and isinstance(newobj, _nx.ndarray):
arraytypes.append(newobj.dtype)
        # Ensure that scalars won't up-cast unless warranted
final_dtype = find_common_type(arraytypes, scalartypes)
if final_dtype is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtype)
res = _nx.concatenate(tuple(objs), axis=self.axis)
return self._retval(res)
def __getslice__(self, i, j):
res = _nx.arange(i, j)
return self._retval(res)
def __len__(self):
return 0
# separate classes are used here instead of just making r_ = concatentor(0),
# etc. because otherwise we couldn't get the doc string to come out right
# in help(r_)
class RClass(AxisConcatenator):
"""
Translates slice objects to concatenation along the first axis.
This is a simple way to build up arrays quickly. There are two use cases.
1. If the index expression contains comma separated arrays, then stack
them along their first axis.
2. If the index expression contains slice notation or scalars then create
a 1-D array with a range indicated by the slice notation.
If slice notation is used, the syntax ``start:stop:step`` is equivalent
to ``np.arange(start, stop, step)`` inside of the brackets. However, if
``step`` is an imaginary number (i.e. 100j) then its integer portion is
interpreted as a number-of-points desired and the start and stop are
inclusive. In other words ``start:stop:stepj`` is interpreted as
``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
After expansion of slice notation, all comma separated sequences are
concatenated together.
Optional character strings placed as the first element of the index
expression can be used to change the output. The strings 'r' or 'c' result
in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1
(column) matrix is produced. If the result is 2-D then both provide the
same matrix result.
A string integer specifies which axis to stack multiple comma separated
arrays along. A string of two comma-separated integers allows indication
of the minimum number of dimensions to force each entry into as the
second integer (the axis to concatenate along is still the first integer).
A string with three comma-separated integers allows specification of the
axis to concatenate along, the minimum number of dimensions to force the
entries to, and which axis should contain the start of the arrays which
are less than the specified number of dimensions. In other words the third
integer allows you to specify where the 1's should be placed in the shape
of the arrays that have their shapes upgraded. By default, they are placed
in the front of the shape tuple. The third argument allows you to specify
where the start of the array should be instead. Thus, a third argument of
'0' would place the 1's at the end of the array shape. Negative integers
specify where in the new shape tuple the last dimension of upgraded arrays
should be placed, so the default is '-1'.
Parameters
----------
Not a function, so takes no parameters
Returns
-------
A concatenated ndarray or matrix.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
c_ : Translates slice objects to concatenation along the second axis.
Examples
--------
>>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
array([1, 2, 3, 0, 0, 4, 5, 6])
>>> np.r_[-1:1:6j, [0]*3, 5, 6]
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
String integers specify the axis to concatenate along or the minimum
number of dimensions to force entries into.
>>> a = np.array([[0, 1, 2], [3, 4, 5]])
>>> np.r_['-1', a, a] # concatenate along last axis
array([[0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5]])
>>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
array([[1, 2, 3],
[4, 5, 6]])
>>> np.r_['0,2,0', [1,2,3], [4,5,6]]
array([[1],
[2],
[3],
[4],
[5],
[6]])
>>> np.r_['1,2,0', [1,2,3], [4,5,6]]
array([[1, 4],
[2, 5],
[3, 6]])
Using 'r' or 'c' as a first string argument creates a matrix.
>>> np.r_['r',[1,2,3], [4,5,6]]
matrix([[1, 2, 3, 4, 5, 6]])
"""
def __init__(self):
AxisConcatenator.__init__(self, 0)
r_ = RClass()
class CClass(AxisConcatenator):
"""
Translates slice objects to concatenation along the second axis.
This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
useful because of its common occurrence. In particular, arrays will be
stacked along their last axis after being upgraded to at least 2-D with
1's post-pended to the shape (column vectors made out of 1-D arrays).
For detailed documentation, see `r_`.
Examples
--------
>>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
array([[1, 2, 3, 0, 0, 4, 5, 6]])
"""
def __init__(self):
AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
c_ = CClass()
class ndenumerate(object):
"""
Multidimensional index iterator.
Return an iterator yielding pairs of array coordinates and values.
Parameters
----------
arr : ndarray
Input array.
See Also
--------
ndindex, flatiter
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> for index, x in np.ndenumerate(a):
... print(index, x)
(0, 0) 1
(0, 1) 2
(1, 0) 3
(1, 1) 4
"""
def __init__(self, arr):
self.iter = asarray(arr).flat
def __next__(self):
"""
Standard iterator method, returns the index tuple and array value.
Returns
-------
coords : tuple of ints
The indices of the current iteration.
val : scalar
The array element of the current iteration.
"""
return self.iter.coords, next(self.iter)
def __iter__(self):
return self
next = __next__
class ndindex(object):
"""
An N-dimensional iterator object to index arrays.
Given the shape of an array, an `ndindex` instance iterates over
the N-dimensional index of the array. At each iteration a tuple
of indices is returned, the last dimension is iterated over first.
Parameters
----------
`*args` : ints
The size of each dimension of the array.
See Also
--------
ndenumerate, flatiter
Examples
--------
>>> for index in np.ndindex(3, 2, 1):
... print(index)
(0, 0, 0)
(0, 1, 0)
(1, 0, 0)
(1, 1, 0)
(2, 0, 0)
(2, 1, 0)
"""
def __init__(self, *shape):
if len(shape) == 1 and isinstance(shape[0], tuple):
shape = shape[0]
x = as_strided(_nx.zeros(1), shape=shape,
strides=_nx.zeros_like(shape))
self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
order='C')
def __iter__(self):
return self
def ndincr(self):
"""
Increment the multi-dimensional index by one.
This method is for backward compatibility only: do not use.
"""
next(self)
def __next__(self):
"""
Standard iterator method, updates the index and returns the index
tuple.
Returns
-------
val : tuple of ints
Returns a tuple containing the indices of the current
iteration.
"""
next(self._it)
return self._it.multi_index
next = __next__
# You can do all this with slice() plus a few special objects,
# but there's a lot to remember. This version is simpler because
# it uses the standard array indexing syntax.
#
# Written by Konrad Hinsen <[email protected]>
# last revision: 1999-7-23
#
# Cosmetic changes by T. Oliphant 2001
#
#
class IndexExpression(object):
"""
A nicer way to build up index tuples for arrays.
.. note::
Use one of the two predefined instances `index_exp` or `s_`
rather than directly using `IndexExpression`.
For any index combination, including slicing and axis insertion,
``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
array `a`. However, ``np.index_exp[indices]`` can be used anywhere
in Python code and returns a tuple of slice objects that can be
used in the construction of complex index expressions.
Parameters
----------
maketuple : bool
If True, always returns a tuple.
See Also
--------
index_exp : Predefined instance that always returns a tuple:
`index_exp = IndexExpression(maketuple=True)`.
s_ : Predefined instance without tuple conversion:
`s_ = IndexExpression(maketuple=False)`.
Notes
-----
You can do all this with `slice()` plus a few special objects,
but there's a lot to remember and this version is simpler because
it uses the standard array indexing syntax.
Examples
--------
>>> np.s_[2::2]
slice(2, None, 2)
>>> np.index_exp[2::2]
(slice(2, None, 2),)
>>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
array([2, 4])
"""
def __init__(self, maketuple):
self.maketuple = maketuple
def __getitem__(self, item):
if self.maketuple and not isinstance(item, tuple):
return (item,)
else:
return item
index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)
# End contribution from Konrad.
# The following functions complement those in twodim_base, but are
# applicable to N-dimensions.
def fill_diagonal(a, val, wrap=False):
"""Fill the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim > 2``, the diagonal is the list of
locations with indices ``a[i, i, ..., i]`` all identical. This function
modifies the input array in-place, it does not return a value.
Parameters
----------
a : array, at least 2-D.
Array whose diagonal is to be filled, it gets modified in-place.
val : scalar
Value to be written on the diagonal, its type must be compatible with
that of the array a.
wrap : bool
For tall matrices in NumPy version up to 1.6.2, the
diagonal "wrapped" after N columns. You can have this behavior
with this option. This affects only tall matrices.
See also
--------
diag_indices, diag_indices_from
Notes
-----
.. versionadded:: 1.4.0
This functionality can be obtained via `diag_indices`, but internally
this version uses a much faster implementation that never constructs the
indices and uses simple slicing.
Examples
--------
>>> a = np.zeros((3, 3), int)
>>> np.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
The same function can operate on a 4-D array:
>>> a = np.zeros((3, 3, 3, 3), int)
>>> np.fill_diagonal(a, 4)
We only show a few blocks for clarity:
>>> a[0, 0]
array([[4, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> a[1, 1]
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 0]])
>>> a[2, 2]
array([[0, 0, 0],
[0, 0, 0],
[0, 0, 4]])
The wrap option affects only tall matrices:
>>> # tall matrices no wrap
>>> a = np.zeros((5, 3),int)
>>> fill_diagonal(a, 4)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[0, 0, 0]])
>>> # tall matrices wrap
>>> a = np.zeros((5, 3),int)
>>> fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[4, 0, 0]])
>>> # wide matrices
>>> a = np.zeros((3, 5),int)
>>> fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 4, 0, 0]])
"""
if a.ndim < 2:
raise ValueError("array must be at least 2-d")
end = None
if a.ndim == 2:
# Explicit, fast formula for the common case. For 2-d arrays, we
# accept rectangular ones.
step = a.shape[1] + 1
        # This is needed so that tall matrices don't have the diagonal wrap.
if not wrap:
end = a.shape[1] * a.shape[1]
else:
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not alltrue(diff(a.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
step = 1 + (cumprod(a.shape[:-1])).sum()
# Write the value out into the diagonal.
a.flat[:end:step] = val
def diag_indices(n, ndim=2):
"""
Return the indices to access the main diagonal of an array.
This returns a tuple of indices that can be used to access the main
diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
(n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
for ``i = [0..n-1]``.
Parameters
----------
n : int
The size, along each dimension, of the arrays for which the returned
indices can be used.
ndim : int, optional
The number of dimensions.
See also
--------
diag_indices_from
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Create a set of indices to access the diagonal of a (4, 4) array:
>>> di = np.diag_indices(4)
>>> di
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> a[di] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
Now, we create indices to manipulate a 3-D array:
>>> d3 = np.diag_indices(2, 3)
>>> d3
(array([0, 1]), array([0, 1]), array([0, 1]))
And use it to set the diagonal of an array of zeros to 1:
>>> a = np.zeros((2, 2, 2), dtype=np.int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
[0, 0]],
[[0, 0],
[0, 1]]])
"""
idx = arange(n)
return (idx,) * ndim
def diag_indices_from(arr):
"""
Return the indices to access the main diagonal of an n-dimensional array.
See `diag_indices` for full details.
Parameters
----------
arr : array, at least 2-D
See Also
--------
diag_indices
Notes
-----
.. versionadded:: 1.4.0
"""
if not arr.ndim >= 2:
raise ValueError("input array must be at least 2-d")
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not alltrue(diff(arr.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], arr.ndim)
|
frenchfrywpepper/ansible-modules-extras | refs/heads/devel | cloud/vmware/vca_nat.py | 25 | #!/usr/bin/python
# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vca_nat
short_description: add or remove NAT rules in a gateway in a vca
description:
- Adds or removes nat rules from a gateway in a vca environment
version_added: "2.0"
author: Peter Sprygada (@privateip)
options:
purge_rules:
description:
      - If set to true, it will delete all rules in the gateway that are not given as a parameter to this module.
required: false
default: false
nat_rules:
description:
      - A list of rules to be added to the gateway. Please see the examples for valid entries.
required: True
default: false
extends_documentation_fragment: vca.documentation
'''
EXAMPLES = '''
#An example for a source nat
- hosts: localhost
connection: local
tasks:
- vca_nat:
instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
vdc_name: 'benz_ansible'
state: 'present'
nat_rules:
- rule_type: SNAT
original_ip: 192.168.2.10
translated_ip: 107.189.95.208
#example for a DNAT
- hosts: localhost
connection: local
tasks:
- vca_nat:
instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
vdc_name: 'benz_ansible'
state: 'present'
nat_rules:
- rule_type: DNAT
original_ip: 107.189.95.208
original_port: 22
translated_ip: 192.168.2.10
translated_port: 22
'''
import time
import xmltodict
VALID_RULE_KEYS = ['rule_type', 'original_ip', 'original_port',
'translated_ip', 'translated_port', 'protocol']
def validate_nat_rules(nat_rules):
for rule in nat_rules:
if not isinstance(rule, dict):
raise VcaError("nat rules must be a list of dictionaries, "
"Please check", valid_keys=VALID_RULE_KEYS)
for k in rule.keys():
if k not in VALID_RULE_KEYS:
raise VcaError("%s is not a valid key in nat rules, please "
"check above.." % k, valid_keys=VALID_RULE_KEYS)
rule['original_port'] = str(rule.get('original_port', 'any')).lower()
rule['original_ip'] = rule.get('original_ip', 'any').lower()
rule['translated_ip'] = rule.get('translated_ip', 'any').lower()
rule['translated_port'] = str(rule.get('translated_port', 'any')).lower()
rule['protocol'] = rule.get('protocol', 'any').lower()
rule['rule_type'] = rule.get('rule_type', 'DNAT').lower()
return nat_rules
def nat_rules_to_dict(nat_rules):
result = []
for rule in nat_rules:
gw_rule = rule.get_GatewayNatRule()
result.append(
dict(
rule_type=rule.get_RuleType().lower(),
original_ip=gw_rule.get_OriginalIp().lower(),
original_port=(gw_rule.get_OriginalPort().lower() or 'any'),
translated_ip=gw_rule.get_TranslatedIp().lower(),
translated_port=(gw_rule.get_TranslatedPort().lower() or 'any'),
protocol=(gw_rule.get_Protocol().lower() or 'any')
)
)
return result
def rule_to_string(rule):
strings = list()
for key, value in rule.items():
strings.append('%s=%s' % (key, value))
    return ', '.join(strings)
def main():
argument_spec = vca_argument_spec()
argument_spec.update(
dict(
nat_rules = dict(type='list', default=[]),
gateway_name = dict(default='gateway'),
purge_rules = dict(default=False, type='bool'),
state = dict(default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
vdc_name = module.params.get('vdc_name')
state = module.params['state']
nat_rules = module.params['nat_rules']
gateway_name = module.params['gateway_name']
purge_rules = module.params['purge_rules']
if not purge_rules and not nat_rules:
module.fail_json(msg='Must define purge_rules or nat_rules')
vca = vca_login(module)
gateway = vca.get_gateway(vdc_name, gateway_name)
if not gateway:
module.fail_json(msg="Not able to find the gateway %s, please check "
"the gateway_name param" % gateway_name)
try:
desired_rules = validate_nat_rules(nat_rules)
except VcaError, e:
module.fail_json(msg=e.message)
rules = gateway.get_nat_rules()
result = dict(changed=False, rules_purged=0)
deletions = 0
additions = 0
if purge_rules is True and len(rules) > 0:
result['rules_purged'] = len(rules)
deletions = result['rules_purged']
rules = list()
if not module.check_mode:
gateway.del_all_nat_rules()
task = gateway.save_services_configuration()
vca.block_until_completed(task)
rules = gateway.get_nat_rules()
result['changed'] = True
current_rules = nat_rules_to_dict(rules)
result['current_rules'] = current_rules
result['desired_rules'] = desired_rules
for rule in desired_rules:
if rule not in current_rules:
additions += 1
if not module.check_mode:
gateway.add_nat_rule(**rule)
result['changed'] = True
result['rules_added'] = additions
result['delete_rule'] = list()
result['delete_rule_rc'] = list()
for rule in current_rules:
if rule not in desired_rules:
deletions += 1
if not module.check_mode:
result['delete_rule'].append(rule)
rc = gateway.del_nat_rule(**rule)
result['delete_rule_rc'].append(rc)
result['changed'] = True
result['rules_deleted'] = deletions
if not module.check_mode and (additions > 0 or deletions > 0):
task = gateway.save_services_configuration()
vca.block_until_completed(task)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.vca import *
if __name__ == '__main__':
main()
|
jwkozel/demobx | refs/heads/master | test/unit/object/test_item.py | 2 | # coding: utf-8
from __future__ import unicode_literals
import json
import pytest
@pytest.fixture(params=('file', 'folder'))
def test_item_and_response(test_file, test_folder, mock_file_response, mock_folder_response, request):
if request.param == 'file':
return test_file, mock_file_response
elif request.param == 'folder':
return test_folder, mock_folder_response
def test_update_info(test_item_and_response, mock_box_session, etag, if_match_header):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.put.return_value = mock_item_response
data = {'foo': 'bar', 'baz': {'foo': 'bar'}, 'num': 4}
update_response = test_item.update_info(data, etag=etag)
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps(data), headers=if_match_header, params=None)
assert isinstance(update_response, test_item.__class__)
assert update_response.object_id == test_item.object_id
def test_rename_item(test_item_and_response, mock_box_session):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.put.return_value = mock_item_response
rename_response = test_item.rename('new name')
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps({'name': 'new name'}), params=None, headers=None)
assert isinstance(rename_response, test_item.__class__)
def test_copy_item(test_item_and_response, mock_box_session, test_folder, mock_object_id):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
expected_url = test_item.get_url('copy')
mock_box_session.post.return_value = mock_item_response
copy_response = test_item.copy(test_folder)
mock_box_session.post.assert_called_once_with(expected_url, data=json.dumps({'parent': {'id': mock_object_id}}))
assert isinstance(copy_response, test_item.__class__)
def test_move_item(test_item_and_response, mock_box_session, test_folder, mock_object_id):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.put.return_value = mock_item_response
move_response = test_item.move(test_folder)
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps({'parent': {'id': mock_object_id}}), params=None, headers=None)
assert isinstance(move_response, test_item.__class__)
@pytest.mark.parametrize('access,expected_access_data', [(None, {}), ('open', {'access': 'open'})])
def test_get_shared_link(
test_item_and_response,
mock_box_session,
access,
expected_access_data,
test_url,
etag,
if_match_header,
):
# pylint:disable=redefined-outer-name, protected-access
test_item, _ = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.put.return_value.json.return_value = {'shared_link': {'url': test_url}}
url = test_item.get_shared_link(access, etag=etag)
mock_box_session.put.assert_called_once_with(
expected_url,
data=json.dumps({'shared_link': expected_access_data}),
headers=if_match_header,
params=None,
)
assert url == test_url
def test_remove_shared_link(test_item_and_response, mock_box_session, etag, if_match_header):
# pylint:disable=redefined-outer-name, protected-access
test_item, _ = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.put.return_value.json.return_value = {'shared_link': {'url': None}}
url = test_item.remove_shared_link(etag=etag)
mock_box_session.put.assert_called_once_with(
expected_url,
data=json.dumps({'shared_link': None}),
headers=if_match_header,
params=None,
)
assert url is None
@pytest.mark.parametrize('fields', (None, ['name', 'created_at']))
def test_get(test_item_and_response, mock_box_session, fields, mock_object_id, etag, if_none_match_header):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.get.return_value = mock_item_response
expected_params = {'fields': ','.join(fields)} if fields else None
info = test_item.get(fields, etag=etag)
mock_box_session.get.assert_called_once_with(expected_url, params=expected_params, headers=if_none_match_header)
assert isinstance(info, test_item.__class__)
assert info.id == mock_object_id
|
chemelnucfin/tensorflow | refs/heads/master | tensorflow/compiler/tests/scatter_nd_op_test.py | 13 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(
[functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1)] +
shape[-ndims + 1:])
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(
shape[:ndims - 1] +
[functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1)])
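# Worked illustration (added comment, not part of the original test): for a
# tensor of shape (2, 3, 4), _FlatInnerDims(t) has shape (6, 4) while
# _FlatOuterDims(t) has shape (2, 12).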
def _NumpyScatterNd(ref, indices, updates, op):
ixdim = indices.shape[-1]
num_updates = indices.size // ixdim
total_nd = len(ref.shape)
slice_size = 1
for i in range(ixdim, total_nd):
slice_size *= ref.shape[i]
flat_indices = _FlatInnerDims(indices)
flat_updates = updates.reshape((num_updates, slice_size))
output_flat = _FlatOuterDims(ref, ixdim + 1)
for ix_updates, ix_output in enumerate(flat_indices):
ix_output = tuple(ix_output)
output_flat[ix_output] = op(output_flat[ix_output],
flat_updates[ix_updates])
return output_flat.reshape(ref.shape)
def _NumpyUpdate(indices, updates, shape):
ref = np.zeros(shape, dtype=updates.dtype)
return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
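# Worked illustration (added comment, not part of the original test): with
# shape (4,), indices [[1], [3]] and updates [10, 20], _NumpyUpdate returns
# array([0, 10, 0, 20]).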
class ScatterNdTest(xla_test.XLATestCase):
def _VariableRankTest(self,
np_scatter,
tf_scatter,
vtype,
itype,
repeat_indices=False):
np.random.seed(8)
ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
num_updates = indices_shape[0]
ixdim = indices_shape[-1]
indexable_area_shape = ()
for i in range(ixdim):
indexable_area_shape += (ref_shape[i],)
all_indices = [
list(coord)
for coord, _ in np.ndenumerate(np.empty(indexable_area_shape, vtype))
]
np.random.shuffle(all_indices)
indices = np.array(all_indices[:num_updates])
if num_updates > 1 and repeat_indices:
indices = indices[:num_updates // 2]
for _ in range(num_updates - num_updates // 2):
indices = np.append(
indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
np.random.shuffle(indices)
indices = _AsType(indices[:num_updates], itype)
updates_shape = (num_updates,)
for i in range(ixdim, len(ref_shape)):
updates_shape += (ref_shape[i],)
updates = _AsType(np.random.randn(*(updates_shape)), vtype)
# Scatter via numpy
np_out = np_scatter(indices, updates, ref_shape)
# Scatter via tensorflow
tf_out = tf_scatter(indices, updates, ref_shape)
self.assertAllClose(np_out, tf_out)
def _VariableRankTests(self, np_scatter, tf_scatter):
for vtype in self.numeric_types:
for itype in set([np.int32, np.int64]).intersection(set(self.int_types)):
self._VariableRankTest(np_scatter, tf_scatter, vtype, itype)
def _runScatterNd(self, indices, updates, shape):
with self.session():
updates_placeholder = array_ops.placeholder(updates.dtype)
indices_placeholder = array_ops.placeholder(indices.dtype)
with self.test_scope():
output = array_ops.scatter_nd(indices_placeholder, updates_placeholder,
shape)
feed_dict = {updates_placeholder: updates, indices_placeholder: indices}
return output.eval(feed_dict=feed_dict)
def testSimple(self):
indices = np.array([[4], [3], [1], [7]], dtype=np.int32)
updates = np.array([9, 10, 11, 12], dtype=np.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12], dtype=np.int32)
self.assertAllEqual(expected, self._runScatterNd(indices, updates, [8]))
def testRepeatedIndices(self):
indices = np.array([[0], [1], [0], [1]], dtype=np.int32)
updates = np.array([9, 10, 11, 12], dtype=np.float32)
expected = np.array([20, 22], dtype=np.int32)
self.assertAllEqual(expected, self._runScatterNd(indices, updates, [2]))
def testSimple2(self):
indices = np.array([[1, 0], [1, 1]], dtype=np.int32)
updates = np.array([11., 12.], dtype=np.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]], dtype=np.float32)
self.assertAllEqual(expected, self._runScatterNd(indices, updates, [3, 2]))
def testSimple3(self):
indices = np.array([[1]], dtype=np.int32)
updates = np.array([[11., 12.]], dtype=np.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
self.assertAllEqual(expected, self._runScatterNd(indices, updates, [3, 2]))
def testVariableRankUpdate(self):
self._VariableRankTests(_NumpyUpdate, self._runScatterNd)
def testExtraIndicesDimensions(self):
indices = np.zeros([1, 1, 2], np.int32)
updates = np.zeros([1, 1], np.int32)
expected = np.zeros([2, 2], dtype=np.int32)
self.assertAllEqual(expected, self._runScatterNd(indices, updates, [2, 2]))
def testRank3InvalidShape1(self):
indices = np.zeros([3, 2, 2], np.int32)
updates = np.zeros([2, 2, 2], np.int32)
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"Must have updates.shape"):
self._runScatterNd(indices, updates, [2, 2, 2])
def testRank3InvalidShape2(self):
indices = np.zeros([2, 2, 1], np.int32)
updates = np.zeros([2, 2], np.int32)
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"Must have updates.shape"):
self._runScatterNd(indices, updates, [2, 2, 2])
def testScatterOutOfRange(self):
updates = np.array([-3, -4, -5]).astype(np.float32)
# Indices all in range, no problem.
indices = np.array([[2], [0], [5]], dtype=np.int32)
self._runScatterNd(indices, updates, [6])
# Indices out of range should not fail. It produces implementation-defined
# output.
indices = np.array([[-1], [0], [5]], dtype=np.int32)
self._runScatterNd(indices, updates, [6])
indices = np.array([[2], [0], [6]], dtype=np.int32)
self._runScatterNd(indices, updates, [6])
class ScatterNdTensorTest(xla_test.XLATestCase):
def _runScatter(self, op):
indices_np = np.array([[4], [3], [1], [7]], dtype=np.int32)
updates_np = np.array([9, 10, 11, 12], dtype=np.float32)
with self.session() as sess, self.test_scope():
indices = array_ops.placeholder(indices_np.dtype, shape=indices_np.shape)
updates = array_ops.placeholder(updates_np.dtype, shape=updates_np.shape)
t = array_ops.ones([8], dtype=np.float32)
out = op(t, indices, updates)
return sess.run(out, feed_dict={indices: indices_np, updates: updates_np})
def testAdd(self):
self.assertAllEqual(
self._runScatter(array_ops.tensor_scatter_add),
np.array([1, 12, 1, 11, 10, 1, 1, 13], dtype=np.float32))
def testSub(self):
self.assertAllEqual(
self._runScatter(array_ops.tensor_scatter_sub),
np.array([1, -10, 1, -9, -8, 1, 1, -11], dtype=np.float32))
def testUpdate(self):
self.assertAllEqual(
self._runScatter(array_ops.tensor_scatter_update),
np.array([1, 11, 1, 10, 9, 1, 1, 12], dtype=np.float32))
if __name__ == "__main__":
test.main()
|
rainaashutosh/MyTestRekall | refs/heads/4.0MR2 | rekall-core/rekall/type_generator.py | 3 | # Rekall Memory Forensics
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Authors:
# Michael Cohen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This module generates types automatically by disassembling code.
Generally Rekall prefers to use publicly available debugging information for
building profiles for the different operating systems supported. However, in
many cases, the symbols we need are not publicly available.
We can, in theory, distribute hand-written profiles for each OS version, but this
has a number of disadvantages:
- It is very time-intensive to prepare hand-written profiles for each version of
  the operating system.
- It is not possible for us to test all possible versions (the Rekall profile
  repository currently has hundreds of Windows kernel builds) - we would need to
  maintain the profiles for each of these versions, and add new profiles each
  time a hotfix is released.
- We also need to document how we arrive at these offsets in order for others to
  verify our findings. It is important for a forensic application to be as
transparent as possible without "magic numbers" in code.
An additional requirement is that the disassembly process be data driven. This
way we can update the exact algorithm within the Rekall profile repository if an
error or bug is discovered without needing to update all current users of the
tool.
How does it work?
=================
The DynamicProfile profile is loaded from the profile repository as normal. Like
in a regular profile, the profile is defined by a json object. A DynamicProfile
however, contains an additional section $DYNAMIC_STRUCT, which will be compiled
into overlays.
{
"$METADATA": {
.....
}
"$DYNAMIC_STRUCT": {
"_TCP_LISTENER": {
"Owner": [
["Disassembler", {
"start": "tcpip.sys!_TcpCovetNetBufferList",
"rule": [
"MOV EAX, [ESI+$out]",
"TEST EAX, EAX",
"PUSH EAX",
"CALL DWORD *__imp__PsGetProcessId",
],
target="unsigned int"
}]
]
}
}
}
"""
__author__ = "Michael Cohen <[email protected]>"
import re
from rekall import registry
from rekall import obj
from rekall import utils
class DynamicParser(object):
"""A dynamic profile processor base class."""
__metaclass__ = registry.MetaclassRegistry
def calculate(self, session):
"""Returns the expected value or a NoneObject."""
_ = session
return obj.NoneObject("No value found")
class Disassembler(DynamicParser):
"""A constant generator deriving values based on the disassembler."""
def __init__(self, session=None, name=None, start=None, end=None,
length=300, rules=None, max_separation=10):
"""Derive a value from disassembly.
Args:
start: Where to start disassembly (Usually a symbol name).
end: Where to stop disassembly.
length: If end is not specified, we disassemble at most this many
bytes.
rules: A list of rules (see above).
"""
self.session = session
self.text_rules = rules
self.rules = self.CompileRule(rules)
self.start = start
self.length = length
self.end = end
self.name = name
self.cached_value = None
self.max_separation = max_separation
def __str__(self):
return "Disassemble %s" % self.start
def CompileRule(self, rule):
"""Convert the rule into a regular expression.
Rules are a list of patterns. Each pattern corresponds to a single
instruction. There can be an arbitrary number of instructions between
each rule.
        Output is captured using $out (this can only be specified once).
        Wildcards are denoted by *. Wildcards only apply across a single
        instruction (and comment). The following is an example of a rule:
MOV EAX, [ESI+$out]
TEST EAX, EAX
PUSH EAX
CALL DWORD *__imp__PsGetProcessId
"""
# Sanitize all regular expression chars in the rule.
result = []
for line in rule:
# Escape regex sensitive chars.
line = re.sub(r"([()\[\]\+])", r"\\\1", line)
# Wildcards
line = re.sub(r"\*", r".+?", line)
# Capture variable. The same capture variable may be specified more
# than once in the same rule, so we need to append the instance
# number of the capture variable to make it unique.
self.instance = 0
def _ReplaceCaptureVars(match):
self.instance += 1
return r"(?P<%s_%s>[^ \[\]+-]+)" % (
match.group(1), self.instance)
line = re.sub(r"\$([a-zA-Z0-9]+)", _ReplaceCaptureVars, line)
result.append(re.compile(line, re.S | re.M))
return result
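    # A minimal illustration of the compilation above (hypothetical values,
    # kept in comments so module behaviour is unchanged): assuming the
    # documented rule syntax, the single pattern
    #   "MOV EAX, [ESI+$out]"
    # is escaped and compiled into a regex roughly equivalent to
    #   MOV EAX, \[ESI\+(?P<out_1>[^ \[\]+-]+)\]
    # so that searching the disassembly line "MOV EAX, [ESI+0x88]" yields
    # groupdict() == {"out_1": "0x88"}; the "_1" suffix is the instance
    # counter added by _ReplaceCaptureVars.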
def calculate(self, session):
if self.cached_value is not None:
return self.cached_value
self.cached_value = self._calculate(session)
return self.cached_value
def _FindRuleIndex(self, line):
for i, rule in enumerate(self.rules):
# At every line we check if the current rule can be matched - if
# it can then it is a better match.
m = rule.search(line)
if m:
yield i, m.groupdict()
def _CheckCaptureVariables(self, vector, contexts):
"""Checks that capture variables are consistent in the vector.
The vector is a list of disassembly lines which match the rules, e.g.
[16, 60, 61]
The context is the capture variables from these rules. In order
to be valid, the capture variables must all be consistent. For
example the following is not consistent (since var1 is RAX in
the first rule and RCX in the second rule):
contexts[16]
{'var1': u'RAX'}
contexts[60]
{'var1': u'RCX', 'out': u'0x88'}
contexts[61]
{}
"""
result = {}
for rule_number, item in enumerate(vector):
rule_context = contexts[rule_number]
# The capture variables in this rule only.
rule_capture_vars_values = {}
for k, v in rule_context[item].iteritems():
var_name = k.rsplit("_", 1)[0]
# If this var is previously known, this match must be the same
# as previously found.
if var_name in result and v != result[var_name]:
return
# If this capture variable's value is the same as another
# capture variable's value in the same rule, exclude the
# match. This means that an expression like:
#
# MOV $var2, [$var1+$out]
#
# Necessarily implies that $var1 and $var2 must be different
# registers.
if (v in rule_capture_vars_values and
rule_capture_vars_values[v] != var_name):
return
result[var_name] = v
rule_capture_vars_values[v] = var_name
return result
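    # Worked example (hypothetical data): for vector == [16, 60] with
    #   contexts[0][16] == {"var1_1": "RAX"}
    #   contexts[1][60] == {"var1_1": "RAX", "out_1": "0x88"}
    # the instance suffixes are stripped and all values agree, so the merged
    # result is {"var1": "RAX", "out": "0x88"}.  Had the second rule bound
    # var1 to a different register, None would be returned instead.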
def _GetMatch(self, hits, contexts):
"""Find the first vector that matches all the criteria."""
for vector in self.GenerateVector(hits, [], 0):
context = self._CheckCaptureVariables(vector, contexts)
if not context:
continue
return (vector, context)
return [], {}
def GenerateVector(self, hits, vector, level):
for item in hits.get(level, []):
if vector:
if item < vector[-1]:
continue
if item > self.max_separation + vector[-1]:
break
new_vector = vector + [item]
if level + 1 == len(hits):
yield new_vector
elif level + 1 < len(hits):
for result in self.GenerateVector(
hits, new_vector, level+1):
yield result
def _calculate(self, session):
# Try to cache disassembly to speed things up.
try:
disassembler_cache = self.session.GetParameter(
"disassembler_cache", utils.FastStore())
disassembly = disassembler_cache.Get(
(self.start, self.length, self.end))
except KeyError:
disassembly = unicode(session.plugins.dis(
offset=self.start, branch=True,
length=self.length, end=self.end))
disassembler_cache.Put(
(self.start, self.length, self.end), disassembly)
self.session.SetCache("disassembler_cache", disassembler_cache)
hits = {}
contexts = {}
disassembly = disassembly.splitlines()
for hit, line in enumerate(disassembly):
for rule_idx, context in self._FindRuleIndex(line):
hits.setdefault(rule_idx, []).append(hit)
contexts.setdefault(rule_idx, {})[hit] = context
# All the hits must match
if len(hits) < len(self.rules):
self.session.logging.error("Failed to find match for %s", self.name)
# Add some debugging messages here to make diagnosing errors easier.
for i, rule in enumerate(self.text_rules):
if i not in hits:
self.session.logging.debug("Unable to match rule: %s", rule)
return 0
vector, context = self._GetMatch(hits, contexts)
if len(vector) < len(self.rules):
self.session.logging.error("Failed to find match for %s.",
self.name)
return 0
self.session.logging.debug("Found match for %s", self.name)
for x in vector:
self.session.logging.debug(disassembly[x])
return int(context.get("out", "0"), 0)
class DynamicProfile(obj.Profile):
"""A Dynamic profile which parses its overlays from $DYNAMIC_STRUCT."""
def GenerateOverlay(session, dynamic_definition):
"""Parse the definition and generate an overlay from it."""
overlay = {}
for type_name, definition in dynamic_definition.items():
type_overlay = {}
overlay[type_name] = [None, type_overlay]
for field_name, attempts in definition.items():
parsers = []
for (parser_name, kwargs) in attempts:
kwargs = kwargs.copy()
target = kwargs.pop("target", None)
target_args = kwargs.pop("target_args", {})
name = "%s.%s" % (type_name, field_name)
parsers.append(DynamicParser.classes.get(parser_name)(
session=session, name=name, **kwargs))
# Make the offset a callable
# Bind parameters in lambda:
# pylint: disable=dangerous-default-value,cell-var-from-loop
def offset_cb(x, parsers=parsers, field_name=field_name):
for p in parsers:
result = p.calculate(x.obj_session)
if result:
return result
else:
session.logging.debug(
"Unable to find %s.%s via %s", x.obj_name,
field_name, p)
return 0
type_overlay[field_name] = [offset_cb, [target, target_args]]
return overlay
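# As a rough sketch of the output (based on the $DYNAMIC_STRUCT example in the
# module docstring): GenerateOverlay returns an overlay shaped like
#   {"_TCP_LISTENER": [None, {"Owner": [offset_cb, ["unsigned int", {}]]}]}
# where offset_cb is the closure defined above. It runs each configured
# DynamicParser (here a single Disassembler rule) against the session and
# returns the derived offset, or 0 if no parser produced a value.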
|
chromium2014/src | refs/heads/master | third_party/tlslite/tlslite/integration/clienthelper.py | 116 | # Authors:
# Trevor Perrin
# Dimitris Moraitis - Anon ciphersuites
#
# See the LICENSE file for legal information regarding use of this file.
"""
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
"""
from tlslite.checker import Checker
class ClientHelper(object):
"""This is a helper class used to integrate TLS Lite with various
TLS clients (e.g. poplib, smtplib, httplib, etc.)"""
def __init__(self,
username=None, password=None,
certChain=None, privateKey=None,
checker=None,
settings = None,
anon = False):
"""
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP,
        or you can do certificate-based server
        authentication by passing a 'checker' that verifies the
        server's certificate chain (e.g. against a known X.509
        fingerprint).
        Certificate-based server authentication is compatible with
        SRP or certificate-based client authentication.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Then you should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP username. Requires the
'password' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.x509certchain.X509CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP arguments.
@type privateKey: L{tlslite.utils.rsakey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP arguments.
@type checker: L{tlslite.checker.Checker}
@param checker: Callable object called after handshaking to
evaluate the connection and raise an Exception if necessary.
@type settings: L{tlslite.handshakesettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.username = None
self.password = None
self.certChain = None
self.privateKey = None
self.checker = None
self.anon = anon
#SRP Authentication
if username and password and not \
(certChain or privateKey):
self.username = username
self.password = password
#Certificate Chain Authentication
elif certChain and privateKey and not \
(username or password):
self.certChain = certChain
self.privateKey = privateKey
#No Authentication
elif not password and not username and not \
certChain and not privateKey:
pass
else:
raise ValueError("Bad parameters")
self.checker = checker
self.settings = settings
self.tlsSession = None
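    # A minimal usage sketch (hypothetical credentials; the TLSConnection is
    # normally created by the stdlib integration subclasses, which then call
    # _handshake() once the socket is connected):
    #   helper = ClientHelper(username="alice", password="secret")
    #   ...
    #   helper._handshake(tlsConnection)   # runs handshakeClientSRP()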
def _handshake(self, tlsConnection):
if self.username and self.password:
tlsConnection.handshakeClientSRP(username=self.username,
password=self.password,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
elif self.anon:
tlsConnection.handshakeClientAnonymous(session=self.tlsSession,
settings=self.settings,
checker=self.checker)
else:
tlsConnection.handshakeClientCert(certChain=self.certChain,
privateKey=self.privateKey,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
self.tlsSession = tlsConnection.session |
ysekky/GPy | refs/heads/devel | GPy/core/parameterization/priors.py | 3 | # Copyright (c) 2012 - 2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy.special import gammaln, digamma
from ...util.linalg import pdinv
from paramz.domains import _REAL, _POSITIVE
import warnings
import weakref
class Prior(object):
domain = None
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance or cls._instance.__class__ is not cls:
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
cls._instance = newfunc(cls)
else:
cls._instance = newfunc(cls, *args, **kwargs)
return cls._instance
def pdf(self, x):
return np.exp(self.lnpdf(x))
def plot(self):
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ...plotting.matplot_dep import priors_plots
priors_plots.univariate_plot(self)
def __repr__(self, *args, **kwargs):
return self.__str__()
class Gaussian(Prior):
"""
Implementation of the univariate Gaussian probability function, coupled with random variables.
:param mu: mean
:param sigma: standard deviation
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _REAL
_instances = []
def __new__(cls, mu=0, sigma=1): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().mu == mu and instance().sigma == sigma:
return instance()
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, mu, sigma)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu, sigma):
self.mu = float(mu)
self.sigma = float(sigma)
self.sigma2 = np.square(self.sigma)
self.constant = -0.5 * np.log(2 * np.pi * self.sigma2)
def __str__(self):
return "N({:.2g}, {:.2g})".format(self.mu, self.sigma)
def lnpdf(self, x):
return self.constant - 0.5 * np.square(x - self.mu) / self.sigma2
def lnpdf_grad(self, x):
return -(x - self.mu) / self.sigma2
def rvs(self, n):
return np.random.randn(n) * self.sigma + self.mu
# def __getstate__(self):
# return self.mu, self.sigma
#
# def __setstate__(self, state):
# self.mu = state[0]
# self.sigma = state[1]
# self.sigma2 = np.square(self.sigma)
# self.constant = -0.5 * np.log(2 * np.pi * self.sigma2)
class Uniform(Prior):
domain = _REAL
_instances = []
def __new__(cls, lower=0, upper=1): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().lower == lower and instance().upper == upper:
return instance()
o = super(Prior, cls).__new__(cls, lower, upper)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, lower, upper):
self.lower = float(lower)
self.upper = float(upper)
def __str__(self):
return "[{:.2g}, {:.2g}]".format(self.lower, self.upper)
def lnpdf(self, x):
region = (x >= self.lower) * (x <= self.upper)
return region
def lnpdf_grad(self, x):
return np.zeros(x.shape)
def rvs(self, n):
return np.random.uniform(self.lower, self.upper, size=n)
# def __getstate__(self):
# return self.lower, self.upper
#
# def __setstate__(self, state):
# self.lower = state[0]
# self.upper = state[1]
class LogGaussian(Gaussian):
"""
Implementation of the univariate *log*-Gaussian probability function, coupled with random variables.
:param mu: mean
:param sigma: standard deviation
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _POSITIVE
_instances = []
def __new__(cls, mu=0, sigma=1): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().mu == mu and instance().sigma == sigma:
return instance()
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, mu, sigma)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu, sigma):
self.mu = float(mu)
self.sigma = float(sigma)
self.sigma2 = np.square(self.sigma)
self.constant = -0.5 * np.log(2 * np.pi * self.sigma2)
def __str__(self):
return "lnN({:.2g}, {:.2g})".format(self.mu, self.sigma)
def lnpdf(self, x):
return self.constant - 0.5 * np.square(np.log(x) - self.mu) / self.sigma2 - np.log(x)
def lnpdf_grad(self, x):
return -((np.log(x) - self.mu) / self.sigma2 + 1.) / x
def rvs(self, n):
return np.exp(np.random.randn(int(n)) * self.sigma + self.mu)
class MultivariateGaussian(Prior):
"""
Implementation of the multivariate Gaussian probability function, coupled with random variables.
:param mu: mean (N-dimensional array)
:param var: covariance matrix (NxN)
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _REAL
_instances = []
def __new__(cls, mu=0, var=1): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu == mu) and np.all(instance().var == var):
return instance()
o = super(Prior, cls).__new__(cls, mu, var)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu, var):
self.mu = np.array(mu).flatten()
self.var = np.array(var)
assert len(self.var.shape) == 2
assert self.var.shape[0] == self.var.shape[1]
assert self.var.shape[0] == self.mu.size
self.input_dim = self.mu.size
self.inv, self.hld = pdinv(self.var)
self.constant = -0.5 * self.input_dim * np.log(2 * np.pi) - self.hld
def summary(self):
raise NotImplementedError
def pdf(self, x):
return np.exp(self.lnpdf(x))
def lnpdf(self, x):
d = x - self.mu
return self.constant - 0.5 * np.sum(d * np.dot(d, self.inv), 1)
def lnpdf_grad(self, x):
d = x - self.mu
return -np.dot(self.inv, d)
def rvs(self, n):
return np.random.multivariate_normal(self.mu, self.var, n)
def plot(self):
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ...plotting.matplot_dep import priors_plots
priors_plots.multivariate_plot(self)
def __getstate__(self):
return self.mu, self.var
def __setstate__(self, state):
self.mu = state[0]
self.var = state[1]
assert len(self.var.shape) == 2
assert self.var.shape[0] == self.var.shape[1]
assert self.var.shape[0] == self.mu.size
self.input_dim = self.mu.size
self.inv, self.hld = pdinv(self.var)
self.constant = -0.5 * self.input_dim * np.log(2 * np.pi) - self.hld
def gamma_from_EV(E, V):
warnings.warn("use Gamma.from_EV to create Gamma Prior", FutureWarning)
return Gamma.from_EV(E, V)
class Gamma(Prior):
"""
Implementation of the Gamma probability function, coupled with random variables.
:param a: shape parameter
:param b: rate parameter (warning: it's the *inverse* of the scale)
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _POSITIVE
_instances = []
def __new__(cls, a=1, b=.5): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().a == a and instance().b == b:
return instance()
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, a, b)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, a, b):
self.a = float(a)
self.b = float(b)
self.constant = -gammaln(self.a) + a * np.log(b)
def __str__(self):
return "Ga({:.2g}, {:.2g})".format(self.a, self.b)
def summary(self):
ret = {"E[x]": self.a / self.b, \
"E[ln x]": digamma(self.a) - np.log(self.b), \
"var[x]": self.a / self.b / self.b, \
"Entropy": gammaln(self.a) - (self.a - 1.) * digamma(self.a) - np.log(self.b) + self.a}
if self.a > 1:
ret['Mode'] = (self.a - 1.) / self.b
else:
ret['mode'] = np.nan
return ret
def lnpdf(self, x):
return self.constant + (self.a - 1) * np.log(x) - self.b * x
def lnpdf_grad(self, x):
return (self.a - 1.) / x - self.b
def rvs(self, n):
return np.random.gamma(scale=1. / self.b, shape=self.a, size=n)
@staticmethod
def from_EV(E, V):
"""
Creates an instance of a Gamma Prior by specifying the Expected value(s)
and Variance(s) of the distribution.
:param E: expected value
:param V: variance
"""
a = np.square(E) / V
b = E / V
return Gamma(a, b)
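    # Worked example (illustrative numbers only): requesting E[x] = 2 and
    # var[x] = 4 gives a = E**2/V = 1 and b = E/V = 0.5, so Gamma.from_EV(2., 4.)
    # builds the same prior as Gamma(1., 0.5), consistent with E[x] = a/b and
    # var[x] = a/b**2 in the rate parameterisation used here.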
def __getstate__(self):
return self.a, self.b
def __setstate__(self, state):
self.a = state[0]
self.b = state[1]
self.constant = -gammaln(self.a) + self.a * np.log(self.b)
class InverseGamma(Gamma):
"""
Implementation of the inverse-Gamma probability function, coupled with random variables.
:param a: shape parameter
:param b: rate parameter (warning: it's the *inverse* of the scale)
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _POSITIVE
_instances = []
def __new__(cls, a=1, b=.5): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().a == a and instance().b == b:
return instance()
o = super(Prior, cls).__new__(cls, a, b)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, a, b):
self.a = float(a)
self.b = float(b)
self.constant = -gammaln(self.a) + a * np.log(b)
def __str__(self):
return "iGa({:.2g}, {:.2g})".format(self.a, self.b)
def lnpdf(self, x):
return self.constant - (self.a + 1) * np.log(x) - self.b / x
def lnpdf_grad(self, x):
return -(self.a + 1.) / x + self.b / x ** 2
def rvs(self, n):
return 1. / np.random.gamma(scale=1. / self.b, shape=self.a, size=n)
class DGPLVM_KFDA(Prior):
"""
    Implementation of the Discriminative Gaussian Process Latent Variable prior using
    Kernel Fisher Discriminant Analysis (Seung-Jean Kim), as used in the face
    verification paper by Chaochao Lu.
:param lambdaa: constant
:param sigma2: constant
.. Note:: Surpassing Human-Level Face paper dgplvm implementation
"""
domain = _REAL
# _instances = []
# def __new__(cls, lambdaa, sigma2): # Singleton:
# if cls._instances:
# cls._instances[:] = [instance for instance in cls._instances if instance()]
# for instance in cls._instances:
# if instance().mu == mu and instance().sigma == sigma:
# return instance()
# o = super(Prior, cls).__new__(cls, mu, sigma)
# cls._instances.append(weakref.ref(o))
# return cls._instances[-1]()
def __init__(self, lambdaa, sigma2, lbl, kern, x_shape):
"""A description for init"""
self.datanum = lbl.shape[0]
self.classnum = lbl.shape[1]
self.lambdaa = lambdaa
self.sigma2 = sigma2
self.lbl = lbl
self.kern = kern
lst_ni = self.compute_lst_ni()
self.a = self.compute_a(lst_ni)
self.A = self.compute_A(lst_ni)
self.x_shape = x_shape
def get_class_label(self, y):
for idx, v in enumerate(y):
if v == 1:
return idx
return -1
# This function assigns each data point to its own class
# and returns the dictionary which contains the class name and parameters.
def compute_cls(self, x):
cls = {}
# Appending each data point to its proper class
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in cls:
cls[class_label] = []
cls[class_label].append(x[j])
if len(cls) > 2:
for i in range(2, self.classnum):
del cls[i]
return cls
def x_reduced(self, cls):
x1 = cls[0]
x2 = cls[1]
x = np.concatenate((x1, x2), axis=0)
return x
def compute_lst_ni(self):
lst_ni = []
lst_ni1 = []
lst_ni2 = []
f1 = (np.where(self.lbl[:, 0] == 1)[0])
f2 = (np.where(self.lbl[:, 1] == 1)[0])
for idx in f1:
lst_ni1.append(idx)
for idx in f2:
lst_ni2.append(idx)
lst_ni.append(len(lst_ni1))
lst_ni.append(len(lst_ni2))
return lst_ni
def compute_a(self, lst_ni):
a = np.ones((self.datanum, 1))
count = 0
for N_i in lst_ni:
if N_i == lst_ni[0]:
a[count:count + N_i] = (float(1) / N_i) * a[count]
count += N_i
else:
if N_i == lst_ni[1]:
a[count: count + N_i] = -(float(1) / N_i) * a[count]
count += N_i
return a
def compute_A(self, lst_ni):
A = np.zeros((self.datanum, self.datanum))
idx = 0
for N_i in lst_ni:
B = float(1) / np.sqrt(N_i) * (np.eye(N_i) - ((float(1) / N_i) * np.ones((N_i, N_i))))
A[idx:idx + N_i, idx:idx + N_i] = B
idx += N_i
return A
# Here log function
def lnpdf(self, x):
x = x.reshape(self.x_shape)
K = self.kern.K(x)
a_trans = np.transpose(self.a)
paran = self.lambdaa * np.eye(x.shape[0]) + self.A.dot(K).dot(self.A)
inv_part = pdinv(paran)[0]
J = a_trans.dot(K).dot(self.a) - a_trans.dot(K).dot(self.A).dot(inv_part).dot(self.A).dot(K).dot(self.a)
J_star = (1. / self.lambdaa) * J
return (-1. / self.sigma2) * J_star
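    # In matrix form the quantity computed above is the KFDA objective
    #   J = a'Ka - a'KA (lambda*I + AKA)^-1 AKa,   J* = J / lambda,
    # and lnpdf returns -J*/sigma2; the gradient method below differentiates
    # the same expression with respect to K and chains through
    # kern.gradients_X.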
# Here gradient function
def lnpdf_grad(self, x):
x = x.reshape(self.x_shape)
K = self.kern.K(x)
paran = self.lambdaa * np.eye(x.shape[0]) + self.A.dot(K).dot(self.A)
inv_part = pdinv(paran)[0]
b = self.A.dot(inv_part).dot(self.A).dot(K).dot(self.a)
a_Minus_b = self.a - b
a_b_trans = np.transpose(a_Minus_b)
DJ_star_DK = (1. / self.lambdaa) * (a_Minus_b.dot(a_b_trans))
DJ_star_DX = self.kern.gradients_X(DJ_star_DK, x)
return (-1. / self.sigma2) * DJ_star_DX
def rvs(self, n):
return np.random.rand(n) # A WRONG implementation
def __str__(self):
return 'DGPLVM_prior'
    def __getstate__(self):
        return self.lbl, self.lambdaa, self.sigma2, self.kern, self.a, self.A, self.x_shape
def __setstate__(self, state):
lbl, lambdaa, sigma2, kern, a, A, x_shape = state
self.datanum = lbl.shape[0]
self.classnum = lbl.shape[1]
self.lambdaa = lambdaa
self.sigma2 = sigma2
self.lbl = lbl
self.kern = kern
lst_ni = self.compute_lst_ni()
self.a = self.compute_a(lst_ni)
self.A = self.compute_A(lst_ni)
self.x_shape = x_shape
class DGPLVM(Prior):
"""
Implementation of the Discriminative Gaussian Process Latent Variable model paper, by Raquel.
:param sigma2: constant
.. Note:: DGPLVM for Classification paper implementation
"""
domain = _REAL
def __new__(cls, sigma2, lbl, x_shape):
return super(Prior, cls).__new__(cls, sigma2, lbl, x_shape)
def __init__(self, sigma2, lbl, x_shape):
self.sigma2 = sigma2
# self.x = x
self.lbl = lbl
self.classnum = lbl.shape[1]
self.datanum = lbl.shape[0]
self.x_shape = x_shape
self.dim = x_shape[1]
def get_class_label(self, y):
for idx, v in enumerate(y):
if v == 1:
return idx
return -1
# This function assigns each data point to its own class
# and returns the dictionary which contains the class name and parameters.
def compute_cls(self, x):
cls = {}
# Appending each data point to its proper class
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in cls:
cls[class_label] = []
cls[class_label].append(x[j])
return cls
# This function computes mean of each class. The mean is calculated through each dimension
def compute_Mi(self, cls):
M_i = np.zeros((self.classnum, self.dim))
for i in cls:
# Mean of each class
class_i = cls[i]
M_i[i] = np.mean(class_i, axis=0)
return M_i
# Adding data points as tuple to the dictionary so that we can access indices
def compute_indices(self, x):
data_idx = {}
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in data_idx:
data_idx[class_label] = []
t = (j, x[j])
data_idx[class_label].append(t)
return data_idx
# Adding indices to the list so we can access whole the indices
def compute_listIndices(self, data_idx):
lst_idx = []
lst_idx_all = []
for i in data_idx:
if len(lst_idx) == 0:
pass
#Do nothing, because it is the first time list is created so is empty
else:
lst_idx = []
# Here we put indices of each class in to the list called lst_idx_all
for m in range(len(data_idx[i])):
lst_idx.append(data_idx[i][m][0])
lst_idx_all.append(lst_idx)
return lst_idx_all
# This function calculates between classes variances
def compute_Sb(self, cls, M_i, M_0):
Sb = np.zeros((self.dim, self.dim))
for i in cls:
B = (M_i[i] - M_0).reshape(self.dim, 1)
B_trans = B.transpose()
Sb += (float(len(cls[i])) / self.datanum) * B.dot(B_trans)
return Sb
# This function calculates within classes variances
def compute_Sw(self, cls, M_i):
Sw = np.zeros((self.dim, self.dim))
for i in cls:
N_i = float(len(cls[i]))
W_WT = np.zeros((self.dim, self.dim))
for xk in cls[i]:
W = (xk - M_i[i])
W_WT += np.outer(W, W)
Sw += (N_i / self.datanum) * ((1. / N_i) * W_WT)
return Sw
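    # Written out, the two scatter matrices built above are
    #   Sb = sum_i (N_i/N) (M_i - M_0)(M_i - M_0)^T                (between classes)
    #   Sw = sum_i (N_i/N) (1/N_i) sum_{x in class i} (x - M_i)(x - M_i)^T
    # with N the number of points, N_i the size of class i, M_i the class means
    # and M_0 the overall mean; lnpdf below penalises tr(Sb^-1 Sw), with Sb
    # regularised by a small ridge before inversion.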
# Calculating beta and Bi for Sb
def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
# import pdb
# pdb.set_trace()
B_i = np.zeros((self.classnum, self.dim))
Sig_beta_B_i_all = np.zeros((self.datanum, self.dim))
for i in data_idx:
# pdb.set_trace()
# Calculating Bi
B_i[i] = (M_i[i] - M_0).reshape(1, self.dim)
for k in range(self.datanum):
for i in data_idx:
N_i = float(len(data_idx[i]))
if k in lst_idx_all[i]:
beta = (float(1) / N_i) - (float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
else:
beta = -(float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
Sig_beta_B_i_all = Sig_beta_B_i_all.transpose()
return Sig_beta_B_i_all
# Calculating W_j s separately so we can access all the W_j s anytime
def compute_wj(self, data_idx, M_i):
W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
xj = tpl[1]
j = tpl[0]
W_i[j] = (xj - M_i[i])
return W_i
# Calculating alpha and Wj for Sw
def compute_sig_alpha_W(self, data_idx, lst_idx_all, W_i):
Sig_alpha_W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
k = tpl[0]
for j in lst_idx_all[i]:
if k == j:
alpha = 1 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
else:
alpha = 0 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
Sig_alpha_W_i = (1. / self.datanum) * np.transpose(Sig_alpha_W_i)
return Sig_alpha_W_i
# This function calculates log of our prior
def lnpdf(self, x):
x = x.reshape(self.x_shape)
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
# sb_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0]
return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
# This function calculates derivative of the log of prior function
def lnpdf_grad(self, x):
x = x.reshape(self.x_shape)
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
data_idx = self.compute_indices(x)
lst_idx_all = self.compute_listIndices(data_idx)
Sig_beta_B_i_all = self.compute_sig_beta_Bi(data_idx, M_i, M_0, lst_idx_all)
W_i = self.compute_wj(data_idx, M_i)
Sig_alpha_W_i = self.compute_sig_alpha_W(data_idx, lst_idx_all, W_i)
# Calculating inverse of Sb and its transpose and minus
# Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0]
Sb_inv_N_trans = np.transpose(Sb_inv_N)
Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans
Sw_trans = np.transpose(Sw)
# Calculating DJ/DXk
DJ_Dxk = 2 * (
Sb_inv_N_trans_minus.dot(Sw_trans).dot(Sb_inv_N_trans).dot(Sig_beta_B_i_all) + Sb_inv_N_trans.dot(
Sig_alpha_W_i))
# Calculating derivative of the log of the prior
DPx_Dx = ((-1 / self.sigma2) * DJ_Dxk)
return DPx_Dx.T
# def frb(self, x):
# from functools import partial
# from GPy.models import GradientChecker
# f = partial(self.lnpdf)
# df = partial(self.lnpdf_grad)
# grad = GradientChecker(f, df, x, 'X')
# grad.checkgrad(verbose=1)
def rvs(self, n):
return np.random.rand(n) # A WRONG implementation
def __str__(self):
return 'DGPLVM_prior_Raq'
# ******************************************
from . import Parameterized
from . import Param
class DGPLVM_Lamda(Prior, Parameterized):
"""
Implementation of the Discriminative Gaussian Process Latent Variable model paper, by Raquel.
:param sigma2: constant
.. Note:: DGPLVM for Classification paper implementation
"""
domain = _REAL
# _instances = []
# def __new__(cls, mu, sigma): # Singleton:
# if cls._instances:
# cls._instances[:] = [instance for instance in cls._instances if instance()]
# for instance in cls._instances:
# if instance().mu == mu and instance().sigma == sigma:
# return instance()
# o = super(Prior, cls).__new__(cls, mu, sigma)
# cls._instances.append(weakref.ref(o))
# return cls._instances[-1]()
def __init__(self, sigma2, lbl, x_shape, lamda, name='DP_prior'):
super(DGPLVM_Lamda, self).__init__(name=name)
self.sigma2 = sigma2
# self.x = x
self.lbl = lbl
self.lamda = lamda
self.classnum = lbl.shape[1]
self.datanum = lbl.shape[0]
self.x_shape = x_shape
self.dim = x_shape[1]
self.lamda = Param('lamda', np.diag(lamda))
self.link_parameter(self.lamda)
def get_class_label(self, y):
for idx, v in enumerate(y):
if v == 1:
return idx
return -1
# This function assigns each data point to its own class
# and returns the dictionary which contains the class name and parameters.
def compute_cls(self, x):
cls = {}
# Appending each data point to its proper class
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in cls:
cls[class_label] = []
cls[class_label].append(x[j])
return cls
# This function computes mean of each class. The mean is calculated through each dimension
def compute_Mi(self, cls):
M_i = np.zeros((self.classnum, self.dim))
for i in cls:
# Mean of each class
class_i = cls[i]
M_i[i] = np.mean(class_i, axis=0)
return M_i
# Adding data points as tuple to the dictionary so that we can access indices
def compute_indices(self, x):
data_idx = {}
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in data_idx:
data_idx[class_label] = []
t = (j, x[j])
data_idx[class_label].append(t)
return data_idx
# Adding indices to the list so we can access whole the indices
def compute_listIndices(self, data_idx):
lst_idx = []
lst_idx_all = []
for i in data_idx:
if len(lst_idx) == 0:
pass
#Do nothing, because it is the first time list is created so is empty
else:
lst_idx = []
# Here we put indices of each class in to the list called lst_idx_all
for m in range(len(data_idx[i])):
lst_idx.append(data_idx[i][m][0])
lst_idx_all.append(lst_idx)
return lst_idx_all
# This function calculates between classes variances
def compute_Sb(self, cls, M_i, M_0):
Sb = np.zeros((self.dim, self.dim))
for i in cls:
B = (M_i[i] - M_0).reshape(self.dim, 1)
B_trans = B.transpose()
Sb += (float(len(cls[i])) / self.datanum) * B.dot(B_trans)
return Sb
# This function calculates within classes variances
def compute_Sw(self, cls, M_i):
Sw = np.zeros((self.dim, self.dim))
for i in cls:
N_i = float(len(cls[i]))
W_WT = np.zeros((self.dim, self.dim))
for xk in cls[i]:
W = (xk - M_i[i])
W_WT += np.outer(W, W)
Sw += (N_i / self.datanum) * ((1. / N_i) * W_WT)
return Sw
# Calculating beta and Bi for Sb
def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
# import pdb
# pdb.set_trace()
B_i = np.zeros((self.classnum, self.dim))
Sig_beta_B_i_all = np.zeros((self.datanum, self.dim))
for i in data_idx:
# pdb.set_trace()
# Calculating Bi
B_i[i] = (M_i[i] - M_0).reshape(1, self.dim)
for k in range(self.datanum):
for i in data_idx:
N_i = float(len(data_idx[i]))
if k in lst_idx_all[i]:
beta = (float(1) / N_i) - (float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
else:
beta = -(float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
Sig_beta_B_i_all = Sig_beta_B_i_all.transpose()
return Sig_beta_B_i_all
# Calculating W_j s separately so we can access all the W_j s anytime
def compute_wj(self, data_idx, M_i):
W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
xj = tpl[1]
j = tpl[0]
W_i[j] = (xj - M_i[i])
return W_i
# Calculating alpha and Wj for Sw
def compute_sig_alpha_W(self, data_idx, lst_idx_all, W_i):
Sig_alpha_W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
k = tpl[0]
for j in lst_idx_all[i]:
if k == j:
alpha = 1 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
else:
alpha = 0 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
Sig_alpha_W_i = (1. / self.datanum) * np.transpose(Sig_alpha_W_i)
return Sig_alpha_W_i
# This function calculates log of our prior
def lnpdf(self, x):
x = x.reshape(self.x_shape)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!
#self.lamda.values[:] = self.lamda.values/self.lamda.values.sum()
xprime = x.dot(np.diagflat(self.lamda))
x = xprime
# print x
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
# Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.5))[0]
Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.9)[0]
return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
# This function calculates derivative of the log of prior function
def lnpdf_grad(self, x):
x = x.reshape(self.x_shape)
xprime = x.dot(np.diagflat(self.lamda))
x = xprime
# print x
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
data_idx = self.compute_indices(x)
lst_idx_all = self.compute_listIndices(data_idx)
Sig_beta_B_i_all = self.compute_sig_beta_Bi(data_idx, M_i, M_0, lst_idx_all)
W_i = self.compute_wj(data_idx, M_i)
Sig_alpha_W_i = self.compute_sig_alpha_W(data_idx, lst_idx_all, W_i)
# Calculating inverse of Sb and its transpose and minus
# Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.5))[0]
Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.9)[0]
Sb_inv_N_trans = np.transpose(Sb_inv_N)
Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans
Sw_trans = np.transpose(Sw)
# Calculating DJ/DXk
DJ_Dxk = 2 * (
Sb_inv_N_trans_minus.dot(Sw_trans).dot(Sb_inv_N_trans).dot(Sig_beta_B_i_all) + Sb_inv_N_trans.dot(
Sig_alpha_W_i))
# Calculating derivative of the log of the prior
DPx_Dx = ((-1 / self.sigma2) * DJ_Dxk)
DPxprim_Dx = np.diagflat(self.lamda).dot(DPx_Dx)
# Because of the GPy we need to transpose our matrix so that it gets the same shape as out matrix (denominator layout!!!)
DPxprim_Dx = DPxprim_Dx.T
DPxprim_Dlamda = DPx_Dx.dot(x)
# Because of the GPy we need to transpose our matrix so that it gets the same shape as out matrix (denominator layout!!!)
DPxprim_Dlamda = DPxprim_Dlamda.T
self.lamda.gradient = np.diag(DPxprim_Dlamda)
# print DPxprim_Dx
return DPxprim_Dx
# def frb(self, x):
# from functools import partial
# from GPy.models import GradientChecker
# f = partial(self.lnpdf)
# df = partial(self.lnpdf_grad)
# grad = GradientChecker(f, df, x, 'X')
# grad.checkgrad(verbose=1)
def rvs(self, n):
return np.random.rand(n) # A WRONG implementation
def __str__(self):
return 'DGPLVM_prior_Raq_Lamda'
# ******************************************
class DGPLVM_T(Prior):
"""
Implementation of the Discriminative Gaussian Process Latent Variable model paper, by Raquel.
:param sigma2: constant
.. Note:: DGPLVM for Classification paper implementation
"""
domain = _REAL
# _instances = []
# def __new__(cls, mu, sigma): # Singleton:
# if cls._instances:
# cls._instances[:] = [instance for instance in cls._instances if instance()]
# for instance in cls._instances:
# if instance().mu == mu and instance().sigma == sigma:
# return instance()
# o = super(Prior, cls).__new__(cls, mu, sigma)
# cls._instances.append(weakref.ref(o))
# return cls._instances[-1]()
def __init__(self, sigma2, lbl, x_shape, vec):
self.sigma2 = sigma2
# self.x = x
self.lbl = lbl
self.classnum = lbl.shape[1]
self.datanum = lbl.shape[0]
self.x_shape = x_shape
self.dim = x_shape[1]
self.vec = vec
def get_class_label(self, y):
for idx, v in enumerate(y):
if v == 1:
return idx
return -1
# This function assigns each data point to its own class
# and returns the dictionary which contains the class name and parameters.
def compute_cls(self, x):
cls = {}
# Appending each data point to its proper class
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in cls:
cls[class_label] = []
cls[class_label].append(x[j])
return cls
# This function computes mean of each class. The mean is calculated through each dimension
def compute_Mi(self, cls):
M_i = np.zeros((self.classnum, self.dim))
for i in cls:
# Mean of each class
# class_i = np.multiply(cls[i],vec)
class_i = cls[i]
M_i[i] = np.mean(class_i, axis=0)
return M_i
# Adding data points as tuple to the dictionary so that we can access indices
def compute_indices(self, x):
data_idx = {}
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in data_idx:
data_idx[class_label] = []
t = (j, x[j])
data_idx[class_label].append(t)
return data_idx
# Adding indices to the list so we can access whole the indices
def compute_listIndices(self, data_idx):
lst_idx = []
lst_idx_all = []
for i in data_idx:
if len(lst_idx) == 0:
pass
#Do nothing, because it is the first time list is created so is empty
else:
lst_idx = []
# Here we put indices of each class in to the list called lst_idx_all
for m in range(len(data_idx[i])):
lst_idx.append(data_idx[i][m][0])
lst_idx_all.append(lst_idx)
return lst_idx_all
# This function calculates between classes variances
def compute_Sb(self, cls, M_i, M_0):
Sb = np.zeros((self.dim, self.dim))
for i in cls:
B = (M_i[i] - M_0).reshape(self.dim, 1)
B_trans = B.transpose()
Sb += (float(len(cls[i])) / self.datanum) * B.dot(B_trans)
return Sb
# This function calculates within classes variances
def compute_Sw(self, cls, M_i):
Sw = np.zeros((self.dim, self.dim))
for i in cls:
N_i = float(len(cls[i]))
W_WT = np.zeros((self.dim, self.dim))
for xk in cls[i]:
W = (xk - M_i[i])
W_WT += np.outer(W, W)
Sw += (N_i / self.datanum) * ((1. / N_i) * W_WT)
return Sw
# Calculating beta and Bi for Sb
def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
# import pdb
# pdb.set_trace()
B_i = np.zeros((self.classnum, self.dim))
Sig_beta_B_i_all = np.zeros((self.datanum, self.dim))
for i in data_idx:
# pdb.set_trace()
# Calculating Bi
B_i[i] = (M_i[i] - M_0).reshape(1, self.dim)
for k in range(self.datanum):
for i in data_idx:
N_i = float(len(data_idx[i]))
if k in lst_idx_all[i]:
beta = (float(1) / N_i) - (float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
else:
beta = -(float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
Sig_beta_B_i_all = Sig_beta_B_i_all.transpose()
return Sig_beta_B_i_all
# Calculating W_j s separately so we can access all the W_j s anytime
def compute_wj(self, data_idx, M_i):
W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
xj = tpl[1]
j = tpl[0]
W_i[j] = (xj - M_i[i])
return W_i
# Calculating alpha and Wj for Sw
def compute_sig_alpha_W(self, data_idx, lst_idx_all, W_i):
Sig_alpha_W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
k = tpl[0]
for j in lst_idx_all[i]:
if k == j:
alpha = 1 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
else:
alpha = 0 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
Sig_alpha_W_i = (1. / self.datanum) * np.transpose(Sig_alpha_W_i)
return Sig_alpha_W_i
# This function calculates log of our prior
def lnpdf(self, x):
x = x.reshape(self.x_shape)
xprim = x.dot(self.vec)
x = xprim
# print x
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
# Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#print 'SB_inv: ', Sb_inv_N
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
# This function calculates derivative of the log of prior function
def lnpdf_grad(self, x):
x = x.reshape(self.x_shape)
xprim = x.dot(self.vec)
x = xprim
# print x
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
data_idx = self.compute_indices(x)
lst_idx_all = self.compute_listIndices(data_idx)
Sig_beta_B_i_all = self.compute_sig_beta_Bi(data_idx, M_i, M_0, lst_idx_all)
W_i = self.compute_wj(data_idx, M_i)
Sig_alpha_W_i = self.compute_sig_alpha_W(data_idx, lst_idx_all, W_i)
# Calculating inverse of Sb and its transpose and minus
# Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#print 'SB_inv: ',Sb_inv_N
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
Sb_inv_N_trans = np.transpose(Sb_inv_N)
Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans
Sw_trans = np.transpose(Sw)
# Calculating DJ/DXk
DJ_Dxk = 2 * (
Sb_inv_N_trans_minus.dot(Sw_trans).dot(Sb_inv_N_trans).dot(Sig_beta_B_i_all) + Sb_inv_N_trans.dot(
Sig_alpha_W_i))
# Calculating derivative of the log of the prior
DPx_Dx = ((-1 / self.sigma2) * DJ_Dxk)
return DPx_Dx.T
# def frb(self, x):
# from functools import partial
# from GPy.models import GradientChecker
# f = partial(self.lnpdf)
# df = partial(self.lnpdf_grad)
# grad = GradientChecker(f, df, x, 'X')
# grad.checkgrad(verbose=1)
def rvs(self, n):
return np.random.rand(n) # A WRONG implementation
def __str__(self):
return 'DGPLVM_prior_Raq_TTT'
class HalfT(Prior):
"""
Implementation of the half student t probability function, coupled with random variables.
:param A: scale parameter
:param nu: degrees of freedom
"""
domain = _POSITIVE
_instances = []
def __new__(cls, A, nu): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().A == A and instance().nu == nu:
return instance()
o = super(Prior, cls).__new__(cls, A, nu)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, A, nu):
self.A = float(A)
self.nu = float(nu)
self.constant = gammaln(.5*(self.nu+1.)) - gammaln(.5*self.nu) - .5*np.log(np.pi*self.A*self.nu)
def __str__(self):
return "hT({:.2g}, {:.2g})".format(self.A, self.nu)
def lnpdf(self, theta):
return (theta > 0) * (self.constant - .5*(self.nu + 1) * np.log(1. + (1./self.nu) * (theta/self.A)**2))
# theta = theta if isinstance(theta,np.ndarray) else np.array([theta])
# lnpdfs = np.zeros_like(theta)
# theta = np.array([theta])
# above_zero = theta.flatten()>1e-6
# v = self.nu
# sigma2=self.A
# stop
# lnpdfs[above_zero] = (+ gammaln((v + 1) * 0.5)
# - gammaln(v * 0.5)
# - 0.5*np.log(sigma2 * v * np.pi)
# - 0.5*(v + 1)*np.log(1 + (1/np.float(v))*((theta[above_zero][0]**2)/sigma2))
# )
# return lnpdfs
def lnpdf_grad(self, theta):
theta = theta if isinstance(theta, np.ndarray) else np.array([theta])
grad = np.zeros_like(theta)
above_zero = theta > 1e-6
v = self.nu
sigma2 = self.A
grad[above_zero] = -0.5*(v+1)*(2*theta[above_zero])/(v*sigma2 + theta[above_zero][0]**2)
return grad
def rvs(self, n):
# return np.random.randn(n) * self.sigma + self.mu
from scipy.stats import t
# [np.abs(x) for x in t.rvs(df=4,loc=0,scale=50, size=10000)])
ret = t.rvs(self.nu, loc=0, scale=self.A, size=n)
ret[ret < 0] = 0
return ret
class Exponential(Prior):
"""
Implementation of the Exponential probability function,
coupled with random variables.
    :param l: rate parameter (inverse scale)
"""
domain = _POSITIVE
_instances = []
def __new__(cls, l): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().l == l:
return instance()
o = super(Exponential, cls).__new__(cls, l)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, l):
self.l = l
def __str__(self):
return "Exp({:.2g})".format(self.l)
def summary(self):
ret = {"E[x]": 1. / self.l,
"E[ln x]": np.nan,
"var[x]": 1. / self.l**2,
"Entropy": 1. - np.log(self.l),
"Mode": 0.}
return ret
def lnpdf(self, x):
return np.log(self.l) - self.l * x
def lnpdf_grad(self, x):
return - self.l
def rvs(self, n):
        return np.random.exponential(scale=1./self.l, size=n)
class StudentT(Prior):
"""
Implementation of the student t probability function, coupled with random variables.
:param mu: mean
:param sigma: standard deviation
:param nu: degrees of freedom
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _REAL
_instances = []
def __new__(cls, mu=0, sigma=1, nu=4): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().mu == mu and instance().sigma == sigma and instance().nu == nu:
return instance()
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, mu, sigma, nu)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu, sigma, nu):
self.mu = float(mu)
self.sigma = float(sigma)
self.sigma2 = np.square(self.sigma)
self.nu = float(nu)
def __str__(self):
return "St({:.2g}, {:.2g}, {:.2g})".format(self.mu, self.sigma, self.nu)
def lnpdf(self, x):
from scipy.stats import t
return t.logpdf(x,self.nu,self.mu,self.sigma)
def lnpdf_grad(self, x):
return -(self.nu + 1.)*(x - self.mu)/( self.nu*self.sigma2 + np.square(x - self.mu) )
def rvs(self, n):
from scipy.stats import t
ret = t.rvs(self.nu, loc=self.mu, scale=self.sigma, size=n)
return ret
|
Jgarcia-IAS/localizacion | refs/heads/master | openerp/report/render/makohtml2html/__init__.py | 381 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from makohtml2html import parseNode
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
plotly/plotly.py | refs/heads/master | packages/python/plotly/plotly/validators/scatter/line/__init__.py | 2 | import sys
if sys.version_info < (3, 7):
from ._width import WidthValidator
from ._smoothing import SmoothingValidator
from ._simplify import SimplifyValidator
from ._shape import ShapeValidator
from ._dash import DashValidator
from ._color import ColorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._width.WidthValidator",
"._smoothing.SmoothingValidator",
"._simplify.SimplifyValidator",
"._shape.ShapeValidator",
"._dash.DashValidator",
"._color.ColorValidator",
],
)
|
plotly/python-api | refs/heads/master | packages/python/plotly/plotly/validators/box/_ysrc.py | 1 | import _plotly_utils.basevalidators
class YsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="ysrc", parent_name="box", **kwargs):
super(YsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
shubhamgupta123/erpnext | refs/heads/master | erpnext/hr/doctype/repayment_schedule/repayment_schedule.py | 45 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class RepaymentSchedule(Document):
pass
|
chand3040/sree_odoo | refs/heads/master | openerp/addons/gamification/models/challenge.py | 91 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import ustr, DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from datetime import date, datetime, timedelta
import calendar
import logging
_logger = logging.getLogger(__name__)
# display top 3 in ranking, could be db variable
MAX_VISIBILITY_RANKING = 3
def start_end_date_for_period(period, default_start_date=False, default_end_date=False):
"""Return the start and end date for a goal period based on today
:param str default_start_date: string date in DEFAULT_SERVER_DATE_FORMAT format
:param str default_end_date: string date in DEFAULT_SERVER_DATE_FORMAT format
:return: (start_date, end_date), dates in string format, False if the period is
not defined or unknown"""
today = date.today()
if period == 'daily':
start_date = today
end_date = start_date
elif period == 'weekly':
delta = timedelta(days=today.weekday())
start_date = today - delta
end_date = start_date + timedelta(days=7)
elif period == 'monthly':
month_range = calendar.monthrange(today.year, today.month)
start_date = today.replace(day=1)
end_date = today.replace(day=month_range[1])
elif period == 'yearly':
start_date = today.replace(month=1, day=1)
end_date = today.replace(month=12, day=31)
else: # period == 'once':
start_date = default_start_date # for manual goal, start each time
end_date = default_end_date
return (start_date, end_date)
return (datetime.strftime(start_date, DF), datetime.strftime(end_date, DF))
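# Example (hypothetical date): if today is 2014-03-15, period='monthly' yields
# 2014-03-01 / 2014-03-31, returned as strings in DEFAULT_SERVER_DATE_FORMAT;
# for period='once' the supplied default dates are returned unchanged (False
# when not provided).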
class gamification_challenge(osv.Model):
"""Gamification challenge
    Set of predefined objectives assigned to people with rules for recurrence and
    rewards.
    If 'user_ids' is defined and 'period' is different from 'once', the set will
be assigned to the users for each period (eg: every 1st of each month if
'monthly' is selected)
"""
_name = 'gamification.challenge'
_description = 'Gamification challenge'
_inherit = 'mail.thread'
def _get_next_report_date(self, cr, uid, ids, field_name, arg, context=None):
"""Return the next report date based on the last report date and report
period.
:return: a string in DEFAULT_SERVER_DATE_FORMAT representing the date"""
res = {}
for challenge in self.browse(cr, uid, ids, context=context):
last = datetime.strptime(challenge.last_report_date, DF).date()
if challenge.report_message_frequency == 'daily':
next = last + timedelta(days=1)
res[challenge.id] = next.strftime(DF)
elif challenge.report_message_frequency == 'weekly':
next = last + timedelta(days=7)
res[challenge.id] = next.strftime(DF)
elif challenge.report_message_frequency == 'monthly':
month_range = calendar.monthrange(last.year, last.month)
next = last.replace(day=month_range[1]) + timedelta(days=1)
res[challenge.id] = next.strftime(DF)
elif challenge.report_message_frequency == 'yearly':
res[challenge.id] = last.replace(year=last.year + 1).strftime(DF)
# frequency == 'once', reported when closed only
else:
res[challenge.id] = False
return res
def _get_categories(self, cr, uid, context=None):
return [
            ('hr', 'Human Resources / Engagement'),
('other', 'Settings / Gamification Tools'),
]
def _get_report_template(self, cr, uid, context=None):
try:
return self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'simple_report_template')[1]
except ValueError:
return False
_order = 'end_date, start_date, name, id'
_columns = {
'name': fields.char('Challenge Name', required=True, translate=True),
'description': fields.text('Description', translate=True),
'state': fields.selection([
('draft', 'Draft'),
('inprogress', 'In Progress'),
('done', 'Done'),
], copy=False,
string='State', required=True, track_visibility='onchange'),
'manager_id': fields.many2one('res.users',
string='Responsible', help="The user responsible for the challenge."),
'user_ids': fields.many2many('res.users', 'gamification_challenge_users_rel',
string='Users',
help="List of users participating to the challenge"),
'user_domain': fields.char('User domain', help="Alternative to a list of users"),
'period': fields.selection([
('once', 'Non recurring'),
('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly'),
('yearly', 'Yearly')
],
string='Periodicity',
            help='Period of automatic goal assignment. If none is selected, should be launched manually.',
required=True),
'start_date': fields.date('Start Date',
help="The day a new challenge will be automatically started. If no periodicity is set, will use this date as the goal start date."),
'end_date': fields.date('End Date',
help="The day a new challenge will be automatically closed. If no periodicity is set, will use this date as the goal end date."),
'invited_user_ids': fields.many2many('res.users', 'gamification_invited_user_ids_rel',
string="Suggest to users"),
'line_ids': fields.one2many('gamification.challenge.line', 'challenge_id',
string='Lines',
help="List of goals that will be set",
required=True, copy=True),
        'reward_id': fields.many2one('gamification.badge', string="For Every Succeeding User"),
'reward_first_id': fields.many2one('gamification.badge', string="For 1st user"),
'reward_second_id': fields.many2one('gamification.badge', string="For 2nd user"),
'reward_third_id': fields.many2one('gamification.badge', string="For 3rd user"),
'reward_failure': fields.boolean('Reward Bests if not Succeeded?'),
'reward_realtime': fields.boolean('Reward as soon as every goal is reached',
help="With this option enabled, a user can receive a badge only once. The top 3 badges are still rewarded only at the end of the challenge."),
'visibility_mode': fields.selection([
('personal', 'Individual Goals'),
('ranking', 'Leader Board (Group Ranking)'),
],
string="Display Mode", required=True),
'report_message_frequency': fields.selection([
('never', 'Never'),
('onchange', 'On change'),
('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly'),
('yearly', 'Yearly')
],
string="Report Frequency", required=True),
'report_message_group_id': fields.many2one('mail.group',
string='Send a copy to',
help='Group that will receive a copy of the report in addition to the user'),
'report_template_id': fields.many2one('email.template', string="Report Template", required=True),
'remind_update_delay': fields.integer('Non-updated manual goals will be reminded after',
help="Never reminded if no value or zero is specified."),
'last_report_date': fields.date('Last Report Date'),
'next_report_date': fields.function(_get_next_report_date,
type='date', string='Next Report Date', store=True),
'category': fields.selection(lambda s, *a, **k: s._get_categories(*a, **k),
string="Appears in", help="Define the visibility of the challenge through menus", required=True),
}
_defaults = {
'period': 'once',
'state': 'draft',
'visibility_mode': 'personal',
'report_message_frequency': 'never',
'last_report_date': fields.date.today,
'manager_id': lambda s, cr, uid, c: uid,
'category': 'hr',
'reward_failure': False,
'report_template_id': lambda s, *a, **k: s._get_report_template(*a, **k),
'reward_realtime': True,
}
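    # Illustrative sketch (not part of the original module): creating a minimal
    # challenge through the ORM, e.g. from another osv method. The field values and
    # the `definition_id` variable below are assumptions chosen only for the example.
    #
    #   challenge_id = self.pool['gamification.challenge'].create(cr, uid, {
    #       'name': 'Monthly Sales Target',
    #       'period': 'monthly',
    #       'visibility_mode': 'personal',
    #       'user_domain': "[('groups_id.name', '=', 'Sales')]",
    #       'line_ids': [(0, 0, {'definition_id': definition_id, 'target_goal': 10.0})],
    #   }, context=context)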
def create(self, cr, uid, vals, context=None):
"""Overwrite the create method to add the user of groups"""
if vals.get('user_domain'):
user_ids = self._get_challenger_users(cr, uid, vals.get('user_domain'), context=context)
if not vals.get('user_ids'):
vals['user_ids'] = []
vals['user_ids'] += [(4, user_id) for user_id in user_ids]
return super(gamification_challenge, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int,long)):
ids = [ids]
if vals.get('user_domain'):
user_ids = self._get_challenger_users(cr, uid, vals.get('user_domain'), context=context)
if not vals.get('user_ids'):
vals['user_ids'] = []
vals['user_ids'] += [(4, user_id) for user_id in user_ids]
write_res = super(gamification_challenge, self).write(cr, uid, ids, vals, context=context)
if vals.get('report_message_frequency', 'never') != 'never':
            # _recompute_challenge_users does not set users for challenges with no reports, so subscribe them now
for challenge in self.browse(cr, uid, ids, context=context):
self.message_subscribe(cr, uid, [challenge.id], [user.partner_id.id for user in challenge.user_ids], context=context)
if vals.get('state') == 'inprogress':
self._recompute_challenge_users(cr, uid, ids, context=context)
self._generate_goals_from_challenge(cr, uid, ids, context=context)
elif vals.get('state') == 'done':
self.check_challenge_reward(cr, uid, ids, force=True, context=context)
elif vals.get('state') == 'draft':
# resetting progress
if self.pool.get('gamification.goal').search(cr, uid, [('challenge_id', 'in', ids), ('state', '=', 'inprogress')], context=context):
raise osv.except_osv("Error", "You can not reset a challenge with unfinished goals.")
return write_res
##### Update #####
def _cron_update(self, cr, uid, context=None, ids=False):
"""Daily cron check.
- Start planned challenges (in draft and with start_date = today)
- Create the missing goals (eg: modified the challenge to add lines)
- Update every running challenge
"""
if context is None:
context = {}
# start scheduled challenges
planned_challenge_ids = self.search(cr, uid, [
('state', '=', 'draft'),
('start_date', '<=', fields.date.today())])
if planned_challenge_ids:
self.write(cr, uid, planned_challenge_ids, {'state': 'inprogress'}, context=context)
# close scheduled challenges
planned_challenge_ids = self.search(cr, uid, [
('state', '=', 'inprogress'),
            ('end_date', '<=', fields.date.today())])
if planned_challenge_ids:
self.write(cr, uid, planned_challenge_ids, {'state': 'done'}, context=context)
if not ids:
ids = self.search(cr, uid, [('state', '=', 'inprogress')], context=context)
# in cron mode, will do intermediate commits
# TODO in trunk: replace by parameter
context = dict(context, commit_gamification=True)
return self._update_all(cr, uid, ids, context=context)
def _update_all(self, cr, uid, ids, context=None):
"""Update the challenges and related goals
        :param list(int) ids: the ids of the challenges to update; if empty,
        nothing is updated."""
if not ids:
return True
if isinstance(ids, (int,long)):
ids = [ids]
goal_obj = self.pool.get('gamification.goal')
# include yesterday goals to update the goals that just ended
# exclude goals for users that did not connect since the last update
yesterday = date.today() - timedelta(days=1)
cr.execute("""SELECT gg.id
FROM gamification_goal as gg,
gamification_challenge as gc,
res_users as ru
WHERE gg.challenge_id = gc.id
AND gg.user_id = ru.id
AND gg.write_date < ru.login_date
AND gg.closed IS false
AND gc.id IN %s
AND (gg.state = 'inprogress'
OR (gg.state = 'reached'
AND (gg.end_date >= %s OR gg.end_date IS NULL)))
""", (tuple(ids), yesterday.strftime(DF)))
goal_ids = [res[0] for res in cr.fetchall()]
        # update every already-generated running goal linked to the selected challenges
goal_obj.update(cr, uid, goal_ids, context=context)
self._recompute_challenge_users(cr, uid, ids, context=context)
self._generate_goals_from_challenge(cr, uid, ids, context=context)
for challenge in self.browse(cr, uid, ids, context=context):
if challenge.last_report_date != fields.date.today():
# goals closed but still opened at the last report date
closed_goals_to_report = goal_obj.search(cr, uid, [
('challenge_id', '=', challenge.id),
('start_date', '>=', challenge.last_report_date),
('end_date', '<=', challenge.last_report_date)
])
if challenge.next_report_date and fields.date.today() >= challenge.next_report_date:
self.report_progress(cr, uid, challenge, context=context)
elif len(closed_goals_to_report) > 0:
# some goals need a final report
self.report_progress(cr, uid, challenge, subset_goal_ids=closed_goals_to_report, context=context)
self.check_challenge_reward(cr, uid, ids, context=context)
return True
def quick_update(self, cr, uid, challenge_id, context=None):
"""Update all the goals of a specific challenge, no generation of new goals"""
goal_ids = self.pool.get('gamification.goal').search(cr, uid, [('challenge_id', '=', challenge_id)], context=context)
self.pool.get('gamification.goal').update(cr, uid, goal_ids, context=context)
return True
def _get_challenger_users(self, cr, uid, domain, context=None):
user_domain = eval(ustr(domain))
return self.pool['res.users'].search(cr, uid, user_domain, context=context)
def _recompute_challenge_users(self, cr, uid, challenge_ids, context=None):
"""Recompute the domain to add new users and remove the one no longer matching the domain"""
for challenge in self.browse(cr, uid, challenge_ids, context=context):
if challenge.user_domain:
old_user_ids = [user.id for user in challenge.user_ids]
new_user_ids = self._get_challenger_users(cr, uid, challenge.user_domain, context=context)
to_remove_ids = list(set(old_user_ids) - set(new_user_ids))
to_add_ids = list(set(new_user_ids) - set(old_user_ids))
write_op = [(3, user_id) for user_id in to_remove_ids]
write_op += [(4, user_id) for user_id in to_add_ids]
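                # Note: (3, user_id) and (4, user_id) are standard OpenERP x2many
                # write commands -- (3, id) unlinks the user from the challenge
                # without deleting the user record, (4, id) links an existing user.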
if write_op:
self.write(cr, uid, [challenge.id], {'user_ids': write_op}, context=context)
return True
def action_start(self, cr, uid, ids, context=None):
"""Start a challenge"""
return self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)
def action_check(self, cr, uid, ids, context=None):
"""Check a challenge
Create goals that haven't been created yet (eg: if added users)
Recompute the current value for each goal related"""
return self._update_all(cr, uid, ids=ids, context=context)
def action_report_progress(self, cr, uid, ids, context=None):
"""Manual report of a goal, does not influence automatic report frequency"""
if isinstance(ids, (int,long)):
ids = [ids]
for challenge in self.browse(cr, uid, ids, context=context):
self.report_progress(cr, uid, challenge, context=context)
return True
##### Automatic actions #####
def _generate_goals_from_challenge(self, cr, uid, ids, context=None):
"""Generate the goals for each line and user.
If goals already exist for this line and user, the line is skipped. This
can be called after each change in the list of users or lines.
:param list(int) ids: the list of challenge concerned"""
goal_obj = self.pool.get('gamification.goal')
for challenge in self.browse(cr, uid, ids, context=context):
(start_date, end_date) = start_end_date_for_period(challenge.period)
to_update = []
# if no periodicity, use challenge dates
if not start_date and challenge.start_date:
start_date = challenge.start_date
if not end_date and challenge.end_date:
end_date = challenge.end_date
for line in challenge.line_ids:
                # there are potentially a lot of users
# detect the ones with no goal linked to this line
date_clause = ""
query_params = [line.id]
if start_date:
date_clause += "AND g.start_date = %s"
query_params.append(start_date)
if end_date:
date_clause += "AND g.end_date = %s"
query_params.append(end_date)
query = """SELECT u.id AS user_id
FROM res_users u
LEFT JOIN gamification_goal g
ON (u.id = g.user_id)
WHERE line_id = %s
{date_clause}
""".format(date_clause=date_clause)
cr.execute(query, query_params)
user_with_goal_ids = cr.dictfetchall()
participant_user_ids = [user.id for user in challenge.user_ids]
user_without_goal_ids = list(set(participant_user_ids) - set([user['user_id'] for user in user_with_goal_ids]))
user_squating_challenge_ids = list(set([user['user_id'] for user in user_with_goal_ids]) - set(participant_user_ids))
if user_squating_challenge_ids:
# users that used to match the challenge
goal_to_remove_ids = goal_obj.search(cr, uid, [('challenge_id', '=', challenge.id), ('user_id', 'in', user_squating_challenge_ids)], context=context)
goal_obj.unlink(cr, uid, goal_to_remove_ids, context=context)
values = {
'definition_id': line.definition_id.id,
'line_id': line.id,
'target_goal': line.target_goal,
'state': 'inprogress',
}
if start_date:
values['start_date'] = start_date
if end_date:
values['end_date'] = end_date
# the goal is initialised over the limit to make sure we will compute it at least once
if line.condition == 'higher':
values['current'] = line.target_goal - 1
else:
values['current'] = line.target_goal + 1
if challenge.remind_update_delay:
values['remind_update_delay'] = challenge.remind_update_delay
for user_id in user_without_goal_ids:
values.update({'user_id': user_id})
goal_id = goal_obj.create(cr, uid, values, context=context)
to_update.append(goal_id)
goal_obj.update(cr, uid, to_update, context=context)
return True
##### JS utilities #####
def _get_serialized_challenge_lines(self, cr, uid, challenge, user_id=False, restrict_goal_ids=False, restrict_top=False, context=None):
"""Return a serialised version of the goals information if the user has not completed every goal
:challenge: browse record of challenge to compute
:user_id: res.users id of the user retrieving progress (False if no distinction, only for ranking challenges)
        :restrict_goal_ids: <list(int)> compute only the results for this subset of gamification.goal ids, if False retrieve every goal of current running challenge
        :restrict_top: <int> for challenge lines where visibility_mode == 'ranking', retrieve only the best results and the user's own goal, if False retrieve all
restrict_goal_ids has priority over restrict_top
format list
# if visibility_mode == 'ranking'
{
'name': <gamification.goal.description name>,
'description': <gamification.goal.description description>,
'condition': <reach condition {lower,higher}>,
'computation_mode': <target computation {manually,count,sum,python}>,
'monetary': <{True,False}>,
'suffix': <value suffix>,
'action': <{True,False}>,
'display_mode': <{progress,boolean}>,
'target': <challenge line target>,
'own_goal_id': <gamification.goal id where user_id == uid>,
'goals': [
{
'id': <gamification.goal id>,
'rank': <user ranking>,
'user_id': <res.users id>,
'name': <res.users name>,
'state': <gamification.goal state {draft,inprogress,reached,failed,canceled}>,
'completeness': <percentage>,
'current': <current value>,
}
]
},
# if visibility_mode == 'personal'
{
'id': <gamification.goal id>,
'name': <gamification.goal.description name>,
'description': <gamification.goal.description description>,
'condition': <reach condition {lower,higher}>,
'computation_mode': <target computation {manually,count,sum,python}>,
'monetary': <{True,False}>,
'suffix': <value suffix>,
'action': <{True,False}>,
'display_mode': <{progress,boolean}>,
'target': <challenge line target>,
'state': <gamification.goal state {draft,inprogress,reached,failed,canceled}>,
'completeness': <percentage>,
'current': <current value>,
}
"""
goal_obj = self.pool.get('gamification.goal')
(start_date, end_date) = start_end_date_for_period(challenge.period)
res_lines = []
all_reached = True
for line in challenge.line_ids:
line_data = {
'name': line.definition_id.name,
'description': line.definition_id.description,
'condition': line.definition_id.condition,
'computation_mode': line.definition_id.computation_mode,
'monetary': line.definition_id.monetary,
'suffix': line.definition_id.suffix,
'action': True if line.definition_id.action_id else False,
'display_mode': line.definition_id.display_mode,
'target': line.target_goal,
}
domain = [
('line_id', '=', line.id),
('state', '!=', 'draft'),
]
if restrict_goal_ids:
domain.append(('ids', 'in', restrict_goal_ids))
else:
# if no subset goals, use the dates for restriction
if start_date:
domain.append(('start_date', '=', start_date))
if end_date:
domain.append(('end_date', '=', end_date))
if challenge.visibility_mode == 'personal':
if not user_id:
raise osv.except_osv(_('Error!'),_("Retrieving progress for personal challenge without user information"))
domain.append(('user_id', '=', user_id))
sorting = goal_obj._order
limit = 1
else:
line_data.update({
'own_goal_id': False,
'goals': [],
})
sorting = "completeness desc, current desc"
limit = False
goal_ids = goal_obj.search(cr, uid, domain, order=sorting, limit=limit, context=context)
ranking = 0
for goal in goal_obj.browse(cr, uid, goal_ids, context=context):
if challenge.visibility_mode == 'personal':
# limit=1 so only one result
line_data.update({
'id': goal.id,
'current': goal.current,
'completeness': goal.completeness,
'state': goal.state,
})
if goal.state != 'reached':
all_reached = False
else:
ranking += 1
if user_id and goal.user_id.id == user_id:
line_data['own_goal_id'] = goal.id
elif restrict_top and ranking > restrict_top:
# not own goal and too low to be in top
continue
line_data['goals'].append({
'id': goal.id,
'user_id': goal.user_id.id,
'name': goal.user_id.name,
'rank': ranking,
'current': goal.current,
'completeness': goal.completeness,
'state': goal.state,
})
if goal.state != 'reached':
all_reached = False
if goal_ids:
res_lines.append(line_data)
if all_reached:
return []
return res_lines
##### Reporting #####
def report_progress(self, cr, uid, challenge, context=None, users=False, subset_goal_ids=False):
"""Post report about the progress of the goals
        :param challenge: the challenge object that needs to be reported
:param users: the list(res.users) of users that are concerned by
the report. If False, will send the report to every user concerned
          (goal users and the group that receives a copy). Only used for challenges with
a visibility mode set to 'personal'.
        :param subset_goal_ids: the list(int) of goal ids to restrict the report
          to. If not specified, use the goals of the current challenge period.
          This parameter can be used to produce reports for previous challenge
          periods.
"""
if context is None:
context = {}
temp_obj = self.pool.get('email.template')
ctx = context.copy()
if challenge.visibility_mode == 'ranking':
lines_boards = self._get_serialized_challenge_lines(cr, uid, challenge, user_id=False, restrict_goal_ids=subset_goal_ids, restrict_top=False, context=context)
ctx.update({'challenge_lines': lines_boards})
body_html = temp_obj.render_template(cr, uid, challenge.report_template_id.body_html, 'gamification.challenge', challenge.id, context=ctx)
# send to every follower and participant of the challenge
self.message_post(cr, uid, challenge.id,
body=body_html,
partner_ids=[user.partner_id.id for user in challenge.user_ids],
context=context,
subtype='mail.mt_comment')
if challenge.report_message_group_id:
self.pool.get('mail.group').message_post(cr, uid, challenge.report_message_group_id.id,
body=body_html,
context=context,
subtype='mail.mt_comment')
else:
# generate individual reports
for user in users or challenge.user_ids:
goals = self._get_serialized_challenge_lines(cr, uid, challenge, user.id, restrict_goal_ids=subset_goal_ids, context=context)
if not goals:
continue
ctx.update({'challenge_lines': goals})
body_html = temp_obj.render_template(cr, user.id, challenge.report_template_id.body_html, 'gamification.challenge', challenge.id, context=ctx)
# send message only to users, not on the challenge
self.message_post(cr, uid, 0,
body=body_html,
partner_ids=[(4, user.partner_id.id)],
context=context,
subtype='mail.mt_comment')
if challenge.report_message_group_id:
self.pool.get('mail.group').message_post(cr, uid, challenge.report_message_group_id.id,
body=body_html,
context=context,
subtype='mail.mt_comment')
return self.write(cr, uid, challenge.id, {'last_report_date': fields.date.today()}, context=context)
##### Challenges #####
# TODO in trunk, remove unused parameter user_id
def accept_challenge(self, cr, uid, challenge_ids, context=None, user_id=None):
"""The user accept the suggested challenge"""
return self._accept_challenge(cr, uid, uid, challenge_ids, context=context)
def _accept_challenge(self, cr, uid, user_id, challenge_ids, context=None):
user = self.pool.get('res.users').browse(cr, uid, user_id, context=context)
message = "%s has joined the challenge" % user.name
self.message_post(cr, SUPERUSER_ID, challenge_ids, body=message, context=context)
self.write(cr, SUPERUSER_ID, challenge_ids, {'invited_user_ids': [(3, user_id)], 'user_ids': [(4, user_id)]}, context=context)
return self._generate_goals_from_challenge(cr, SUPERUSER_ID, challenge_ids, context=context)
# TODO in trunk, remove unused parameter user_id
def discard_challenge(self, cr, uid, challenge_ids, context=None, user_id=None):
"""The user discard the suggested challenge"""
return self._discard_challenge(cr, uid, uid, challenge_ids, context=context)
def _discard_challenge(self, cr, uid, user_id, challenge_ids, context=None):
user = self.pool.get('res.users').browse(cr, uid, user_id, context=context)
message = "%s has refused the challenge" % user.name
self.message_post(cr, SUPERUSER_ID, challenge_ids, body=message, context=context)
        return self.write(cr, SUPERUSER_ID, challenge_ids, {'invited_user_ids': [(3, user_id)]}, context=context)
def reply_challenge_wizard(self, cr, uid, challenge_id, context=None):
result = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'challenge_wizard')
id = result and result[1] or False
result = self.pool.get('ir.actions.act_window').read(cr, uid, [id], context=context)[0]
result['res_id'] = challenge_id
return result
def check_challenge_reward(self, cr, uid, ids, force=False, context=None):
"""Actions for the end of a challenge
If a reward was selected, grant it to the correct users.
Rewards granted at:
- the end date for a challenge with no periodicity
- the end of a period for challenge with periodicity
- when a challenge is manually closed
(if no end date, a running challenge is never rewarded)
"""
if isinstance(ids, (int,long)):
ids = [ids]
commit = context.get('commit_gamification', False)
for challenge in self.browse(cr, uid, ids, context=context):
(start_date, end_date) = start_end_date_for_period(challenge.period, challenge.start_date, challenge.end_date)
yesterday = date.today() - timedelta(days=1)
rewarded_users = []
challenge_ended = end_date == yesterday.strftime(DF) or force
if challenge.reward_id and (challenge_ended or challenge.reward_realtime):
                # not using start_date as intemporal goals have a start date but no end_date
reached_goals = self.pool.get('gamification.goal').read_group(cr, uid, [
('challenge_id', '=', challenge.id),
('end_date', '=', end_date),
('state', '=', 'reached')
], fields=['user_id'], groupby=['user_id'], context=context)
for reach_goals_user in reached_goals:
if reach_goals_user['user_id_count'] == len(challenge.line_ids):
# the user has succeeded every assigned goal
user_id = reach_goals_user['user_id'][0]
if challenge.reward_realtime:
badges = self.pool['gamification.badge.user'].search(cr, uid, [
('challenge_id', '=', challenge.id),
('badge_id', '=', challenge.reward_id.id),
('user_id', '=', user_id),
], count=True, context=context)
if badges > 0:
                                # has already received the badge for this challenge
continue
self.reward_user(cr, uid, user_id, challenge.reward_id.id, challenge.id, context=context)
rewarded_users.append(user_id)
if commit:
cr.commit()
if challenge_ended:
# open chatter message
message_body = _("The challenge %s is finished." % challenge.name)
if rewarded_users:
user_names = self.pool['res.users'].name_get(cr, uid, rewarded_users, context=context)
message_body += _("<br/>Reward (badge %s) for every succeeding user was sent to %s." % (challenge.reward_id.name, ", ".join([name for (user_id, name) in user_names])))
else:
message_body += _("<br/>Nobody has succeeded to reach every goal, no badge is rewared for this challenge.")
# reward bests
if challenge.reward_first_id:
(first_user, second_user, third_user) = self.get_top3_users(cr, uid, challenge, context=context)
if first_user:
self.reward_user(cr, uid, first_user.id, challenge.reward_first_id.id, challenge.id, context=context)
message_body += _("<br/>Special rewards were sent to the top competing users. The ranking for this challenge is :")
message_body += "<br/> 1. %s - %s" % (first_user.name, challenge.reward_first_id.name)
else:
message_body += _("Nobody reached the required conditions to receive special badges.")
if second_user and challenge.reward_second_id:
self.reward_user(cr, uid, second_user.id, challenge.reward_second_id.id, challenge.id, context=context)
message_body += "<br/> 2. %s - %s" % (second_user.name, challenge.reward_second_id.name)
if third_user and challenge.reward_third_id:
                        self.reward_user(cr, uid, third_user.id, challenge.reward_third_id.id, challenge.id, context=context)
message_body += "<br/> 3. %s - %s" % (third_user.name, challenge.reward_third_id.name)
self.message_post(cr, uid, challenge.id,
partner_ids=[user.partner_id.id for user in challenge.user_ids],
body=message_body,
context=context)
if commit:
cr.commit()
return True
def get_top3_users(self, cr, uid, challenge, context=None):
"""Get the top 3 users for a defined challenge
        Ranking criteria:
1. succeed every goal of the challenge
2. total completeness of each goal (can be over 100)
Top 3 is computed only for users succeeding every goal of the challenge,
except if reward_failure is True, in which case every user is
considered.
:return: ('first', 'second', 'third'), tuple containing the res.users
          objects of the top 3 users. If no user meets the criteria for a rank,
          it is set to False. No user can receive a rank if nobody receives the
          higher one (eg: if 'second' == False, 'third' will be False)
"""
goal_obj = self.pool.get('gamification.goal')
(start_date, end_date) = start_end_date_for_period(challenge.period, challenge.start_date, challenge.end_date)
challengers = []
for user in challenge.user_ids:
all_reached = True
total_completness = 0
# every goal of the user for the running period
goal_ids = goal_obj.search(cr, uid, [
('challenge_id', '=', challenge.id),
('user_id', '=', user.id),
('start_date', '=', start_date),
('end_date', '=', end_date)
], context=context)
for goal in goal_obj.browse(cr, uid, goal_ids, context=context):
if goal.state != 'reached':
all_reached = False
if goal.definition_condition == 'higher':
# can be over 100
total_completness += 100.0 * goal.current / goal.target_goal
elif goal.state == 'reached':
# for lower goals, can not get percentage so 0 or 100
total_completness += 100
challengers.append({'user': user, 'all_reached': all_reached, 'total_completness': total_completness})
sorted_challengers = sorted(challengers, key=lambda k: (k['all_reached'], k['total_completness']), reverse=True)
if len(sorted_challengers) == 0 or (not challenge.reward_failure and not sorted_challengers[0]['all_reached']):
# nobody succeeded
return (False, False, False)
if len(sorted_challengers) == 1 or (not challenge.reward_failure and not sorted_challengers[1]['all_reached']):
# only one user succeeded
return (sorted_challengers[0]['user'], False, False)
if len(sorted_challengers) == 2 or (not challenge.reward_failure and not sorted_challengers[2]['all_reached']):
            # only two users succeeded
return (sorted_challengers[0]['user'], sorted_challengers[1]['user'], False)
return (sorted_challengers[0]['user'], sorted_challengers[1]['user'], sorted_challengers[2]['user'])
def reward_user(self, cr, uid, user_id, badge_id, challenge_id=False, context=None):
"""Create a badge user and send the badge to him
:param user_id: the user to reward
:param badge_id: the concerned badge
"""
badge_user_obj = self.pool.get('gamification.badge.user')
user_badge_id = badge_user_obj.create(cr, uid, {'user_id': user_id, 'badge_id': badge_id, 'challenge_id':challenge_id}, context=context)
return badge_user_obj._send_badge(cr, uid, [user_badge_id], context=context)
class gamification_challenge_line(osv.Model):
"""Gamification challenge line
    Predefined goal for 'gamification_challenge'
    These are generic lists of goals with only the target goal defined
Should only be created for the gamification_challenge object
"""
_name = 'gamification.challenge.line'
_description = 'Gamification generic goal for challenge'
_order = "sequence, id"
def on_change_definition_id(self, cr, uid, ids, definition_id=False, context=None):
goal_definition = self.pool.get('gamification.goal.definition')
if not definition_id:
return {'value': {'definition_id': False}}
goal_definition = goal_definition.browse(cr, uid, definition_id, context=context)
ret = {
'value': {
'condition': goal_definition.condition,
'definition_full_suffix': goal_definition.full_suffix
}
}
return ret
_columns = {
'name': fields.related('definition_id', 'name', string="Name", type="char"),
'challenge_id': fields.many2one('gamification.challenge',
string='Challenge',
required=True,
ondelete="cascade"),
'definition_id': fields.many2one('gamification.goal.definition',
string='Goal Definition',
required=True,
ondelete="cascade"),
'target_goal': fields.float('Target Value to Reach',
required=True),
'sequence': fields.integer('Sequence',
help='Sequence number for ordering'),
'condition': fields.related('definition_id', 'condition', type="selection",
readonly=True, string="Condition", selection=[('lower', '<='), ('higher', '>=')]),
'definition_suffix': fields.related('definition_id', 'suffix', type="char", readonly=True, string="Unit"),
'definition_monetary': fields.related('definition_id', 'monetary', type="boolean", readonly=True, string="Monetary"),
'definition_full_suffix': fields.related('definition_id', 'full_suffix', type="char", readonly=True, string="Suffix"),
}
    _defaults = {
'sequence': 1,
}
|
google-research/tiny-differentiable-simulator | refs/heads/master | python/examples/whole_body_control/static_gait_controller.py | 2 | # Lint as: python3
"""A static gait controller for a quadruped robot. Experimental code."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)
import numpy as np
from mpc_controller import foot_stepper
toe_pos_local_ref = np.array([[0.1478, -0.11459, -0.45576],
[0.1478, 0.11688, -0.45576],
[-0.2895, -0.11459, -0.45576],
[-0.2895, 0.11688, -0.45576]])
class StaticGaitController(object):
"""A static gait controller for a quadruped robot."""
def __init__(self, robot):
self._robot = robot
self._toe_ids = tuple(robot.urdf_loader.get_end_effector_id_dict().values())
self._wait_count = 0
self._stepper = foot_stepper.FootStepper(self._robot.pybullet_client,
self._toe_ids, toe_pos_local_ref)
def act(self, observation):
"""Computes actions based on observations."""
del observation
p = self._robot.pybullet_client
quadruped = self._robot.robot_id
step_input = foot_stepper.StepInput()
ls = p.getLinkStates(
quadruped, self._toe_ids, computeForwardKinematics=True)
toe_pos_world = np.array([ls[0][0], ls[1][0], ls[2][0], ls[3][0]])
base_com_pos, base_com_orn = p.getBasePositionAndOrientation(quadruped)
new_pos_world = np.array([0, 0, 0])
if self._stepper.is_com_stable() and not self._stepper.move_swing_foot:
self._wait_count += 1
if self._wait_count == 20:
self._stepper.next_foot()
if self._wait_count > 50:
self._wait_count = 0
step_dist = 0.15
print("time {}, make a step of {}".format(
self._robot.GetTimeSinceReset(), step_dist))
new_pos_local = self._stepper.get_reference_pos_swing_foot()
new_pos_local[0] += step_dist
new_pos_world, _ = p.multiplyTransforms(base_com_pos, base_com_orn,
new_pos_local, [0, 0, 0, 1])
self._stepper.swing_foot()
step_input.new_pos_world = new_pos_world
step_input.base_com_pos = base_com_pos
step_input.base_com_orn = base_com_orn
step_input.toe_pos_world = toe_pos_world
step_input.dt = 1.0 / 250
step_output = self._stepper.update(step_input)
# Finds joint poses to achieve toePosWorld
desired_joint_angles = self._robot.motor_angles_from_foot_positions(
foot_positions=step_output.new_toe_pos_world,
position_in_world_frame=True)[1]
return desired_joint_angles
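# Illustrative sketch (assumption, not part of this file): the controller is meant
# to be polled from a simulation loop, roughly as below; the `robot` construction
# and the `Step` call are placeholders for the surrounding framework's API.
#
#   controller = StaticGaitController(robot)
#   while simulation_running:
#     action = controller.act(observation=None)
#     robot.Step(action)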
|
syci/OCB | refs/heads/9.0 | addons/google_calendar/__openerp__.py | 19 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Google Calendar',
'version': '1.0',
'category': 'Tools',
'description': """
The module adds the possibility to synchronize Google Calendar with OpenERP
===========================================================================
""",
'website': 'https://www.odoo.com/page/crm',
'depends': ['google_account', 'calendar'],
'qweb': ['static/src/xml/*.xml'],
'data': [
'res_config_view.xml',
'security/ir.model.access.csv',
'views/google_calendar.xml',
'views/res_users.xml',
'google_calendar.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
|
supermanue/distributedController | refs/heads/master | clusterController/PbsTask.py | 1 | '''
Created on Feb 22, 2013
@author: u5682
'''
from DistributedTask import DistributedTask
import ShellExecution
import os
class PBSTask(object):
'''
    Wraps a DistributedTask so it can be submitted to a PBS batch queue via qsub.
'''
def __init__(self, distributedTask):
'''
Constructor
'''
self.task = distributedTask
def submit(self):
arguments = ' '.join(self.task.arguments)
currentDir = os.getcwd()
os.chdir(self.task.workingDirectory)
commandToExecute = "/opt/pbs/bin/qsub -l walltime=23:59:00 -v " +\
"working_directory=" + self.task.workingDirectory + \
",executable="+ self.task.executable +\
",arguments=\"'"+arguments+"'\" " + \
currentDir + "/clusterController/clusterControllerScript.sh"
print ("Submitting task to PBS")
output = ShellExecution.execute(commandToExecute)
#print (" " + self.task.workingDirectory + "/" + self.task.executable + " " + arguments)
#print (" " + output)
os.chdir(currentDir)
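        # Illustrative example (assumption, not from the original file) of the
        # command built above, with placeholder paths:
        #   /opt/pbs/bin/qsub -l walltime=23:59:00 \
        #       -v working_directory=/home/user/job,executable=run.sh,arguments="'arg1 arg2'" \
        #       /current/dir/clusterController/clusterControllerScript.sh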
|
csparpa/robograph | refs/heads/master | robograph/datamodel/nodes/lib/transcoders.py | 1 | import json
import os
from robograph.datamodel.base import node
class ToJSON(node.Node):
"""
This node converts serializable data to JSON
Requirements:
data --> data to be dumped to JSON
Eg:
ToJSON(data=[1,2,3])
ToJSON(dict(a="1",b="2"))
"""
_reqs = ['data']
def output(self):
return json.dumps(self._params['data'])
class ToCSV(node.Node):
"""
This node converts a data matrix to CSV
Requirements:
data_matrix --> iterable of lists (csv rows)
header_list --> header data list (csv header)
delimiter --> separator token for row values
linesep --> newline char
Eg:
ToCSV(data_matrix=[[1,2,3],[4,5,6],[7,8,9]],
header_list=['one','two','three'],
delimiter=',',
linesep='\n')
"""
_reqs = ['data_matrix', 'header_list', 'delimiter', 'linesep']
def output(self):
if self._params['delimiter'] is None:
delim = ','
else:
delim = self._params['delimiter']
if self._params['linesep'] is None:
eol = os.linesep
else:
eol = self._params['linesep']
lines = eol.join([delim.join(map(str, row)) for row in self._params['data_matrix']])
if self._params['header_list']:
header = delim.join(self._params['header_list'])
else:
header = ''
return header + eol + lines
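# Illustrative note (not part of the original file): with the inputs from the
# ToCSV docstring example above, output() returns the string
#   one,two,three
#   1,2,3
#   4,5,6
#   7,8,9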
|
Yas3r/OWASP-ZSC | refs/heads/master | lib/encoder/freebsd_x86/xor_random.py | 20 | #!/usr/bin/env python
'''
OWASP ZSC | ZCR Shellcoder
ZeroDay Cyber Research
Z3r0D4y.Com
Ali Razmjoo
'''
import random,binascii,string
chars = string.digits + string.ascii_letters
def start(shellcode,job):
    # prepend 'N' to the shellcode for each supported payload type marker found in the job string
    payload_markers = ('chmod(', 'dir_create(', 'download_execute(', 'download(',
                       'exec(', 'file_create(', 'script_executor(', 'system(', 'write(')
    for marker in payload_markers:
        if marker in job:
            shellcode = 'N' + shellcode
    return shellcode
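# Illustrative example (not part of the original encoder): for a job string such as
# "exec('/bin/sh')", start() returns the shellcode prefixed with a single 'N',
# e.g. start('\x31\xc0...', "exec('/bin/sh')") -> 'N\x31\xc0...'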
|
suutari/shoop | refs/heads/master | shuup/core/models/_shops.py | 1 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum, EnumIntegerField
from filer.fields.image import FilerImageField
from jsonfield import JSONField
from parler.models import TranslatedFields
from shuup.core.fields import CurrencyField, InternalIdentifierField
from shuup.core.pricing import TaxfulPrice, TaxlessPrice
from shuup.utils.analog import define_log_model
from ._base import ChangeProtected, TranslatableShuupModel
from ._orders import Order
def _get_default_currency():
return settings.SHUUP_HOME_CURRENCY
class ShopStatus(Enum):
DISABLED = 0
ENABLED = 1
class Labels:
DISABLED = _('disabled')
ENABLED = _('enabled')
@python_2_unicode_compatible
class Shop(ChangeProtected, TranslatableShuupModel):
protected_fields = ["currency", "prices_include_tax"]
change_protect_message = _("The following fields cannot be changed since there are existing orders for this shop")
identifier = InternalIdentifierField(unique=True)
domain = models.CharField(max_length=128, blank=True, null=True, unique=True, verbose_name=_("domain"), help_text=_(
"Your shop domain name. Use this field to configure the URL that is used to visit your site. "
"Note: this requires additional configuration through your internet domain registrar."
))
status = EnumIntegerField(ShopStatus, default=ShopStatus.DISABLED, verbose_name=_("status"), help_text=_(
"Your shop status. Disable your shop if it is no longer in use."
))
owner = models.ForeignKey("Contact", blank=True, null=True, on_delete=models.SET_NULL, verbose_name=_("contact"))
options = JSONField(blank=True, null=True, verbose_name=_("options"))
currency = CurrencyField(default=_get_default_currency, verbose_name=_("currency"), help_text=_(
"The primary shop currency. This is the currency used when selling your products."
))
prices_include_tax = models.BooleanField(default=True, verbose_name=_("prices include tax"), help_text=_(
"This option defines whether product prices entered in admin include taxes. "
"Note this behavior can be overridden with contact group pricing."
))
logo = FilerImageField(verbose_name=_("logo"), blank=True, null=True, on_delete=models.SET_NULL)
maintenance_mode = models.BooleanField(verbose_name=_("maintenance mode"), default=False, help_text=_(
"Check this if you would like to make your shop temporarily unavailable while you do some shop maintenance."
))
contact_address = models.ForeignKey(
"MutableAddress", verbose_name=_("contact address"), blank=True, null=True, on_delete=models.SET_NULL)
translations = TranslatedFields(
name=models.CharField(max_length=64, verbose_name=_("name"), help_text=_(
"The shop name. This name is displayed throughout admin."
)),
public_name=models.CharField(max_length=64, verbose_name=_("public name"), help_text=_(
"The public shop name. This name is displayed in the store front and in any customer email correspondence."
)),
maintenance_message=models.CharField(
max_length=300, blank=True, verbose_name=_("maintenance message"), help_text=_(
"The message to display to customers while your shop is in maintenance mode."
)
)
)
def __str__(self):
return self.safe_translation_getter("name", default="Shop %d" % self.pk)
def create_price(self, value):
"""
Create a price with given value and settings of this shop.
Takes the ``prices_include_tax`` and ``currency`` settings of
this Shop into account.
:type value: decimal.Decimal|int|str
:rtype: shuup.core.pricing.Price
"""
if self.prices_include_tax:
return TaxfulPrice(value, self.currency)
else:
return TaxlessPrice(value, self.currency)
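        # Illustrative sketch (assumption, not from the original module): for a shop
        # with prices_include_tax=True and currency='EUR',
        #   shop.create_price('12.50')  ->  TaxfulPrice('12.50', 'EUR')
        # while a shop storing tax-exclusive prices would get a TaxlessPrice instead.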
def _are_changes_protected(self):
return Order.objects.filter(shop=self).exists()
ShopLogEntry = define_log_model(Shop)
|
timvandermeij/drone-tomography | refs/heads/master | tests/zigbee_rf_sensor.py | 3 | # Core imports
import Queue
import thread
import time
# Library imports
from mock import patch, MagicMock, PropertyMock
# Package imports
from ..core.Thread_Manager import Thread_Manager
from ..core.Threadable import Threadable
from ..settings.Arguments import Arguments
from ..reconstruction.Buffer import Buffer
from ..zigbee.Packet import Packet
from ..zigbee.RF_Sensor import RF_Sensor, DisabledException
from ..zigbee.TDMA_Scheduler import TDMA_Scheduler
from settings import SettingsTestCase
from zigbee_packet import ZigBeePacketTestCase
class ZigBeeRFSensorTestCase(SettingsTestCase, ZigBeePacketTestCase):
"""
Test case base class that provides the necessities to create one of the
`RF_Sensor` types of objects.
"""
def setUp(self):
super(ZigBeeRFSensorTestCase, self).setUp()
self.arguments = Arguments("settings.json", ["--rf-sensor-id", "1"])
self.thread_manager = Thread_Manager()
self.location_callback = MagicMock(return_value=((0, 0), 0))
self.receive_callback = MagicMock()
self.valid_callback = MagicMock(return_value=(True, True))
def _create_sensor(self, sensor_type, **kwargs):
"""
Create the RF sensor object. The `sensor_type` is a class that is either
`RF_Sensor` or a subclass thereof. Additional keyword arguments are
passed through to the object initialization.
The resulting `RF_Sensor` object is returned.
"""
return sensor_type(self.arguments, self.thread_manager,
self.location_callback, self.receive_callback,
self.valid_callback, **kwargs)
class TestZigBeeRFSensor(ZigBeeRFSensorTestCase):
def setUp(self):
super(TestZigBeeRFSensor, self).setUp()
self.settings = self.arguments.get_settings("zigbee_base")
type_mock = PropertyMock(return_value="zigbee_base")
with patch.object(RF_Sensor, "type", new_callable=type_mock):
self.rf_sensor = self._create_sensor(RF_Sensor)
def test_initialization(self):
# Providing an uncallable callback raises an exception.
with self.assertRaises(TypeError):
RF_Sensor(self.arguments, self.thread_manager, None, None, None)
# Not providing an `Arguments` object raises an exception.
with self.assertRaises(ValueError):
RF_Sensor(None, self.thread_manager, self.location_callback,
self.receive_callback, self.valid_callback)
# The settings must be loaded when an `Arguments` object is provided.
self.assertEqual(self.rf_sensor._settings, self.settings)
# Common member variables must be initialized.
self.assertEqual(self.rf_sensor._id, self.settings.get("rf_sensor_id"))
self.assertEqual(self.rf_sensor._number_of_sensors,
self.settings.get("number_of_sensors"))
self.assertEqual(self.rf_sensor._address, None)
self.assertEqual(self.rf_sensor._connection, None)
self.assertEqual(self.rf_sensor._buffer, None)
self.assertIsInstance(self.rf_sensor._scheduler, TDMA_Scheduler)
self.assertIsInstance(self.rf_sensor._packets, Queue.Queue)
self.assertEqual(self.rf_sensor._packets.qsize(), 0)
self.assertIsInstance(self.rf_sensor._custom_packets, Queue.Queue)
self.assertEqual(self.rf_sensor._custom_packets.qsize(), 0)
self.assertEqual(self.rf_sensor._joined, False)
self.assertEqual(self.rf_sensor._activated, False)
self.assertEqual(self.rf_sensor._started, False)
self.assertEqual(self.rf_sensor._loop_delay, self.settings.get("loop_delay"))
self.assertTrue(hasattr(self.rf_sensor._location_callback, "__call__"))
self.assertTrue(hasattr(self.rf_sensor._receive_callback, "__call__"))
self.assertTrue(hasattr(self.rf_sensor._valid_callback, "__call__"))
def test_id(self):
# The RF sensor ID must be returned.
self.assertEqual(self.rf_sensor.id, self.rf_sensor._id)
def test_number_of_sensors(self):
# The number of sensors must be returned.
self.assertEqual(self.rf_sensor.number_of_sensors,
self.rf_sensor._number_of_sensors)
def test_buffer(self):
# Providing an invalid buffer raises an exception.
with self.assertRaises(ValueError):
self.rf_sensor.buffer = []
# A valid buffer must be set and returned.
buffer = Buffer(self.settings)
self.rf_sensor.buffer = buffer
self.assertEqual(self.rf_sensor.buffer, buffer)
def test_type(self):
# Verify that the interface requires subclasses to implement
# the `type` property.
with self.assertRaises(NotImplementedError):
dummy = self.rf_sensor.type
def test_identity(self):
# The identity must include the ID, address and network join status.
self.assertEqual(self.rf_sensor.identity, {
"id": self.rf_sensor._id,
"address": self.rf_sensor._address,
"joined": self.rf_sensor._joined
})
def test_activate(self):
with patch.object(RF_Sensor, "_setup") as setup_mock:
with patch.object(thread, "start_new_thread") as start_new_thread_mock:
self.rf_sensor.activate()
# The sensor must be setup and the loop thread must be started.
self.assertTrue(self.rf_sensor._activated)
self.assertEqual(setup_mock.call_count, 1)
self.assertEqual(start_new_thread_mock.call_count, 1)
def test_deactivate(self):
connection_mock = MagicMock()
with patch.object(RF_Sensor, "_setup"):
with patch.object(thread, "start_new_thread"):
self.rf_sensor.activate()
self.rf_sensor._connection = connection_mock
self.rf_sensor.deactivate()
# The connection must be closed and the sensor must be deactivated.
self.assertEqual(self.rf_sensor._activated, False)
self.assertEqual(connection_mock.close.call_count, 1)
self.assertEqual(self.rf_sensor._connection, None)
def test_start(self):
# The sensor must be started for sending RSSI broadcast/ground
# station packets. Make sure that the schedule will try to shift again
# when the measurements start.
self.rf_sensor.start()
self.assertTrue(self.rf_sensor._started)
self.assertEqual(self.rf_sensor._packets.qsize(), 0)
self.assertNotEqual(self.rf_sensor._scheduler.timestamp, 0.0)
def test_stop(self):
# Pretend that we start the RF sensor so that we know that `stop`
# functions.
self.rf_sensor.start()
# The sensor must be stopped for sending custom packets. Make sure that
# the scheduler timestamp is reset, so that it updates correctly in
# case we restart the sensor measurements.
self.rf_sensor.stop()
self.assertEqual(self.rf_sensor._started, False)
self.assertEqual(self.rf_sensor._scheduler.timestamp, 0.0)
def test_enqueue(self):
# Providing a packet that is not a `Packet` object raises an exception.
with self.assertRaises(TypeError):
self.rf_sensor.enqueue({
"foo": "bar"
})
# Providing a private packet raises an exception.
with self.assertRaises(ValueError):
self.packet.set("specification", "rssi_broadcast")
self.rf_sensor.enqueue(self.packet)
# Packets that do not have a destination must be broadcasted.
# We subtract one because we do not send to ourself.
self.packet.set("specification", "waypoint_clear")
self.packet.set("to_id", 2)
self.rf_sensor.enqueue(self.packet)
self.assertEqual(self.rf_sensor._custom_packets.qsize(),
self.rf_sensor.number_of_sensors - 1)
for to_id in xrange(1, self.rf_sensor.number_of_sensors + 1):
if to_id == self.rf_sensor.id:
continue
item = self.rf_sensor._custom_packets.get()
self.assertIsInstance(item["packet"], Packet)
self.assertEqual(item["packet"].get_all(), {
"specification": "waypoint_clear",
"to_id": 2
})
self.assertEqual(item["to"], to_id)
self.assertEqual(self.rf_sensor._custom_packets.qsize(), 0)
# Packets that do contain a destination must be enqueued directly.
self.rf_sensor.enqueue(self.packet, to=2)
self.assertEqual(self.rf_sensor._custom_packets.qsize(), 1)
self.assertEqual(self.rf_sensor._custom_packets.get(), {
"packet": self.packet,
"to": 2
})
self.assertEqual(self.rf_sensor._custom_packets.qsize(), 0)
def test_discover(self):
# Providing an invalid callback raises an exception.
with self.assertRaisesRegexp(TypeError, "callback is not callable"):
self.rf_sensor.discover(None)
callback_mock = MagicMock()
# Providing invalid required sensors raises an exception.
with self.assertRaisesRegexp(TypeError, "must be a `set`"):
self.rf_sensor.discover(callback_mock, required_sensors=2)
# Providing an set of required sensors that cannot be discovered raises
# an exception.
with self.assertRaisesRegexp(ValueError, "only contain vehicle sensors"):
self.rf_sensor.discover(callback_mock, required_sensors=set([0]))
def test_setup(self):
# Verify that the interface requires subclasses to implement
# the `_setup` method.
with self.assertRaises(NotImplementedError):
self.rf_sensor._setup()
def test_loop(self):
self.rf_sensor._activated = True
with patch.object(Threadable, "interrupt") as interrupt_mock:
with patch.object(RF_Sensor, "_loop_body") as loop_body_mock:
# The loop body and interrupt handler must be called when
# an exception other than a `DisabledException` is raised.
loop_body_mock.configure_mock(side_effect=RuntimeError)
self.rf_sensor._loop()
loop_body_mock.assert_called_once_with()
interrupt_mock.assert_called_once_with()
with patch.object(Threadable, "interrupt") as interrupt_mock:
with patch.object(RF_Sensor, "_loop_body") as loop_body_mock:
# The loop body must be called when a `DisabledException` is
# raised, but nothing else must happen.
loop_body_mock.configure_mock(side_effect=DisabledException)
self.rf_sensor._loop()
loop_body_mock.assert_called_once_with()
interrupt_mock.assert_not_called()
def test_loop_body(self):
with patch.object(RF_Sensor, "_send_custom_packets") as send_custom_packets_mock:
# Send custom packets when the sensor has been activated,
# but not started.
self.rf_sensor._loop_body()
send_custom_packets_mock.assert_called_once_with()
# If the current time is inside an allocated slot, then packets
# may be sent.
in_slot_mock = PropertyMock(return_value=True)
with patch.object(TDMA_Scheduler, "in_slot", new_callable=in_slot_mock):
with patch.object(TDMA_Scheduler, "update") as update_mock:
with patch.object(RF_Sensor, "_send") as send_mock:
self.rf_sensor._started = True
# Send RSSI broadcast/ground station packets when the sensor
# has been activated and started.
self.rf_sensor._loop_body()
update_mock.assert_called_once_with()
send_mock.assert_called_once_with()
# If the current time is not inside an allocated slot, then no
# packets may be sent.
in_slot_mock = PropertyMock(return_value=False)
with patch.object(TDMA_Scheduler, "in_slot", new_callable=in_slot_mock):
with patch.object(TDMA_Scheduler, "update") as update_mock:
with patch.object(RF_Sensor, "_send") as send_mock:
self.rf_sensor._started = True
# Send RSSI broadcast/ground station packets when the sensor
# has been activated and started.
self.rf_sensor._loop_body()
update_mock.assert_not_called()
send_mock.assert_not_called()
@patch.object(RF_Sensor, "_send_tx_frame")
def test_send(self, send_tx_frame_mock):
self.rf_sensor._packets.put(self.rf_sensor._create_rssi_broadcast_packet(2))
# If the current time is inside an allocated slot, then packets
# may be sent.
in_slot_mock = PropertyMock(return_value=True)
with patch.object(TDMA_Scheduler, "in_slot", new_callable=in_slot_mock):
self.rf_sensor._send()
calls = send_tx_frame_mock.call_args_list
# RSSI broadcast packets must be sent to all sensors in the network
# (excluding ourself). Note that we do not inspect the packet contents
# other than the specification because that is covered in the test
# for the `_create_rssi_broadcast_packet` method.
for to_id in xrange(1, self.rf_sensor.number_of_sensors + 1):
if to_id == self.rf_sensor.id:
continue
packet, to = calls.pop(0)[0]
self.assertIsInstance(packet, Packet)
self.assertEqual(packet.get("specification"), "rssi_broadcast")
self.assertEqual(to, to_id)
# RSSI ground station packets must be sent to the ground station.
# The packet list must be empty afterwards. We added one packet to the
# list at the start of this test, so we must detect it here.
packet, to = calls.pop(0)[0]
self.assertIsInstance(packet, Packet)
self.assertEqual(packet.get("specification"), "rssi_broadcast")
self.assertEqual(to, 0)
self.assertEqual(self.rf_sensor._packets.qsize(), 0)
send_tx_frame_mock.reset_mock()
# If the current time is not inside an allocated slot, then no
# packets may be sent.
in_slot_mock = PropertyMock(return_value=False)
with patch.object(TDMA_Scheduler, "in_slot", new_callable=in_slot_mock):
self.rf_sensor._send()
send_tx_frame_mock.assert_not_called()
def test_send_custom_packets(self):
self.packet.set("specification", "waypoint_clear")
self.packet.set("to_id", 2)
self.rf_sensor.enqueue(self.packet, to=2)
with patch.object(RF_Sensor, "_send_tx_frame") as send_tx_frame_mock:
self.rf_sensor._send_custom_packets()
# Custom packets must be sent to their destinations.
packet, to = send_tx_frame_mock.call_args[0]
self.assertIsInstance(packet, Packet)
self.assertEqual(packet.get("specification"), "waypoint_clear")
self.assertEqual(packet.get("to_id"), 2)
self.assertEqual(to, 2)
self.assertEqual(self.rf_sensor._custom_packets.qsize(), 0)
def test_send_tx_frame(self):
# Having a closed connection raises an exception.
with self.assertRaises(DisabledException):
self.rf_sensor._send_tx_frame(self.packet, to=2)
with patch.object(self.rf_sensor, "_connection"):
# Providing an invalid packet raises an exception.
with self.assertRaises(TypeError):
self.rf_sensor._send_tx_frame(None, to=2)
# Providing an invalid destination raises an exception.
with self.assertRaises(TypeError):
self.rf_sensor._send_tx_frame(self.packet)
def test_receive(self):
# Verify that the interface requires subclasses to implement
# the `_receive` method.
with self.assertRaises(NotImplementedError):
self.rf_sensor._receive(packet=self.packet)
def test_create_rssi_broadcast_packet(self):
packet = self.rf_sensor._create_rssi_broadcast_packet(2)
self.assertIsInstance(packet, Packet)
self.assertEqual(packet.get("specification"), "rssi_broadcast")
self.assertEqual(packet.get("latitude"), 0)
self.assertEqual(packet.get("longitude"), 0)
self.assertTrue(packet.get("valid"))
self.assertEqual(packet.get("waypoint_index"), 0)
self.assertEqual(packet.get("sensor_id"), self.rf_sensor.id)
self.assertAlmostEqual(packet.get("timestamp"), time.time(), delta=0.1)
def test_create_rssi_ground_station_packet(self):
rssi_broadcast_packet = self.rf_sensor._create_rssi_broadcast_packet(2)
packet = self.rf_sensor._create_rssi_ground_station_packet(rssi_broadcast_packet)
self.assertIsInstance(packet, Packet)
self.assertEqual(packet.get("specification"), "rssi_ground_station")
self.assertEqual(packet.get("sensor_id"), self.rf_sensor.id)
self.assertEqual(packet.get("from_latitude"), rssi_broadcast_packet.get("latitude"))
self.assertEqual(packet.get("from_longitude"), rssi_broadcast_packet.get("longitude"))
self.assertEqual(packet.get("from_valid"), rssi_broadcast_packet.get("valid"))
self.assertEqual(packet.get("to_latitude"), 0)
self.assertEqual(packet.get("to_longitude"), 0)
self.assertTrue(packet.get("to_valid"))
|
alekz112/statsmodels | refs/heads/master | statsmodels/tsa/arima_model.py | 7 | # Note: The information criteria add 1 to the number of parameters
# whenever the model has an AR or MA term since, in principle,
# the variance could be treated as a free parameter and restricted
# This code does not allow this, but it adds consistency with other
# packages such as gretl and X12-ARIMA
from __future__ import absolute_import
from statsmodels.compat.python import string_types, range
# for 2to3 with extensions
from datetime import datetime
import numpy as np
from scipy import optimize
from scipy.stats import t, norm
from scipy.signal import lfilter
from numpy import dot, log, zeros, pi
from numpy.linalg import inv
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
from statsmodels.regression.linear_model import yule_walker, GLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams,
_ma_transparams, _ma_invtransparams,
unintegrate, unintegrate_levels)
from statsmodels.tsa.vector_ar import util
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_process import arma2ma
from statsmodels.tools.numdiff import approx_hess_cs, approx_fprime_cs
from statsmodels.tsa.base.datetools import _index_date
from statsmodels.tsa.kalmanf import KalmanFilter
_armax_notes = """
Notes
-----
If exogenous variables are given, then the model that is fit is
.. math::
\\phi(L)(y_t - X_t\\beta) = \\theta(L)\epsilon_t
where :math:`\\phi` and :math:`\\theta` are polynomials in the lag
operator, :math:`L`. This is the regression model with ARMA errors,
or ARMAX model. This specification is used, whether or not the model
is fit using conditional sum of square or maximum-likelihood, using
the `method` argument in
:meth:`statsmodels.tsa.arima_model.%(Model)s.fit`. Therefore, for
now, `css` and `mle` refer to estimation methods only. This may
change for the case of the `css` model in future versions.
"""
_arma_params = """\
endog : array-like
The endogenous variable.
order : iterable
The (p,q) order of the model for the number of AR parameters,
differences, and MA parameters to use.
exog : array-like, optional
        An optional array of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method."""
_arma_model = "Autoregressive Moving Average ARMA(p,q) Model"
_arima_model = "Autoregressive Integrated Moving Average ARIMA(p,d,q) Model"
_arima_params = """\
endog : array-like
The endogenous variable.
order : iterable
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters to use.
exog : array-like, optional
        An optional array of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method."""
_predict_notes = """
Notes
-----
Use the results predict method instead.
"""
_results_notes = """
Notes
-----
It is recommended to use dates with the time-series models, as the
below will probably make clear. However, if ARIMA is used without
dates and/or `start` and `end` are given as indices, then these
indices are in terms of the *original*, undifferenced series. Ie.,
given some undifferenced observations::
1970Q1, 1
1970Q2, 1.5
1970Q3, 1.25
1970Q4, 2.25
1971Q1, 1.2
1971Q2, 4.1
1970Q1 is observation 0 in the original series. However, if we fit an
ARIMA(p,1,q) model then we lose this first observation through
differencing. Therefore, the first observation we can forecast (if
using exact MLE) is index 1. In the differenced series this is index
0, but we refer to it as 1 from the original series.
"""
_predict = """
%(Model)s model in-sample and out-of-sample prediction
Parameters
----------
%(params)s
start : int, str, or datetime
Zero-indexed observation number at which to start forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type.
end : int, str, or datetime
Zero-indexed observation number at which to end forecasting, ie.,
    the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction.
exog : array-like, optional
If the model is an ARMAX and out-of-sample forecasting is
requested, exog must be given. Note that you'll need to pass
`k_ar` additional lags for any exogenous variables. E.g., if you
fit an ARMAX(2, q) model and want to predict 5 steps, you need 7
observations to do this.
dynamic : bool, optional
The `dynamic` keyword affects in-sample prediction. If dynamic
is False, then the in-sample lagged values are used for
prediction. If `dynamic` is True, then in-sample forecasts are
used in place of lagged dependent variables. The first forecasted
value is `start`.
%(extra_params)s
Returns
-------
%(returns)s
%(extra_section)s
"""
_predict_returns = """predict : array
The predicted values.
"""
_arma_predict = _predict % {"Model" : "ARMA",
"params" : """
params : array-like
The fitted parameters of the model.""",
"extra_params" : "",
"returns" : _predict_returns,
"extra_section" : _predict_notes}
_arma_results_predict = _predict % {"Model" : "ARMA", "params" : "",
"extra_params" : "",
"returns" : _predict_returns,
"extra_section" : _results_notes}
_arima_predict = _predict % {"Model" : "ARIMA",
"params" : """params : array-like
The fitted parameters of the model.""",
"extra_params" : """typ : str {'linear', 'levels'}
- 'linear' : Linear prediction in terms of the differenced
endogenous variables.
- 'levels' : Predict the levels of the original endogenous
variables.\n""", "returns" : _predict_returns,
"extra_section" : _predict_notes}
_arima_results_predict = _predict % {"Model" : "ARIMA",
"params" : "",
"extra_params" :
"""typ : str {'linear', 'levels'}
- 'linear' : Linear prediction in terms of the differenced
endogenous variables.
- 'levels' : Predict the levels of the original endogenous
variables.\n""",
"returns" : _predict_returns,
"extra_section" : _results_notes}
_arima_plot_predict_example = """ Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import pandas as pd
>>>
>>> dta = sm.datasets.sunspots.load_pandas().data[['SUNACTIVITY']]
>>> dta.index = pd.DatetimeIndex(start='1700', end='2009', freq='A')
>>> res = sm.tsa.ARMA(dta, (3, 0)).fit()
>>> fig, ax = plt.subplots()
>>> ax = dta.ix['1950':].plot(ax=ax)
>>> fig = res.plot_predict('1990', '2012', dynamic=True, ax=ax,
... plot_insample=False)
>>> plt.show()
.. plot:: plots/arma_predict_plot.py
"""
_plot_predict = ("""
Plot forecasts
""" + '\n'.join(_predict.split('\n')[2:])) % {
"params" : "",
"extra_params" : """alpha : float, optional
The confidence intervals for the forecasts are (1 - alpha)%
plot_insample : bool, optional
Whether to plot the in-sample series. Default is True.
ax : matplotlib.Axes, optional
Existing axes to plot with.""",
"returns" : """fig : matplotlib.Figure
The plotted Figure instance""",
"extra_section" : ('\n' + _arima_plot_predict_example +
'\n' + _results_notes)
}
_arima_plot_predict = ("""
Plot forecasts
""" + '\n'.join(_predict.split('\n')[2:])) % {
"params" : "",
"extra_params" : """alpha : float, optional
The confidence intervals for the forecasts are (1 - alpha)%
plot_insample : bool, optional
Whether to plot the in-sample series. Default is True.
ax : matplotlib.Axes, optional
Existing axes to plot with.""",
"returns" : """fig : matplotlib.Figure
The plotted Figure instance""",
"extra_section" : ('\n' + _arima_plot_predict_example +
'\n' +
'\n'.join(_results_notes.split('\n')[:3]) +
("""
This is hard-coded to only allow plotting of the forecasts in levels.
""") +
'\n'.join(_results_notes.split('\n')[3:]))
}
def cumsum_n(x, n):
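    # Applies np.cumsum to x recursively n times, e.g.
    # cumsum_n(np.array([1., 2., 3.]), 2) -> array([1., 4., 10.]).
    # Used in ARIMAResults._forecast_error to account for differencing.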
if n:
n -= 1
x = np.cumsum(x)
return cumsum_n(x, n)
else:
return x
def _check_arima_start(start, k_ar, k_diff, method, dynamic):
if start < 0:
raise ValueError("The start index %d of the original series "
"has been differenced away" % start)
elif (dynamic or 'mle' not in method) and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE "
"or dynamic forecast. Got %d" % start)
def _get_predict_out_of_sample(endog, p, q, k_trend, k_exog, start, errors,
trendparam, exparams, arparams, maparams, steps,
method, exog=None):
"""
Returns endog, resid, mu of appropriate length for out of sample
prediction.
"""
if q:
resid = np.zeros(q)
if start and 'mle' in method or (start == p and not start == 0):
resid[:q] = errors[start-q:start]
elif start:
resid[:q] = errors[start-q-p:start-p]
else:
resid[:q] = errors[-q:]
else:
resid = None
y = endog
if k_trend == 1:
# use expectation not constant
if k_exog > 0:
#TODO: technically should only hold for MLE not
# conditional model. See #274.
# ensure 2-d for conformability
if np.ndim(exog) == 1 and k_exog == 1:
# have a 1d series of observations -> 2d
exog = exog[:, None]
elif np.ndim(exog) == 1:
# should have a 1d row of exog -> 2d
if len(exog) != k_exog:
raise ValueError("1d exog given and len(exog) != k_exog")
exog = exog[None, :]
X = lagmat(np.dot(exog, exparams), p, original='in', trim='both')
mu = trendparam * (1 - arparams.sum())
# arparams were reversed in unpack for ease later
mu = mu + (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]
else:
mu = trendparam * (1 - arparams.sum())
mu = np.array([mu]*steps)
elif k_exog > 0:
X = np.dot(exog, exparams)
#NOTE: you shouldn't have to give in-sample exog!
X = lagmat(X, p, original='in', trim='both')
mu = (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]
else:
mu = np.zeros(steps)
endog = np.zeros(p + steps - 1)
if p and start:
endog[:p] = y[start-p:start]
elif p:
endog[:p] = y[-p:]
return endog, resid, mu
def _arma_predict_out_of_sample(params, steps, errors, p, q, k_trend, k_exog,
endog, exog=None, start=0, method='mle'):
(trendparam, exparams,
arparams, maparams) = _unpack_params(params, (p, q), k_trend,
k_exog, reverse=True)
endog, resid, mu = _get_predict_out_of_sample(endog, p, q, k_trend, k_exog,
start, errors, trendparam,
exparams, arparams,
maparams, steps, method,
exog)
forecast = np.zeros(steps)
if steps == 1:
if q:
return mu[0] + np.dot(arparams, endog[:p]) + np.dot(maparams,
resid[:q])
else:
return mu[0] + np.dot(arparams, endog[:p])
if q:
i = 0 # if q == 1
else:
i = -1
for i in range(min(q, steps - 1)):
fcast = (mu[i] + np.dot(arparams, endog[i:i + p]) +
np.dot(maparams[:q - i], resid[i:i + q]))
forecast[i] = fcast
endog[i+p] = fcast
for i in range(i + 1, steps - 1):
fcast = mu[i] + np.dot(arparams, endog[i:i+p])
forecast[i] = fcast
endog[i+p] = fcast
#need to do one more without updating endog
forecast[-1] = mu[-1] + np.dot(arparams, endog[steps - 1:])
return forecast
def _arma_predict_in_sample(start, end, endog, resid, k_ar, method):
"""
Pre- and in-sample fitting for ARMA.
"""
if 'mle' in method:
fittedvalues = endog - resid # get them all then trim
else:
fittedvalues = endog[k_ar:] - resid
fv_start = start
if 'mle' not in method:
fv_start -= k_ar # start is in terms of endog index
fv_end = min(len(fittedvalues), end + 1)
return fittedvalues[fv_start:fv_end]
def _validate(start, k_ar, k_diff, dates, method):
if isinstance(start, (string_types, datetime)):
start = _index_date(start, dates)
start -= k_diff
if 'mle' not in method and start < k_ar - k_diff:
raise ValueError("Start must be >= k_ar for conditional "
"MLE or dynamic forecast. Got %s" % start)
return start
def _unpack_params(params, order, k_trend, k_exog, reverse=False):
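    # Illustrative layout: with order=(2, 1), k_trend=1 and k_exog=0, a params
    # vector [c, ar1, ar2, ma1] unpacks to trend=[c], exparams=[],
    # arparams=[ar1, ar2] (reversed if reverse=True) and maparams=[ma1].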
p, q = order
k = k_trend + k_exog
maparams = params[k+p:]
arparams = params[k:k+p]
trend = params[:k_trend]
exparams = params[k_trend:k]
if reverse:
return trend, exparams, arparams[::-1], maparams[::-1]
return trend, exparams, arparams, maparams
def _unpack_order(order):
k_ar, k_ma, k = order
k_lags = max(k_ar, k_ma+1)
return k_ar, k_ma, order, k_lags
def _make_arma_names(data, k_trend, order, exog_names):
k_ar, k_ma = order
exog_names = exog_names or []
ar_lag_names = util.make_lag_names([data.ynames], k_ar, 0)
ar_lag_names = [''.join(('ar.', i)) for i in ar_lag_names]
ma_lag_names = util.make_lag_names([data.ynames], k_ma, 0)
ma_lag_names = [''.join(('ma.', i)) for i in ma_lag_names]
trend_name = util.make_lag_names('', 0, k_trend)
exog_names = trend_name + exog_names + ar_lag_names + ma_lag_names
return exog_names
def _make_arma_exog(endog, exog, trend):
k_trend = 1 # overwritten if no constant
if exog is None and trend == 'c': # constant only
exog = np.ones((len(endog), 1))
elif exog is not None and trend == 'c': # constant plus exogenous
exog = add_trend(exog, trend='c', prepend=True)
elif exog is not None and trend == 'nc':
# make sure it's not holding constant from last run
if exog.var() == 0:
exog = None
k_trend = 0
if trend == 'nc':
k_trend = 0
return k_trend, exog
def _check_estimable(nobs, n_params):
if nobs <= n_params:
raise ValueError("Insufficient degrees of freedom to estimate")
class ARMA(tsbase.TimeSeriesModel):
__doc__ = tsbase._tsa_doc % {"model" : _arma_model,
"params" : _arma_params, "extra_params" : "",
"extra_sections" : _armax_notes %
{"Model" : "ARMA"}}
def __init__(self, endog, order, exog=None, dates=None, freq=None,
missing='none'):
super(ARMA, self).__init__(endog, exog, dates, freq, missing=missing)
exog = self.data.exog # get it after it's gone through processing
_check_estimable(len(self.endog), sum(order))
self.k_ar = k_ar = order[0]
self.k_ma = k_ma = order[1]
self.k_lags = max(k_ar, k_ma+1)
if exog is not None:
if exog.ndim == 1:
exog = exog[:, None]
k_exog = exog.shape[1] # number of exog. variables excl. const
else:
k_exog = 0
self.k_exog = k_exog
def _fit_start_params_hr(self, order):
"""
Get starting parameters for fit.
Parameters
----------
order : iterable
(p,q,k) - AR lags, MA lags, and number of exogenous variables
including the constant.
Returns
-------
start_params : array
A first guess at the starting parameters.
Notes
-----
If necessary, fits an AR process with the laglength selected according
to best BIC. Obtain the residuals. Then fit an ARMA(p,q) model via
OLS using these residuals for a first approximation. Uses a separate
OLS regression to find the coefficients of exogenous variables.
References
----------
Hannan, E.J. and Rissanen, J. 1982. "Recursive estimation of mixed
autoregressive-moving average order." `Biometrika`. 69.1.
"""
p, q, k = order
start_params = zeros((p+q+k))
endog = self.endog.copy() # copy because overwritten
exog = self.exog
if k != 0:
ols_params = GLS(endog, exog).fit().params
start_params[:k] = ols_params
endog -= np.dot(exog, ols_params).squeeze()
if q != 0:
if p != 0:
# make sure we don't run into small data problems in AR fit
nobs = len(endog)
maxlag = int(round(12*(nobs/100.)**(1/4.)))
if maxlag >= nobs:
maxlag = nobs - 1
armod = AR(endog).fit(ic='bic', trend='nc', maxlag=maxlag)
arcoefs_tmp = armod.params
p_tmp = armod.k_ar
# it's possible in small samples that optimal lag-order
# doesn't leave enough obs. No consistent way to fix.
if p_tmp + q >= len(endog):
raise ValueError("Proper starting parameters cannot"
" be found for this order with this "
"number of observations. Use the "
"start_params argument.")
resid = endog[p_tmp:] - np.dot(lagmat(endog, p_tmp,
trim='both'),
arcoefs_tmp)
if p < p_tmp + q:
endog_start = p_tmp + q - p
resid_start = 0
else:
endog_start = 0
resid_start = p - p_tmp - q
lag_endog = lagmat(endog, p, 'both')[endog_start:]
lag_resid = lagmat(resid, q, 'both')[resid_start:]
# stack ar lags and resids
X = np.column_stack((lag_endog, lag_resid))
coefs = GLS(endog[max(p_tmp + q, p):], X).fit().params
start_params[k:k+p+q] = coefs
else:
start_params[k+p:k+p+q] = yule_walker(endog, order=q)[0]
if q == 0 and p != 0:
arcoefs = yule_walker(endog, order=p)[0]
start_params[k:k+p] = arcoefs
# check AR coefficients
if p and not np.all(np.abs(np.roots(np.r_[1, -start_params[k:k + p]]
)) < 1):
raise ValueError("The computed initial AR coefficients are not "
"stationary\nYou should induce stationarity, "
"choose a different model order, or you can\n"
"pass your own start_params.")
# check MA coefficients
elif q and not np.all(np.abs(np.roots(np.r_[1, start_params[k + p:]]
)) < 1):
raise ValueError("The computed initial MA coefficients are not "
"invertible\nYou should induce invertibility, "
"choose a different model order, or you can\n"
"pass your own start_params.")
return start_params
def _fit_start_params(self, order, method):
if method != 'css-mle': # use Hannan-Rissanen to get start params
start_params = self._fit_start_params_hr(order)
else: # use CSS to get start params
func = lambda params: -self.loglike_css(params)
#start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
start_params = self._fit_start_params_hr(order)
if self.transparams:
start_params = self._invtransparams(start_params)
bounds = [(None,)*2]*sum(order)
mlefit = optimize.fmin_l_bfgs_b(func, start_params,
approx_grad=True, m=12,
pgtol=1e-7, factr=1e3,
bounds=bounds, iprint=-1)
start_params = self._transparams(mlefit[0])
return start_params
def score(self, params):
"""
Compute the score function at params.
Notes
-----
This is a numerical approximation.
"""
return approx_fprime_cs(params, self.loglike, args=(False,))
def hessian(self, params):
"""
        Compute the Hessian at params.
Notes
-----
This is a numerical approximation.
"""
return approx_hess_cs(params, self.loglike, args=(False,))
def _transparams(self, params):
"""
        Transforms params to induce stationarity/invertibility.
        References
        ----------
        Jones (1980)
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = np.zeros_like(params)
# just copy exogenous parameters
if k != 0:
newparams[:k] = params[:k]
# AR Coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_transparams(params[k:k+k_ar].copy())
# MA Coeffs
if k_ma != 0:
newparams[k+k_ar:] = _ma_transparams(params[k+k_ar:].copy())
return newparams
def _invtransparams(self, start_params):
"""
Inverse of the Jones reparameterization
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = start_params.copy()
arcoefs = newparams[k:k+k_ar]
macoefs = newparams[k+k_ar:]
# AR coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_invtransparams(arcoefs)
# MA coeffs
if k_ma != 0:
newparams[k+k_ar:k+k_ar+k_ma] = _ma_invtransparams(macoefs)
return newparams
def _get_predict_start(self, start, dynamic):
# do some defaults
method = getattr(self, 'method', 'mle')
k_ar = getattr(self, 'k_ar', 0)
k_diff = getattr(self, 'k_diff', 0)
if start is None:
if 'mle' in method and not dynamic:
start = 0
else:
start = k_ar
self._set_predict_start_date(start) # else it's done in super
elif isinstance(start, int):
start = super(ARMA, self)._get_predict_start(start)
else: # should be on a date
#elif 'mle' not in method or dynamic: # should be on a date
start = _validate(start, k_ar, k_diff, self.data.dates,
method)
start = super(ARMA, self)._get_predict_start(start)
_check_arima_start(start, k_ar, k_diff, method, dynamic)
return start
def _get_predict_end(self, end, dynamic=False):
# pass through so predict works for ARIMA and ARMA
return super(ARMA, self)._get_predict_end(end)
def geterrors(self, params):
"""
Get the errors of the ARMA process.
Parameters
----------
params : array-like
The fitted ARMA parameters
order : array-like
3 item iterable, with the number of AR, MA, and exogenous
parameters, including the trend
"""
#start = self._get_predict_start(start) # will be an index of a date
#end, out_of_sample = self._get_predict_end(end)
params = np.asarray(params)
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
method = getattr(self, 'method', 'mle')
if 'mle' in method: # use KalmanFilter to get errors
(y, k, nobs, k_ar, k_ma, k_lags, newparams, Z_mat, m, R_mat,
T_mat, paramsdtype) = KalmanFilter._init_kalman_state(params,
self)
errors = KalmanFilter.geterrors(y, k, k_ar, k_ma, k_lags, nobs,
Z_mat, m, R_mat, T_mat,
paramsdtype)
if isinstance(errors, tuple):
errors = errors[0] # non-cython version returns a tuple
else: # use scipy.signal.lfilter
y = self.endog.copy()
k = self.k_exog + self.k_trend
if k > 0:
y -= dot(self.exog, params[:k])
k_ar = self.k_ar
k_ma = self.k_ma
(trendparams, exparams,
arparams, maparams) = _unpack_params(params, (k_ar, k_ma),
self.k_trend, self.k_exog,
reverse=False)
b, a = np.r_[1, -arparams], np.r_[1, maparams]
zi = zeros((max(k_ar, k_ma)))
for i in range(k_ar):
zi[i] = sum(-b[:i+1][::-1]*y[:i+1])
e = lfilter(b, a, y, zi=zi)
errors = e[0][k_ar:]
return errors.squeeze()
def predict(self, params, start=None, end=None, exog=None, dynamic=False):
method = getattr(self, 'method', 'mle') # don't assume fit
#params = np.asarray(params)
# will return an index of a date
start = self._get_predict_start(start, dynamic)
end, out_of_sample = self._get_predict_end(end, dynamic)
if out_of_sample and (exog is None and self.k_exog > 0):
raise ValueError("You must provide exog for ARMAX")
endog = self.endog
resid = self.geterrors(params)
k_ar = self.k_ar
if out_of_sample != 0 and self.k_exog > 0:
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
# we need the last k_ar exog for the lag-polynomial
if self.k_exog > 0 and k_ar > 0:
# need the last k_ar exog for the lag-polynomial
exog = np.vstack((self.exog[-k_ar:, self.k_trend:], exog))
if dynamic:
#TODO: now that predict does dynamic in-sample it should
# also return error estimates and confidence intervals
# but how? len(endog) is not tot_obs
out_of_sample += end - start + 1
return _arma_predict_out_of_sample(params, out_of_sample, resid,
k_ar, self.k_ma, self.k_trend,
self.k_exog, endog, exog,
start, method)
predictedvalues = _arma_predict_in_sample(start, end, endog, resid,
k_ar, method)
if out_of_sample:
forecastvalues = _arma_predict_out_of_sample(params, out_of_sample,
resid, k_ar,
self.k_ma,
self.k_trend,
self.k_exog, endog,
exog, method=method)
predictedvalues = np.r_[predictedvalues, forecastvalues]
return predictedvalues
predict.__doc__ = _arma_predict
def loglike(self, params, set_sigma2=True):
"""
Compute the log-likelihood for ARMA(p,q) model
Notes
-----
Likelihood used depends on the method set in fit
"""
method = self.method
if method in ['mle', 'css-mle']:
return self.loglike_kalman(params, set_sigma2)
elif method == 'css':
return self.loglike_css(params, set_sigma2)
else:
raise ValueError("Method %s not understood" % method)
def loglike_kalman(self, params, set_sigma2=True):
"""
Compute exact loglikelihood for ARMA(p,q) model by the Kalman Filter.
"""
return KalmanFilter.loglike(params, self, set_sigma2)
def loglike_css(self, params, set_sigma2=True):
"""
Conditional Sum of Squares likelihood function.
"""
k_ar = self.k_ar
k_ma = self.k_ma
k = self.k_exog + self.k_trend
y = self.endog.copy().astype(params.dtype)
nobs = self.nobs
# how to handle if empty?
if self.transparams:
newparams = self._transparams(params)
else:
newparams = params
if k > 0:
y -= dot(self.exog, newparams[:k])
        # the AR order p determines how many zero errors to set for lfilter
b, a = np.r_[1, -newparams[k:k + k_ar]], np.r_[1, newparams[k + k_ar:]]
zi = np.zeros((max(k_ar, k_ma)), dtype=params.dtype)
for i in range(k_ar):
zi[i] = sum(-b[:i + 1][::-1] * y[:i + 1])
errors = lfilter(b, a, y, zi=zi)[0][k_ar:]
ssr = np.dot(errors, errors)
sigma2 = ssr/nobs
if set_sigma2:
self.sigma2 = sigma2
llf = -nobs/2.*(log(2*pi) + log(sigma2)) - ssr/(2*sigma2)
return llf
def fit(self, start_params=None, trend='c', method="css-mle",
transparams=True, solver='lbfgs', maxiter=50, full_output=1,
disp=5, callback=None, **kwargs):
"""
Fits ARMA(p,q) model using exact maximum likelihood via Kalman filter.
Parameters
----------
start_params : array-like, optional
Starting parameters for ARMA(p,q). If None, the default is given
by ARMA._fit_start_params. See there for more information.
transparams : bool, optional
            Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str {'css-mle','mle','css'}
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' includes constant,
'nc' no constant.
solver : str or None, optional
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
(conjugate gradient), 'ncg' (non-conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional
The maximum number of function evaluations. Default is 50.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is printed. For the default
l_bfgs_b solver, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
Returns
-------
statsmodels.tsa.arima_model.ARMAResults class
See also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information
on using the solvers.
ARMAResults : results class returned by fit
Notes
        -----
        If fit by 'mle', it is assumed for the Kalman Filter that the initial
        unknown state is zero, and that the initial variance is
P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
r, order = 'F')
"""
k_ar = self.k_ar
k_ma = self.k_ma
# enforce invertibility
self.transparams = transparams
endog, exog = self.endog, self.exog
k_exog = self.k_exog
self.nobs = len(endog) # this is overwritten if method is 'css'
# (re)set trend and handle exogenous variables
# always pass original exog
k_trend, exog = _make_arma_exog(endog, self.exog, trend)
# Check has something to estimate
if k_ar == 0 and k_ma == 0 and k_trend == 0 and k_exog == 0:
raise ValueError("Estimation requires the inclusion of least one "
"AR term, MA term, a constant or an exogenous "
"variable.")
# check again now that we know the trend
_check_estimable(len(endog), k_ar + k_ma + k_exog + k_trend)
self.k_trend = k_trend
self.exog = exog # overwrites original exog from __init__
# (re)set names for this model
self.exog_names = _make_arma_names(self.data, k_trend, (k_ar, k_ma),
self.exog_names)
k = k_trend + k_exog
# choose objective function
if k_ma == 0 and k_ar == 0:
method = "css" # Always CSS when no AR or MA terms
self.method = method = method.lower()
# adjust nobs for css
if method == 'css':
self.nobs = len(self.endog) - k_ar
if start_params is not None:
start_params = np.asarray(start_params)
else: # estimate starting parameters
start_params = self._fit_start_params((k_ar, k_ma, k), method)
if transparams: # transform initial parameters to ensure invertibility
start_params = self._invtransparams(start_params)
if solver == 'lbfgs':
kwargs.setdefault('pgtol', 1e-8)
kwargs.setdefault('factr', 1e2)
kwargs.setdefault('m', 12)
kwargs.setdefault('approx_grad', True)
mlefit = super(ARMA, self).fit(start_params, method=solver,
maxiter=maxiter,
full_output=full_output, disp=disp,
callback=callback, **kwargs)
params = mlefit.params
if transparams: # transform parameters back
params = self._transparams(params)
self.transparams = False # so methods don't expect transf.
normalized_cov_params = None # TODO: fix this
armafit = ARMAResults(self, params, normalized_cov_params)
armafit.mle_retvals = mlefit.mle_retvals
armafit.mle_settings = mlefit.mle_settings
armafit.mlefit = mlefit
return ARMAResultsWrapper(armafit)
#NOTE: the length of endog changes when we give a difference to fit
#so model methods are not the same on unfit models as fit ones
#starting to think that order of model should be put in instantiation...
class ARIMA(ARMA):
__doc__ = tsbase._tsa_doc % {"model" : _arima_model,
"params" : _arima_params, "extra_params" : "",
"extra_sections" : _armax_notes %
{"Model" : "ARIMA"}}
def __new__(cls, endog, order, exog=None, dates=None, freq=None,
missing='none'):
p, d, q = order
if d == 0: # then we just use an ARMA model
return ARMA(endog, (p, q), exog, dates, freq, missing)
else:
mod = super(ARIMA, cls).__new__(cls)
mod.__init__(endog, order, exog, dates, freq, missing)
return mod
def __init__(self, endog, order, exog=None, dates=None, freq=None,
missing='none'):
p, d, q = order
if d > 2:
#NOTE: to make more general, need to address the d == 2 stuff
# in the predict method
raise ValueError("d > 2 is not supported")
super(ARIMA, self).__init__(endog, (p, q), exog, dates, freq, missing)
self.k_diff = d
self._first_unintegrate = unintegrate_levels(self.endog[:d], d)
self.endog = np.diff(self.endog, n=d)
#NOTE: will check in ARMA but check again since differenced now
_check_estimable(len(self.endog), p+q)
if exog is not None:
self.exog = self.exog[d:]
if d == 1:
self.data.ynames = 'D.' + self.endog_names
else:
self.data.ynames = 'D{0:d}.'.format(d) + self.endog_names
# what about exog, should we difference it automatically before
# super call?
def _get_predict_start(self, start, dynamic):
"""
"""
#TODO: remove all these getattr and move order specification to
# class constructor
k_diff = getattr(self, 'k_diff', 0)
method = getattr(self, 'method', 'mle')
k_ar = getattr(self, 'k_ar', 0)
if start is None:
if 'mle' in method and not dynamic:
start = 0
else:
start = k_ar
elif isinstance(start, int):
start -= k_diff
try: # catch when given an integer outside of dates index
start = super(ARIMA, self)._get_predict_start(start,
dynamic)
except IndexError:
raise ValueError("start must be in series. "
"got %d" % (start + k_diff))
else: # received a date
start = _validate(start, k_ar, k_diff, self.data.dates,
method)
start = super(ARIMA, self)._get_predict_start(start, dynamic)
# reset date for k_diff adjustment
self._set_predict_start_date(start + k_diff)
return start
def _get_predict_end(self, end, dynamic=False):
"""
Returns last index to be forecast of the differenced array.
Handling of inclusiveness should be done in the predict function.
"""
end, out_of_sample = super(ARIMA, self)._get_predict_end(end, dynamic)
if 'mle' not in self.method and not dynamic:
end -= self.k_ar
return end - self.k_diff, out_of_sample
def fit(self, start_params=None, trend='c', method="css-mle",
transparams=True, solver='lbfgs', maxiter=50, full_output=1,
disp=5, callback=None, **kwargs):
"""
Fits ARIMA(p,d,q) model by exact maximum likelihood via Kalman filter.
Parameters
----------
start_params : array-like, optional
Starting parameters for ARMA(p,q). If None, the default is given
by ARMA._fit_start_params. See there for more information.
transparams : bool, optional
            Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str {'css-mle','mle','css'}
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' includes constant,
'nc' no constant.
solver : str or None, optional
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
(conjugate gradient), 'ncg' (non-conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional
The maximum number of function evaluations. Default is 50.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is printed. For the default
l_bfgs_b solver, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
Returns
-------
        `statsmodels.tsa.arima_model.ARIMAResults` class
See also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information
on using the solvers.
ARIMAResults : results class returned by fit
Notes
        -----
        If fit by 'mle', it is assumed for the Kalman Filter that the initial
        unknown state is zero, and that the initial variance is
P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
r, order = 'F')
"""
arima_fit = super(ARIMA, self).fit(start_params, trend,
method, transparams, solver,
maxiter, full_output, disp,
callback, **kwargs)
normalized_cov_params = None # TODO: fix this?
arima_fit = ARIMAResults(self, arima_fit._results.params,
normalized_cov_params)
arima_fit.k_diff = self.k_diff
return ARIMAResultsWrapper(arima_fit)
def predict(self, params, start=None, end=None, exog=None, typ='linear',
dynamic=False):
# go ahead and convert to an index for easier checking
if isinstance(start, (string_types, datetime)):
start = _index_date(start, self.data.dates)
if typ == 'linear':
if not dynamic or (start != self.k_ar + self.k_diff and
start is not None):
return super(ARIMA, self).predict(params, start, end, exog,
dynamic)
else:
# need to assume pre-sample residuals are zero
# do this by a hack
q = self.k_ma
self.k_ma = 0
predictedvalues = super(ARIMA, self).predict(params, start,
end, exog,
dynamic)
self.k_ma = q
return predictedvalues
elif typ == 'levels':
endog = self.data.endog
if not dynamic:
predict = super(ARIMA, self).predict(params, start, end,
dynamic)
start = self._get_predict_start(start, dynamic)
end, out_of_sample = self._get_predict_end(end)
d = self.k_diff
if 'mle' in self.method:
start += d - 1 # for case where d == 2
end += d - 1
# add each predicted diff to lagged endog
if out_of_sample:
fv = predict[:-out_of_sample] + endog[start:end+1]
if d == 2: #TODO: make a general solution to this
fv += np.diff(endog[start - 1:end + 1])
levels = unintegrate_levels(endog[-d:], d)
fv = np.r_[fv,
unintegrate(predict[-out_of_sample:],
levels)[d:]]
else:
fv = predict + endog[start:end + 1]
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
else:
k_ar = self.k_ar
if out_of_sample:
fv = (predict[:-out_of_sample] +
endog[max(start, self.k_ar-1):end+k_ar+1])
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
levels = unintegrate_levels(endog[-d:], d)
fv = np.r_[fv,
unintegrate(predict[-out_of_sample:],
levels)[d:]]
else:
fv = predict + endog[max(start, k_ar):end+k_ar+1]
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
else:
#IFF we need to use pre-sample values assume pre-sample
# residuals are zero, do this by a hack
if start == self.k_ar + self.k_diff or start is None:
# do the first k_diff+1 separately
p = self.k_ar
q = self.k_ma
k_exog = self.k_exog
k_trend = self.k_trend
k_diff = self.k_diff
(trendparam, exparams,
arparams, maparams) = _unpack_params(params, (p, q),
k_trend,
k_exog,
reverse=True)
# this is the hack
self.k_ma = 0
predict = super(ARIMA, self).predict(params, start, end,
exog, dynamic)
if not start:
start = self._get_predict_start(start, dynamic)
start += k_diff
self.k_ma = q
return endog[start-1] + np.cumsum(predict)
else:
predict = super(ARIMA, self).predict(params, start, end,
exog, dynamic)
return endog[start-1] + np.cumsum(predict)
return fv
else: # pragma : no cover
raise ValueError("typ %s not understood" % typ)
predict.__doc__ = _arima_predict
class ARMAResults(tsbase.TimeSeriesModelResults):
"""
Class to hold results from fitting an ARMA model.
Parameters
----------
model : ARMA instance
The fitted model instance
params : array
Fitted parameters
normalized_cov_params : array, optional
The normalized variance covariance matrix
scale : float, optional
Optional argument to scale the variance covariance matrix.
Returns
--------
**Attributes**
aic : float
Akaike Information Criterion
:math:`-2*llf+2* df_model`
        where `df_model` includes all AR parameters, MA parameters, the
        constant trend parameter, the parameters on any exogenous variables,
        and the variance.
arparams : array
The parameters associated with the AR coefficients in the model.
arroots : array
The roots of the AR coefficients are the solution to
        (1 - arparams[0]*z - arparams[1]*z**2 - ... - arparams[k_ar-1]*z**k_ar) = 0
Stability requires that the roots in modulus lie outside the unit
circle.
bic : float
Bayes Information Criterion
-2*llf + log(nobs)*df_model
Where if the model is fit using conditional sum of squares, the
number of observations `nobs` does not include the `p` pre-sample
observations.
bse : array
The standard errors of the parameters. These are computed using the
numerical Hessian.
df_model : array
The model degrees of freedom = `k_exog` + `k_trend` + `k_ar` + `k_ma`
df_resid : array
The residual degrees of freedom = `nobs` - `df_model`
fittedvalues : array
The predicted values of the model.
hqic : float
Hannan-Quinn Information Criterion
-2*llf + 2*(`df_model`)*log(log(nobs))
Like `bic` if the model is fit using conditional sum of squares then
the `k_ar` pre-sample observations are not counted in `nobs`.
k_ar : int
The number of AR coefficients in the model.
k_exog : int
The number of exogenous variables included in the model. Does not
include the constant.
k_ma : int
The number of MA coefficients.
k_trend : int
This is 0 for no constant or 1 if a constant is included.
llf : float
The value of the log-likelihood function evaluated at `params`.
maparams : array
The value of the moving average coefficients.
maroots : array
The roots of the MA coefficients are the solution to
(1 + maparams[0]*z + maparams[1]*z**2 + ... + maparams[q-1]*z**q) = 0
        Stability requires that the roots in modulus lie outside the unit
circle.
model : ARMA instance
A reference to the model that was fit.
nobs : float
The number of observations used to fit the model. If the model is fit
using exact maximum likelihood this is equal to the total number of
observations, `n_totobs`. If the model is fit using conditional
maximum likelihood this is equal to `n_totobs` - `k_ar`.
n_totobs : float
The total number of observations for `endog`. This includes all
observations, even pre-sample values if the model is fit using `css`.
params : array
The parameters of the model. The order of variables is the trend
        coefficients and the `k_exog` exogenous coefficients, then the
`k_ar` AR coefficients, and finally the `k_ma` MA coefficients.
pvalues : array
The p-values associated with the t-values of the coefficients. Note
that the coefficients are assumed to have a Student's T distribution.
resid : array
The model residuals. If the model is fit using 'mle' then the
residuals are created via the Kalman Filter. If the model is fit
using 'css' then the residuals are obtained via `scipy.signal.lfilter`
adjusted such that the first `k_ma` residuals are zero. These zero
residuals are not returned.
scale : float
This is currently set to 1.0 and not used by the model or its results.
sigma2 : float
The variance of the residuals. If the model is fit by 'css',
sigma2 = ssr/nobs, where ssr is the sum of squared residuals. If
the model is fit by 'mle', then sigma2 = 1/nobs * sum(v**2 / F)
where v is the one-step forecast error and F is the forecast error
variance. See `nobs` for the difference in definitions depending on the
fit.
"""
_cache = {}
#TODO: use this for docstring when we fix nobs issue
def __init__(self, model, params, normalized_cov_params=None, scale=1.):
super(ARMAResults, self).__init__(model, params, normalized_cov_params,
scale)
self.sigma2 = model.sigma2
nobs = model.nobs
self.nobs = nobs
k_exog = model.k_exog
self.k_exog = k_exog
k_trend = model.k_trend
self.k_trend = k_trend
k_ar = model.k_ar
self.k_ar = k_ar
self.n_totobs = len(model.endog)
k_ma = model.k_ma
self.k_ma = k_ma
df_model = k_exog + k_trend + k_ar + k_ma
self._ic_df_model = df_model + 1
self.df_model = df_model
self.df_resid = self.nobs - df_model
self._cache = resettable_cache()
@cache_readonly
def arroots(self):
return np.roots(np.r_[1, -self.arparams])**-1
@cache_readonly
def maroots(self):
return np.roots(np.r_[1, self.maparams])**-1
@cache_readonly
def arfreq(self):
r"""
Returns the frequency of the AR roots.
This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
roots.
"""
z = self.arroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2*pi)
@cache_readonly
def mafreq(self):
r"""
Returns the frequency of the MA roots.
This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
roots.
"""
z = self.maroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2*pi)
@cache_readonly
def arparams(self):
k = self.k_exog + self.k_trend
return self.params[k:k+self.k_ar]
@cache_readonly
def maparams(self):
k = self.k_exog + self.k_trend
k_ar = self.k_ar
return self.params[k+k_ar:]
@cache_readonly
def llf(self):
return self.model.loglike(self.params)
@cache_readonly
def bse(self):
params = self.params
hess = self.model.hessian(params)
if len(params) == 1: # can't take an inverse, ensure 1d
return np.sqrt(-1./hess[0])
return np.sqrt(np.diag(-inv(hess)))
def cov_params(self): # add scale argument?
params = self.params
hess = self.model.hessian(params)
return -inv(hess)
@cache_readonly
def aic(self):
return -2 * self.llf + 2 * self._ic_df_model
@cache_readonly
def bic(self):
nobs = self.nobs
return -2 * self.llf + np.log(nobs) * self._ic_df_model
@cache_readonly
def hqic(self):
nobs = self.nobs
return -2 * self.llf + 2 * np.log(np.log(nobs)) * self._ic_df_model
@cache_readonly
def fittedvalues(self):
model = self.model
endog = model.endog.copy()
k_ar = self.k_ar
exog = model.exog # this is a copy
if exog is not None:
if model.method == "css" and k_ar > 0:
exog = exog[k_ar:]
if model.method == "css" and k_ar > 0:
endog = endog[k_ar:]
fv = endog - self.resid
# add deterministic part back in
#k = self.k_exog + self.k_trend
#TODO: this needs to be commented out for MLE with constant
#if k != 0:
# fv += dot(exog, self.params[:k])
return fv
@cache_readonly
def resid(self):
return self.model.geterrors(self.params)
@cache_readonly
def pvalues(self):
#TODO: same for conditional and unconditional?
df_resid = self.df_resid
return t.sf(np.abs(self.tvalues), df_resid) * 2
def predict(self, start=None, end=None, exog=None, dynamic=False):
return self.model.predict(self.params, start, end, exog, dynamic)
predict.__doc__ = _arma_results_predict
def _forecast_error(self, steps):
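        # Step-h forecast standard error: sqrt(sigma2 * sum_{j<h} psi_j**2), where
        # psi_j are the MA(infinity) weights returned by arma2ma.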
sigma2 = self.sigma2
ma_rep = arma2ma(np.r_[1, -self.arparams],
np.r_[1, self.maparams], nobs=steps)
fcasterr = np.sqrt(sigma2 * np.cumsum(ma_rep**2))
return fcasterr
def _forecast_conf_int(self, forecast, fcasterr, alpha):
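        # Symmetric normal interval: forecast +/- norm.ppf(1 - alpha/2) * fcasterr.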
const = norm.ppf(1 - alpha / 2.)
conf_int = np.c_[forecast - const * fcasterr,
forecast + const * fcasterr]
return conf_int
def forecast(self, steps=1, exog=None, alpha=.05):
"""
Out-of-sample forecasts
Parameters
----------
steps : int
The number of out of sample forecasts from the end of the
sample.
exog : array
If the model is an ARMAX, you must provide out of sample
values for the exogenous variables. This should not include
the constant.
alpha : float
The confidence intervals for the forecasts are (1 - alpha) %
Returns
-------
forecast : array
Array of out of sample forecasts
stderr : array
Array of the standard error of the forecasts.
conf_int : array
2d array of the confidence interval for the forecast
"""
if exog is not None:
#TODO: make a convenience function for this. we're using the
# pattern elsewhere in the codebase
exog = np.asarray(exog)
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
elif exog.ndim == 1:
if len(exog) != self.k_exog:
raise ValueError("1d exog given and len(exog) != k_exog")
exog = exog[None, :]
if exog.shape[0] != steps:
raise ValueError("new exog needed for each step")
# prepend in-sample exog observations
exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],
exog))
forecast = _arma_predict_out_of_sample(self.params,
steps, self.resid, self.k_ar,
self.k_ma, self.k_trend,
self.k_exog, self.model.endog,
exog, method=self.model.method)
# compute the standard errors
fcasterr = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast, fcasterr, alpha)
return forecast, fcasterr, conf_int
def summary(self, alpha=.05):
"""Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals.
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
from statsmodels.iolib.summary import Summary
model = self.model
title = model.__class__.__name__ + ' Model Results'
method = model.method
# get sample TODO: make better sample machinery for estimation
k_diff = getattr(self, 'k_diff', 0)
if 'mle' in method:
start = k_diff
else:
start = k_diff + self.k_ar
if self.data.dates is not None:
dates = self.data.dates
sample = [dates[start].strftime('%m-%d-%Y')]
sample += ['- ' + dates[-1].strftime('%m-%d-%Y')]
else:
sample = str(start) + ' - ' + str(len(self.data.orig_endog))
k_ar, k_ma = self.k_ar, self.k_ma
if not k_diff:
order = str((k_ar, k_ma))
else:
order = str((k_ar, k_diff, k_ma))
top_left = [('Dep. Variable:', None),
('Model:', [model.__class__.__name__ + order]),
('Method:', [method]),
('Date:', None),
('Time:', None),
('Sample:', [sample[0]]),
('', [sample[1]])
]
top_right = [
('No. Observations:', [str(len(self.model.endog))]),
('Log Likelihood', ["%#5.3f" % self.llf]),
('S.D. of innovations', ["%#5.3f" % self.sigma2**.5]),
('AIC', ["%#5.3f" % self.aic]),
('BIC', ["%#5.3f" % self.bic]),
('HQIC', ["%#5.3f" % self.hqic])]
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title)
smry.add_table_params(self, alpha=alpha, use_t=False)
# Make the roots table
from statsmodels.iolib.table import SimpleTable
if k_ma and k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = arstubs + mastubs
roots = np.r_[self.arroots, self.maroots]
freq = np.r_[self.arfreq, self.mafreq]
elif k_ma:
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = mastubs
roots = self.maroots
freq = self.mafreq
elif k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
stubs = arstubs
roots = self.arroots
freq = self.arfreq
else: # 0,0 model
stubs = []
if len(stubs): # not 0, 0
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
roots_table = SimpleTable(data,
headers=[' Real',
' Imaginary',
' Modulus',
' Frequency'],
title="Roots",
stubs=stubs,
data_fmts=["%17.4f", "%+17.4fj",
"%17.4f", "%17.4f"])
smry.tables.append(roots_table)
return smry
def summary2(self, title=None, alpha=.05, float_format="%.4f"):
"""Experimental summary function for ARIMA Results
Parameters
-----------
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary
results
"""
from pandas import DataFrame
# get sample TODO: make better sample machinery for estimation
k_diff = getattr(self, 'k_diff', 0)
if 'mle' in self.model.method:
start = k_diff
else:
start = k_diff + self.k_ar
if self.data.dates is not None:
dates = self.data.dates
sample = [dates[start].strftime('%m-%d-%Y')]
sample += [dates[-1].strftime('%m-%d-%Y')]
else:
sample = str(start) + ' - ' + str(len(self.data.orig_endog))
k_ar, k_ma = self.k_ar, self.k_ma
# Roots table
if k_ma and k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = arstubs + mastubs
roots = np.r_[self.arroots, self.maroots]
freq = np.r_[self.arfreq, self.mafreq]
elif k_ma:
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = mastubs
roots = self.maroots
freq = self.mafreq
elif k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
stubs = arstubs
roots = self.arroots
freq = self.arfreq
else: # 0, 0 order
stubs = []
if len(stubs):
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
data = DataFrame(data)
data.columns = ['Real', 'Imaginary', 'Modulus', 'Frequency']
data.index = stubs
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
# Model info
model_info = summary2.summary_model(self)
model_info['Method:'] = self.model.method
model_info['Sample:'] = sample[0]
model_info[' '] = sample[-1]
model_info['S.D. of innovations:'] = "%#5.3f" % self.sigma2**.5
model_info['HQIC:'] = "%#5.3f" % self.hqic
model_info['No. Observations:'] = str(len(self.model.endog))
# Parameters
params = summary2.summary_params(self)
smry.add_dict(model_info)
smry.add_df(params, float_format=float_format)
if len(stubs):
smry.add_df(data, float_format="%17.4f")
smry.add_title(results=self, title=title)
return smry
def plot_predict(self, start=None, end=None, exog=None, dynamic=False,
alpha=.05, plot_insample=True, ax=None):
from statsmodels.graphics.utils import _import_mpl, create_mpl_ax
_ = _import_mpl()
fig, ax = create_mpl_ax(ax)
# use predict so you set dates
forecast = self.predict(start, end, exog, dynamic)
# doing this twice. just add a plot keyword to predict?
start = self.model._get_predict_start(start, dynamic=False)
end, out_of_sample = self.model._get_predict_end(end, dynamic=False)
if out_of_sample:
steps = out_of_sample
fc_error = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,
alpha)
if hasattr(self.data, "predict_dates"):
from pandas import TimeSeries
forecast = TimeSeries(forecast, index=self.data.predict_dates)
ax = forecast.plot(ax=ax, label='forecast')
else:
ax.plot(forecast)
x = ax.get_lines()[-1].get_xdata()
if out_of_sample:
label = "{0:.0%} confidence interval".format(1 - alpha)
ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],
color='gray', alpha=.5, label=label)
if plot_insample:
ax.plot(x[:end + 1 - start], self.model.endog[start:end+1],
label=self.model.endog_names)
ax.legend(loc='best')
return fig
plot_predict.__doc__ = _plot_predict
class ARMAResultsWrapper(wrap.ResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(ARMAResultsWrapper, ARMAResults)
class ARIMAResults(ARMAResults):
def predict(self, start=None, end=None, exog=None, typ='linear',
dynamic=False):
return self.model.predict(self.params, start, end, exog, typ, dynamic)
predict.__doc__ = _arima_results_predict
def _forecast_error(self, steps):
sigma2 = self.sigma2
ma_rep = arma2ma(np.r_[1, -self.arparams],
np.r_[1, self.maparams], nobs=steps)
fcerr = np.sqrt(np.cumsum(cumsum_n(ma_rep, self.k_diff)**2)*sigma2)
return fcerr
def _forecast_conf_int(self, forecast, fcerr, alpha):
const = norm.ppf(1 - alpha/2.)
conf_int = np.c_[forecast - const*fcerr, forecast + const*fcerr]
return conf_int
def forecast(self, steps=1, exog=None, alpha=.05):
"""
Out-of-sample forecasts
Parameters
----------
steps : int
The number of out of sample forecasts from the end of the
sample.
exog : array
If the model is an ARIMAX, you must provide out of sample
values for the exogenous variables. This should not include
the constant.
alpha : float
The confidence intervals for the forecasts are (1 - alpha) %
Returns
-------
forecast : array
Array of out of sample forecasts
stderr : array
Array of the standard error of the forecasts.
conf_int : array
2d array of the confidence interval for the forecast
Notes
-----
Prediction is done in the levels of the original endogenous variable.
If you would like prediction of differences in levels use `predict`.
"""
if exog is not None:
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
if exog.shape[0] != steps:
raise ValueError("new exog needed for each step")
# prepend in-sample exog observations
exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],
exog))
forecast = _arma_predict_out_of_sample(self.params, steps, self.resid,
self.k_ar, self.k_ma,
self.k_trend, self.k_exog,
self.model.endog,
exog, method=self.model.method)
d = self.k_diff
endog = self.model.data.endog[-d:]
forecast = unintegrate(forecast, unintegrate_levels(endog, d))[d:]
# get forecast errors
fcerr = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast, fcerr, alpha)
return forecast, fcerr, conf_int
def plot_predict(self, start=None, end=None, exog=None, dynamic=False,
alpha=.05, plot_insample=True, ax=None):
from statsmodels.graphics.utils import _import_mpl, create_mpl_ax
_ = _import_mpl()
fig, ax = create_mpl_ax(ax)
# use predict so you set dates
forecast = self.predict(start, end, exog, 'levels', dynamic)
# doing this twice. just add a plot keyword to predict?
start = self.model._get_predict_start(start, dynamic=dynamic)
end, out_of_sample = self.model._get_predict_end(end, dynamic=dynamic)
if out_of_sample:
steps = out_of_sample
fc_error = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,
alpha)
if hasattr(self.data, "predict_dates"):
from pandas import TimeSeries
forecast = TimeSeries(forecast, index=self.data.predict_dates)
ax = forecast.plot(ax=ax, label='forecast')
else:
ax.plot(forecast)
x = ax.get_lines()[-1].get_xdata()
if out_of_sample:
label = "{0:.0%} confidence interval".format(1 - alpha)
ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],
color='gray', alpha=.5, label=label)
if plot_insample:
import re
k_diff = self.k_diff
label = re.sub("D\d*\.", "", self.model.endog_names)
levels = unintegrate(self.model.endog,
self.model._first_unintegrate)
ax.plot(x[:end + 1 - start],
levels[start + k_diff:end + k_diff + 1], label=label)
ax.legend(loc='best')
return fig
plot_predict.__doc__ = _arima_plot_predict
class ARIMAResultsWrapper(ARMAResultsWrapper):
pass
wrap.populate_wrapper(ARIMAResultsWrapper, ARIMAResults)
if __name__ == "__main__":
import statsmodels.api as sm
# simulate arma process
from statsmodels.tsa.arima_process import arma_generate_sample
y = arma_generate_sample([1., -.75], [1., .25], nsample=1000)
arma = ARMA(y)
res = arma.fit(trend='nc', order=(1, 1))
np.random.seed(12345)
y_arma22 = arma_generate_sample([1., -.85, .35], [1, .25, -.9],
nsample=1000)
arma22 = ARMA(y_arma22)
res22 = arma22.fit(trend='nc', order=(2, 2))
# test CSS
arma22_css = ARMA(y_arma22)
res22css = arma22_css.fit(trend='nc', order=(2, 2), method='css')
data = sm.datasets.sunspots.load()
ar = ARMA(data.endog)
resar = ar.fit(trend='nc', order=(9, 0))
y_arma31 = arma_generate_sample([1, -.75, -.35, .25], [.1],
nsample=1000)
arma31css = ARMA(y_arma31)
res31css = arma31css.fit(order=(3, 1), method="css", trend="nc",
transparams=True)
y_arma13 = arma_generate_sample([1., -.75], [1, .25, -.5, .8],
nsample=1000)
arma13css = ARMA(y_arma13)
res13css = arma13css.fit(order=(1, 3), method='css', trend='nc')
# check css for p < q and q < p
y_arma41 = arma_generate_sample([1., -.75, .35, .25, -.3], [1, -.35],
nsample=1000)
arma41css = ARMA(y_arma41)
res41css = arma41css.fit(order=(4, 1), trend='nc', method='css')
y_arma14 = arma_generate_sample([1, -.25], [1., -.75, .35, .25, -.3],
nsample=1000)
arma14css = ARMA(y_arma14)
    res14css = arma14css.fit(order=(1, 4), trend='nc', method='css')
# ARIMA Model
from statsmodels.datasets import webuse
dta = webuse('wpi1')
wpi = dta['wpi']
mod = ARIMA(wpi, (1, 1, 1)).fit()
|
nonemaw/pynet | refs/heads/master | learnpy_ecourse/class2/ex4_show_version.py | 4 | #!/usr/bin/env python
'''
Disclaimer - This is a solution to the below problem given the content we have
discussed in class. It is not necessarily the best solution to the problem.
In other words, I only use things we have covered up to this point in the class.
Python for Network Engineers
https://pynet.twb-tech.com
Learning Python
Class#2
4. You have the following string from "show version" on a Cisco router:
cisco_ios = "Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version
15.0(1)M4, RELEASE SOFTWARE (fc1)"
Note, the string is a single line; there is no newline in the string.
How would you process this string to retrieve only the IOS version?
ios_version = "15.0(1)M4"
Try to make it generic (i.e. assume that the IOS version can change).
You can assume that the commas divide this string into four sections and that
the string will always have 'Cisco IOS Software', 'Version', and 'RELEASE
SOFTWARE' in it.
'''
cisco_ios = "Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), \
Version 15.0(1)M4, RELEASE SOFTWARE (fc1)"
ios_version = cisco_ios.split(",")[2]
ios_version = ios_version.split("Version ")[1]
print ios_version
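# Expected output for the example string above: 15.0(1)M4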
|
juanifioren/django-oidc-provider | refs/heads/master | oidc_provider/tests/app/urls.py | 2 | from django.contrib.auth import views as auth_views
try:
from django.urls import include, url
except ImportError:
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
url(r'^accounts/login/$',
auth_views.LoginView.as_view(template_name='accounts/login.html'), name='login'),
url(r'^accounts/logout/$',
auth_views.LogoutView.as_view(template_name='accounts/logout.html'), name='logout'),
url(r'^openid/', include('oidc_provider.urls', namespace='oidc_provider')),
url(r'^admin/', admin.site.urls),
]
|
blehman/Data-Science-45min-Intros | refs/heads/master | python-oop-101/life/__init__.py | 30 | __all__ = [
"beast"
, "human"
]
|
rhyolight/soda-tap | refs/heads/master | server.py | 1 | import os
import json
import urlparse
import web
import redis
from sodatap import createCatalog, Resource
ITEMS_PER_PAGE = 10
GOOGLE_MAPS_API_KEY = os.environ["GOOGLE_MAPS_API_KEY"]
REDIS_URL = os.environ["REDIS_URL"]
REDIS_DB = 1
POOL = None
urls = (
"/", "index",
"/catalog", "catalog",
"/catalog/(.+)", "catalog",
"/resource/(.+)", "resource",
"/list", "list",
)
app = web.application(urls, globals())
render = web.template.render('templates/')
redisUrl = urlparse.urlparse(REDIS_URL)
cat = createCatalog()
# def createConnectionPool():
# redisUrl = urlparse.urlparse(REDIS_URL)
# print redisUrl.hostname
# print redisUrl.port
# return redis.ConnectionPool(
# host=redisUrl.hostname, port=redisUrl.port,
# db=REDIS_DB, password=redisUrl.password
# )
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i + n]
#################
# HTTP handlers #
#################
class index:
def GET(self):
r = redis.Redis(
host=redisUrl.hostname, port=redisUrl.port,
db=REDIS_DB, password=redisUrl.password
)
totalSodaResources = cat.getTotalSodaResourceCount()
totalTemporalResources = len(r.keys("*"))
return render.layout(
render.index(totalSodaResources, totalTemporalResources),
GOOGLE_MAPS_API_KEY
)
class catalog:
def GET(self, page=0):
# r = redis.Redis(connection_pool=POOL)
r = redis.Redis(
host=redisUrl.hostname, port=redisUrl.port,
db=REDIS_DB, password=redisUrl.password
)
query = web.input()
streamType = "*"
if "type" in query:
streamType = query["type"]
storedKeys = sorted(r.keys(streamType + ":*"))
chunked = list(chunks(storedKeys, ITEMS_PER_PAGE))
try:
pageIds = chunked[int(page)]
except IndexError:
return web.notfound("Sorry, the page you were looking for was not found.")
page = [json.loads(r.get(id)) for id in pageIds]
return render.layout(render.catalog(
page, render.resource, render.dict, render.list
), GOOGLE_MAPS_API_KEY)
class resource:
def GET(self, id=None):
# r = redis.Redis(connection_pool=POOL)
r = redis.Redis(
host=redisUrl.hostname, port=redisUrl.port,
db=REDIS_DB, password=redisUrl.password
)
keys = r.keys("*:" + id)
if len(keys) == 0:
return web.notfound("The resource " + id + " was not found.")
data = r.get(keys[0])
resource = json.loads(data)
return render.layout(render.resource(
resource, render.dict, render.list
), GOOGLE_MAPS_API_KEY)
class list:
def GET(self):
query = web.input()
r = redis.Redis(
host=redisUrl.hostname, port=redisUrl.port,
db=REDIS_DB, password=redisUrl.password
)
dataOut = {}
for key in [k for k in sorted(r.keys("*")) if not k.startswith("meta")]:
data = json.loads(r.get(key))
resource = Resource(data["catalogEntry"])
domain = resource.getDomain()
if domain not in dataOut:
                dataOut[domain] = []
dataOut[domain].append(resource)
if "md" in query:
return render.layout(render.resourceMarkdown(
dataOut, render.dict, render.list
), GOOGLE_MAPS_API_KEY)
else:
return render.layout(render.resourceList(
dataOut, render.dict, render.list
), GOOGLE_MAPS_API_KEY)
##############
# Start here #
##############
if __name__ == "__main__":
# TODO: put connection pool back (there were issues on Heroku with it).
# POOL = createConnectionPool()
app.run()
|
plotly/python-api | refs/heads/master | packages/python/plotly/plotly/validators/splom/_ids.py | 1 | import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="ids", parent_name="splom", **kwargs):
super(IdsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
|
Dino0631/RedRain-Bot | refs/heads/develop | cogs/lib/youtube_dl/extractor/oktoberfesttv.py | 64 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class OktoberfestTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt',
'info_dict': {
'id': 'hb-zelt',
'ext': 'mp4',
'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'thumbnail': r're:^https?://.*\.jpg$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._live_title(self._html_search_regex(
r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title'))
clip = self._search_regex(
r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip')
ncurl = self._search_regex(
r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base')
video_url = ncurl + clip
thumbnail = self._search_regex(
r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage,
'thumbnail', fatal=False)
return {
'id': video_id,
'title': title,
'url': video_url,
'ext': 'mp4',
'is_live': True,
'thumbnail': thumbnail,
}
|
edevil/django | refs/heads/master | django/utils/checksums.py | 105 | """
Common checksum routines.
"""
__all__ = ['luhn']
import warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
warnings.warn(
"django.utils.checksums will be removed in Django 2.0. The "
"luhn() function is now included in django-localflavor 1.1+.",
RemovedInDjango20Warning
)
LUHN_ODD_LOOKUP = (0, 2, 4, 6, 8, 1, 3, 5, 7, 9) # sum_of_digits(index * 2)
def luhn(candidate):
"""
Checks a candidate number for validity according to the Luhn
algorithm (used in validation of, for example, credit cards).
Both numeric and string candidates are accepted.
"""
if not isinstance(candidate, six.string_types):
candidate = str(candidate)
try:
evens = sum(int(c) for c in candidate[-1::-2])
odds = sum(LUHN_ODD_LOOKUP[int(c)] for c in candidate[-2::-2])
return ((evens + odds) % 10 == 0)
except ValueError: # Raised if an int conversion fails
return False
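# Illustrative usage sketch (not part of the original module): the check doubles
# every second digit from the right and requires the digit sum to be a multiple
# of 10, e.g.
#   luhn('4111111111111111')  ->  True   (classic Visa test number)
#   luhn('4111111111111112')  ->  False  (last digit altered, checksum breaks)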
|
xin3liang/platform_external_chromium_org | refs/heads/master | third_party/closure_linter/closure_linter/errorrules_test.py | 126 | #!/usr/bin/env python
# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Medium tests for the gjslint errorrules.
Currently it's just verifying that warnings can't be disabled.
"""
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import erroraccumulator
flags.FLAGS.strict = True
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
class ErrorRulesTest(googletest.TestCase):
"""Test case to for gjslint errorrules."""
def testNoMaxLineLengthFlagExists(self):
"""Tests that --max_line_length flag does not exists."""
self.assertTrue('max_line_length' not in flags.FLAGS.FlagDict())
def testGetMaxLineLength(self):
"""Tests warning are reported for line greater than 80.
"""
# One line > 100 and one line > 80 and < 100. So should produce two
# line too long error.
original = [
'goog.require(\'dummy.aa\');',
'',
'function a() {',
' dummy.aa.i = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
' + 14 + 15 + 16 + 17 + 18 + 19 + 20;',
' dummy.aa.j = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
' + 14 + 15 + 16 + 17 + 18;',
'}',
''
]
# Expect line too long.
expected = [errors.LINE_TOO_LONG, errors.LINE_TOO_LONG]
self._AssertErrors(original, expected)
def testNoDisableFlagExists(self):
"""Tests that --disable flag does not exists."""
self.assertTrue('disable' not in flags.FLAGS.FlagDict())
def testWarningsNotDisabled(self):
"""Tests warnings are reported when nothing is disabled.
"""
original = [
'goog.require(\'dummy.aa\');',
'goog.require(\'dummy.Cc\');',
'goog.require(\'dummy.Dd\');',
'',
'function a() {',
' dummy.aa.i = 1;',
' dummy.Cc.i = 1;',
' dummy.Dd.i = 1;',
'}',
]
expected = [errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
errors.FILE_MISSING_NEWLINE]
self._AssertErrors(original, expected)
def _AssertErrors(self, original, expected_errors, include_header=True):
"""Asserts that the error fixer corrects original to expected."""
if include_header:
original = self._GetHeader() + original
    # Trap gjslint's output and parse it to get the messages that were added.
error_accumulator = erroraccumulator.ErrorAccumulator()
runner.Run('testing.js', error_accumulator, source=original)
error_nums = [e.code for e in error_accumulator.GetErrors()]
error_nums.sort()
expected_errors.sort()
self.assertListEqual(error_nums, expected_errors)
def _GetHeader(self):
"""Returns a fake header for a JavaScript file."""
return [
'// Copyright 2011 Google Inc. All Rights Reserved.',
'',
'/**',
' * @fileoverview Fake file overview.',
' * @author [email protected] (Fake Person)',
' */',
''
]
if __name__ == '__main__':
googletest.main()
|
proversity-org/edx-platform | refs/heads/master | lms/djangoapps/lti_provider/tests/test_outcomes.py | 5 | """
Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py
"""
from django.test import TestCase
from lxml import etree
from mock import ANY, MagicMock, patch
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
import lti_provider.outcomes as outcomes
from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
class StoreOutcomeParametersTest(TestCase):
"""
Tests for the store_outcome_parameters method in outcomes.py
"""
def setUp(self):
super(StoreOutcomeParametersTest, self).setUp()
self.user = UserFactory.create()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id'
)
self.consumer = LtiConsumer(
consumer_name='consumer',
consumer_key='consumer_key',
consumer_secret='secret'
)
self.consumer.save()
def get_valid_request_params(self):
"""
Returns a dictionary containing a complete set of required LTI
parameters.
"""
return {
'lis_result_sourcedid': 'sourcedid',
'lis_outcome_service_url': 'http://example.com/service_url',
'oauth_consumer_key': 'consumer_key',
'tool_consumer_instance_guid': 'tool_instance_guid',
'usage_key': self.usage_key,
'course_key': self.course_key,
}
def test_graded_assignment_created(self):
params = self.get_valid_request_params()
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
assignment = GradedAssignment.objects.get(
lis_result_sourcedid=params['lis_result_sourcedid']
)
self.assertEqual(assignment.course_key, self.course_key)
self.assertEqual(assignment.usage_key, self.usage_key)
self.assertEqual(assignment.user, self.user)
def test_outcome_service_created(self):
params = self.get_valid_request_params()
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
outcome = OutcomeService.objects.get(
lti_consumer=self.consumer
)
self.assertEqual(outcome.lti_consumer, self.consumer)
def test_graded_assignment_references_outcome_service(self):
params = self.get_valid_request_params()
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
outcome = OutcomeService.objects.get(
lti_consumer=self.consumer
)
assignment = GradedAssignment.objects.get(
lis_result_sourcedid=params['lis_result_sourcedid']
)
self.assertEqual(assignment.outcome_service, outcome)
def test_no_duplicate_graded_assignments(self):
params = self.get_valid_request_params()
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
with self.assertNumQueries(2):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
assignments = GradedAssignment.objects.filter(
lis_result_sourcedid=params['lis_result_sourcedid']
)
self.assertEqual(len(assignments), 1)
def test_no_duplicate_outcome_services(self):
params = self.get_valid_request_params()
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
with self.assertNumQueries(2):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
outcome_services = OutcomeService.objects.filter(
lti_consumer=self.consumer
)
self.assertEqual(len(outcome_services), 1)
def test_no_db_update_for_ungraded_assignment(self):
params = self.get_valid_request_params()
del params['lis_result_sourcedid']
with self.assertNumQueries(0):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
def test_no_db_update_for_bad_request(self):
params = self.get_valid_request_params()
del params['lis_outcome_service_url']
with self.assertNumQueries(0):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
def test_db_record_created_without_consumer_id(self):
params = self.get_valid_request_params()
del params['tool_consumer_instance_guid']
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
self.assertEqual(GradedAssignment.objects.count(), 1)
self.assertEqual(OutcomeService.objects.count(), 1)
class SignAndSendReplaceResultTest(TestCase):
"""
Tests for the sign_and_send_replace_result method in outcomes.py
"""
def setUp(self):
super(SignAndSendReplaceResultTest, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id'
)
self.user = UserFactory.create()
consumer = LtiConsumer(
consumer_name='consumer',
consumer_key='consumer_key',
consumer_secret='secret'
)
consumer.save()
outcome = OutcomeService(
lis_outcome_service_url='http://example.com/service_url',
lti_consumer=consumer,
)
outcome.save()
self.assignment = GradedAssignment(
user=self.user,
course_key=self.course_key,
usage_key=self.usage_key,
outcome_service=outcome,
lis_result_sourcedid='sourcedid',
)
self.assignment.save()
@patch('requests.post', return_value='response')
def test_sign_and_send_replace_result(self, post_mock):
response = outcomes.sign_and_send_replace_result(self.assignment, 'xml')
post_mock.assert_called_with(
'http://example.com/service_url',
data='xml',
auth=ANY,
headers={'content-type': 'application/xml'}
)
self.assertEqual(response, 'response')
class XmlHandlingTest(TestCase):
"""
Tests for the generate_replace_result_xml and check_replace_result_response
methods in outcomes.py
"""
response_xml = """
<imsx_POXEnvelopeResponse xmlns = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXResponseHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>4560</imsx_messageIdentifier>
<imsx_statusInfo>
{major_code}
<imsx_severity>status</imsx_severity>
<imsx_description>Score for result_id is now 0.25</imsx_description>
<imsx_messageRefIdentifier>999999123</imsx_messageRefIdentifier>
<imsx_operationRefIdentifier>replaceResult</imsx_operationRefIdentifier>
</imsx_statusInfo>
</imsx_POXResponseHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>
<replaceResultResponse/>
</imsx_POXBody>
</imsx_POXEnvelopeResponse>
"""
result_id = 'result_id'
score = 0.25
@patch('uuid.uuid4', return_value='random_uuid')
def test_replace_result_message_uuid(self, _uuid_mock):
# Pylint doesn't recognize members in the LXML module
xml = outcomes.generate_replace_result_xml(self.result_id, self.score)
tree = etree.fromstring(xml)
message_id = tree.xpath(
'//ns:imsx_messageIdentifier',
namespaces={'ns': 'http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0'}
)
self.assertEqual(len(message_id), 1)
self.assertEqual(message_id[0].text, 'random_uuid')
def test_replace_result_sourced_id(self):
xml = outcomes.generate_replace_result_xml(self.result_id, self.score)
tree = etree.fromstring(xml)
sourced_id = tree.xpath(
'/ns:imsx_POXEnvelopeRequest/ns:imsx_POXBody/ns:replaceResultRequest/'
'ns:resultRecord/ns:sourcedGUID/ns:sourcedId',
namespaces={'ns': 'http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0'}
)
self.assertEqual(len(sourced_id), 1)
self.assertEqual(sourced_id[0].text, 'result_id')
def test_replace_result_score(self):
xml = outcomes.generate_replace_result_xml(self.result_id, self.score)
tree = etree.fromstring(xml)
xml_score = tree.xpath(
'/ns:imsx_POXEnvelopeRequest/ns:imsx_POXBody/ns:replaceResultRequest/'
'ns:resultRecord/ns:result/ns:resultScore/ns:textString',
namespaces={'ns': 'http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0'}
)
self.assertEqual(len(xml_score), 1)
self.assertEqual(xml_score[0].text, '0.25')
def create_response_object(
self, status, xml,
major_code='<imsx_codeMajor>success</imsx_codeMajor>'
):
"""
        Returns a mock HTTP response whose content is a replace_result XML document with the given status code and imsx_codeMajor value.
"""
response = MagicMock()
response.status_code = status
response.content = xml.format(major_code=major_code).encode('ascii', 'ignore')
return response
def test_response_with_correct_xml(self):
xml = self.response_xml
response = self.create_response_object(200, xml)
self.assertTrue(outcomes.check_replace_result_response(response))
def test_response_with_bad_status_code(self):
response = self.create_response_object(500, '')
self.assertFalse(outcomes.check_replace_result_response(response))
def test_response_with_invalid_xml(self):
xml = '<badly>formatted</xml>'
response = self.create_response_object(200, xml)
self.assertFalse(outcomes.check_replace_result_response(response))
def test_response_with_multiple_status_fields(self):
response = self.create_response_object(
200, self.response_xml,
major_code='<imsx_codeMajor>success</imsx_codeMajor>'
'<imsx_codeMajor>failure</imsx_codeMajor>'
)
self.assertFalse(outcomes.check_replace_result_response(response))
def test_response_with_no_status_field(self):
response = self.create_response_object(
200, self.response_xml,
major_code=''
)
self.assertFalse(outcomes.check_replace_result_response(response))
def test_response_with_failing_status_field(self):
response = self.create_response_object(
200, self.response_xml,
major_code='<imsx_codeMajor>failure</imsx_codeMajor>'
)
self.assertFalse(outcomes.check_replace_result_response(response))
class TestAssignmentsForProblem(ModuleStoreTestCase):
"""
Test cases for the assignments_for_problem method in outcomes.py
"""
def setUp(self):
super(TestAssignmentsForProblem, self).setUp()
self.user = UserFactory.create()
self.user_id = self.user.id
self.outcome_service = self.create_outcome_service('outcomes')
self.course = CourseFactory.create()
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.chapter = ItemFactory.create(parent=self.course, category="chapter")
self.vertical = ItemFactory.create(parent=self.chapter, category="vertical")
self.unit = ItemFactory.create(parent=self.vertical, category="unit")
def create_outcome_service(self, id_suffix):
"""
Create and save a new OutcomeService model in the test database. The
OutcomeService model requires an LtiConsumer model, so we create one of
those as well. The method takes an ID string that is used to ensure that
unique fields do not conflict.
"""
lti_consumer = LtiConsumer(
consumer_name='lti_consumer_name' + id_suffix,
consumer_key='lti_consumer_key' + id_suffix,
consumer_secret='lti_consumer_secret' + id_suffix,
instance_guid='lti_instance_guid' + id_suffix
)
lti_consumer.save()
outcome_service = OutcomeService(
lis_outcome_service_url='https://example.com/outcomes/' + id_suffix,
lti_consumer=lti_consumer
)
outcome_service.save()
return outcome_service
def create_graded_assignment(self, desc, result_id, outcome_service):
"""
Create and save a new GradedAssignment model in the test database.
"""
assignment = GradedAssignment(
user=self.user,
course_key=self.course.id,
usage_key=desc.location,
outcome_service=outcome_service,
lis_result_sourcedid=result_id,
version_number=0
)
assignment.save()
return assignment
def test_create_two_lti_consumers_with_empty_instance_guid(self):
"""
Test ability to create two or more LTI consumers through the Django admin
with empty instance_guid field.
A blank guid field is required when a customer enables a new secret/key combination for
LTI integration with their LMS.
"""
lti_consumer_first = LtiConsumer(
consumer_name='lti_consumer_name_second',
consumer_key='lti_consumer_key_second',
consumer_secret='lti_consumer_secret_second',
instance_guid=''
)
lti_consumer_first.save()
lti_consumer_second = LtiConsumer(
consumer_name='lti_consumer_name_third',
consumer_key='lti_consumer_key_third',
consumer_secret='lti_consumer_secret_third',
instance_guid=''
)
lti_consumer_second.save()
count = LtiConsumer.objects.count()
self.assertEqual(count, 3)
def test_with_no_graded_assignments(self):
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 0)
def test_with_graded_unit(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 1)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
def test_with_graded_vertical(self):
self.create_graded_assignment(self.vertical, 'graded_vertical', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 1)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_vertical')
def test_with_graded_unit_and_vertical(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
self.create_graded_assignment(self.vertical, 'graded_vertical', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 2)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_vertical')
def test_with_unit_used_twice(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
self.create_graded_assignment(self.unit, 'graded_unit2', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 2)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_unit2')
def test_with_unit_graded_for_different_user(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
other_user = UserFactory.create()
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, other_user.id, self.course.id
)
self.assertEqual(len(assignments), 0)
def test_with_unit_graded_for_multiple_consumers(self):
other_outcome_service = self.create_outcome_service('second_consumer')
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
self.create_graded_assignment(self.unit, 'graded_unit2', other_outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 2)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_unit2')
self.assertEqual(assignments[0].outcome_service, self.outcome_service)
self.assertEqual(assignments[1].outcome_service, other_outcome_service)
|
botswana-harvard/tshilo-dikotla | refs/heads/develop | td_maternal/admin/maternal_medical_history_admin.py | 1 | from django.contrib import admin
from collections import OrderedDict
from edc_export.actions import export_as_csv_action
from ..forms import MaternalMedicalHistoryForm
from ..models import MaternalMedicalHistory
from .base_maternal_model_admin import BaseMaternalModelAdmin
class MaternalMedicalHistoryAdmin(BaseMaternalModelAdmin):
form = MaternalMedicalHistoryForm
# fields = ('maternal_visit',
# 'chronic_since',
# # 'chronic',
# 'chronic_other',
# 'who_diagnosis',
# # 'who',
# )
list_display = ('maternal_visit', 'chronic_since', 'sero_posetive', 'date_hiv_diagnosis', 'perinataly_infected',
'lowest_cd4_known', 'cd4_count', 'cd4_date')
list_filter = (
'chronic_since', 'sero_posetive', 'date_hiv_diagnosis', 'perinataly_infected')
radio_fields = {'chronic_since': admin.VERTICAL,
'who_diagnosis': admin.VERTICAL,
'sero_posetive': admin.VERTICAL,
'perinataly_infected': admin.VERTICAL,
'know_hiv_status': admin.VERTICAL,
'lowest_cd4_known': admin.VERTICAL,
'is_date_estimated': admin.VERTICAL}
filter_horizontal = (
'who', 'mother_chronic', 'father_chronic', 'mother_medications')
actions = [
export_as_csv_action(
description="Export to CSV file",
fields=['chronic_since', 'who_diagnosis', 'who__short_name',
'mother_chronic__name', 'mother_chronic_other', 'father_chronic__name',
'father_chronic_other', 'mother_medications__name', 'mother_medications_other',
'sero_posetive', 'date_hiv_diagnosis', 'perinataly_infected',
'know_hiv_status', 'lowest_cd4_known', 'cd4_count', 'cd4_date',
'is_date_estimated', ],
delimiter=',',
exclude=['maternal_visit', 'user_created', 'user_modified', 'hostname_created',
'hostname_modified'],
extra_fields=OrderedDict(
{'subject_identifier': 'maternal_visit__appointment__registered_subject__subject_identifier',
'gender': 'maternal_visit__appointment__registered_subject__gender',
'dob': 'maternal_visit__appointment__registered_subject__dob',
'screened': 'maternal_visit__appointment__registered_subject__screening_datetime',
'registered': 'maternal_visit__appointment__registered_subject__registration_datetime',
'visit_code': 'maternal_visit__appointment__visit_definition__code',
'visit_reason': 'maternal_visit__reason',
'visit_study_status': 'maternal_visit__study_status'}),
)]
admin.site.register(MaternalMedicalHistory, MaternalMedicalHistoryAdmin)
|
ajinabraham/Mobile-Security-Framework-MobSF | refs/heads/master | MobSF/settings.py | 1 | """
Django settings for MobSF project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import imp
import os
import logging
import colorlog
from MobSF import utils
from install.windows.setup import windows_config_local
logger = logging.getLogger(__name__)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# MOBSF FRAMEWORK CONFIGURATIONS
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#==============================================
MOBSF_VER = "v1.0.5 Beta"
BANNER = """
__ __ _ ____ _____ _ ___
| \/ | ___ | |__/ ___|| ___| __ _/ | / _ \
| |\/| |/ _ \| '_ \___ \| |_ \ \ / / || | | |
| | | | (_) | |_) |__) | _| \ V /| || |_| |
|_| |_|\___/|_.__/____/|_| \_/ |_(_)___/
"""
# ASCII Standard
#==============================================
#==========MobSF Home Directory=================
USE_HOME = False
# True : All Uploads/Downloads will be stored in user's home directory
# False : All Uploads/Downloads will be stored in MobSF root directory
# If you need multiple users to share the scan results set this to False
#===============================================
MobSF_HOME = utils.getMobSFHome(USE_HOME)
# Logs Directory
LOG_DIR = os.path.join(MobSF_HOME, 'logs/')
# Download Directory
DWD_DIR = os.path.join(MobSF_HOME, 'downloads/')
# Screenshot Directory
SCREEN_DIR = os.path.join(MobSF_HOME, 'downloads/screen/')
# Upload Directory
UPLD_DIR = os.path.join(MobSF_HOME, 'uploads/')
# Database Directory
DB_DIR = os.path.join(MobSF_HOME, 'db.sqlite3')
# Tools Directory
TOOLS_DIR = os.path.join(BASE_DIR, 'DynamicAnalyzer/tools/')
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
# Sqlite3 support
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': DB_DIR,
}
}
# End Sqlite3 support
# Postgres DB - Install psycopg2
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'mobsf',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
# End Postgres support
'''
#===============================================
#==========LOAD CONFIG FROM MobSF HOME==========
try:
# Update Config from MobSF Home Directory
if USE_HOME:
USER_CONFIG = os.path.join(MobSF_HOME, 'config.py')
sett = imp.load_source('user_settings', USER_CONFIG)
locals().update(
{k: v for k, v in list(sett.__dict__.items()) if not k.startswith("__")})
CONFIG_HOME = True
else:
CONFIG_HOME = False
except Exception:
utils.PrintException("Parsing Config")
CONFIG_HOME = False
#===============================================
#=============ALLOWED EXTENSIONS================
ALLOWED_EXTENSIONS = {
".txt": "text/plain",
".png": "image/png",
".zip": "application/zip",
".tar": "application/x-tar"
}
#===============================================
#=============ALLOWED MIMETYPES=================
APK_MIME = [
'application/octet-stream',
'application/vnd.android.package-archive',
'application/x-zip-compressed',
'binary/octet-stream',
]
IPA_MIME = [
'application/iphone',
'application/octet-stream',
'application/x-itunes-ipa',
'application/x-zip-compressed',
'binary/octet-stream',
]
ZIP_MIME = [
'application/zip',
'application/octet-stream',
'application/x-zip-compressed',
'binary/octet-stream',
]
APPX_MIME = [
'application/octet-stream',
'application/vns.ms-appx',
'application/x-zip-compressed'
]
#===============================================
#=====MOBSF SECRET GENERATION AND MIGRATION=====
# Based on https://gist.github.com/ndarville/3452907#file-secret-key-gen-py
try:
SECRET_KEY
except NameError:
SECRET_FILE = os.path.join(MobSF_HOME, "secret")
try:
SECRET_KEY = open(SECRET_FILE).read().strip()
except IOError:
try:
SECRET_KEY = utils.genRandom()
secret = open(SECRET_FILE, 'w')
secret.write(SECRET_KEY)
secret.close()
except IOError:
            raise Exception('Please create a %s file with random characters \
                to generate your secret key!' % SECRET_FILE)
# Run Once
utils.make_migrations(BASE_DIR)
utils.migrate(BASE_DIR)
utils.kali_fix(BASE_DIR)
# Windows Setup
windows_config_local(MobSF_HOME)
#=============================================
#============DJANGO SETTINGS =================
# SECURITY WARNING: don't run with debug turned on in production!
# ^ This is fine Do not turn it off until MobSF moves from Beta to Stable
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'testserver', '*']
# Application definition
INSTALLED_APPS = (
#'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'StaticAnalyzer',
'DynamicAnalyzer',
'MobSF',
'MalwareAnalyzer',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
MIDDLEWARE = (
'MobSF.views.api.rest_api_middleware.RestApiAuthMiddleware',
)
ROOT_URLCONF = 'MobSF.urls'
WSGI_APPLICATION = 'MobSF.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
MEDIA_URL = '/uploads/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS':
[
os.path.join(BASE_DIR, 'templates')
],
'OPTIONS':
{
'debug': True,
}
},
]
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
# 256MB
DATA_UPLOAD_MAX_MEMORY_SIZE = 268435456
#===================
# USER CONFIGURATION
#===================
if CONFIG_HOME:
logger.info("Loading User config from: {}".format(USER_CONFIG))
else:
'''
IMPORTANT
    If 'USE_HOME' is set to True, then the user configuration settings below are not considered.
    The user configuration will be loaded from config.py in the MobSF home directory.
'''
#^CONFIG-START^: Do not edit this line
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# MOBSF USER CONFIGURATIONS
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#-------------------------
# STATIC ANALYZER SETTINGS
#-------------------------
#==========ANDROID SKIP CLASSES==========================
# Common third party classes that will be skipped during static analysis
SKIP_CLASSES = [
r'com[\\\/]{1}google[\\\/]{1}',
r'com[\\\/]{1}android[\\\/]{1}',
r'android[\\\/]{1}content[\\\/]{1}',
r'android[\\\/]{1}support[\\\/]{1}',
r'android[\\\/]{1}arch[\\\/]{1}',
r'kotlin[\\\/]{1}',
r'okhttp2[\\\/]{1}', r'okhttp3[\\\/]{1}',
r'com[\\\/]{1}squareup[\\\/]{1}okhttp[\\\/]{1}',
r'com[\\\/]{1}twitter[\\\/]{1}',
r'twitter4j[\\\/]{1}',
r'org[\\\/]{1}apache[\\\/]{1}',
r'oauth[\\\/]{1}signpost[\\\/]{1}',
r'org[\\\/]{1}chromium[\\\/]{1}',
r'com[\\\/]{1}facebook[\\\/]{1}'
]
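    # Illustrative note (assumption, not part of the original config): each entry is
    # a regular expression matched against decompiled source paths with either path
    # separator, e.g. r'com[\\\/]{1}google[\\\/]{1}' matches 'com/google/gson/Gson.java'
    # as well as 'com\google\gson\Gson.java', so such files are skipped during analysis.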
#==========DECOMPILER SETTINGS=================
DECOMPILER = "cfr"
# Three Decompilers are available
# 1. jd-core
# 2. cfr
# 3. procyon
#==============================================
#==========Dex to Jar Converter================
JAR_CONVERTER = "d2j"
# Two Dex to Jar converters are available
# 1. d2j
# 2. enjarify
#==============================================
#======WINDOWS STATIC ANALYSIS SETTINGS ===========
# Private key
WINDOWS_VM_SECRET = 'MobSF/windows_vm_priv_key.asc'
# IP and Port of the MobSF Windows VM
# eg: WINDOWS_VM_IP = '127.0.0.1'
WINDOWS_VM_IP = None
WINDOWS_VM_PORT = '8000'
#==================================================
#==============3rd Party Tools=================
'''
    If you want to use a different version of the 3rd party tools used by MobSF,
    you can do that by specifying the path here. If specified, MobSF will run
the tool from this location.
'''
# Android 3P Tools
DEX2JAR_BINARY = ""
BACKSMALI_BINARY = ""
CFR_DECOMPILER_BINARY = ""
JD_CORE_DECOMPILER_BINARY = ""
PROCYON_DECOMPILER_BINARY = ""
APKTOOL_BINARY = ""
ADB_BINARY = ""
ENJARIFY_DIRECTORY = ""
# iOS 3P Tools
OTOOL_BINARY = ""
JTOOL_BINARY = ""
CLASSDUMPZ_BINARY = ""
# COMMON
JAVA_DIRECTORY = ""
VBOXMANAGE_BINARY = ""
PYTHON3_PATH = ""
'''
Examples:
JAVA_DIRECTORY = "C:/Program Files/Java/jdk1.7.0_17/bin/"
JAVA_DIRECTORY = "/usr/bin/"
DEX2JAR_BINARY = "/Users/ajin/dex2jar/d2j-dex2jar.sh"
ENJARIFY_DIRECTORY = "D:/enjarify/"
VBOXMANAGE_BINARY = "/usr/bin/VBoxManage"
CFR_DECOMPILER_BINARY = "/home/ajin/tools/cfr.jar"
PYTHON3_PATH = "C:/Users/Ajin/AppData/Local/Programs/Python/Python35-32/"
'''
#===============================================
#-------------------------
# DYNAMIC ANALYZER SETTINGS
#-------------------------
#========ANDROID DYNAMIC ANALYSIS SETTINGS================================
ANDROID_DYNAMIC_ANALYZER = "MobSF_VM"
# You can choose any of the below
# 1. MobSF_VM
# 2. MobSF_AVD
# 3. MobSF_REAL_DEVICE
'''
MobSF_VM - x86 Android 4.4.2 running on VirtualBox (Fast, not all Apps work)
MobSF_AVD - ARM Android 6 running on Android Emulator (Slow, Most Apps work)
    MobSF_REAL_DEVICE - Rooted Android 4.0.3 - 4.4 Device (Very Fast, All Apps work)
Supports Android 5+ for real device. Not tested!
'''
#=========================================================================
#=======ANDROID REAL DEVICE SETTINGS===========
DEVICE_IP = '192.168.1.18'
DEVICE_ADB_PORT = 5555
DEVICE_TIMEOUT = 300
#==============================================
#===========ANDROID EMULATOR SETTINGS ===========
# Android-Studio 'emulator' binary path
AVD_EMULATOR = "/Users/[USERNAME]/Library/Android/sdk/tools/emulator"
AVD_NAME = "MobSFAPI23armV1"
AVD_ADB_PORT = 5554
AVD_SNAPSHOT = ""
AVD_COLD_BOOT = True
#================================================
#====ANDROID MOBSF VIRTUALBOX VM SETTINGS =====
# VM UUID
UUID = '408e1874-759f-4417-9453-53ef21dc2ade'
# Snapshot UUID
SUUID = '5c9deb28-def6-49c0-9233-b5e03edd85c6'
# IP of the MobSF VM
VM_IP = '192.168.56.101'
VM_ADB_PORT = 5555
VM_TIMEOUT = 100
VBOX_HEADLESS = False
#==============================================
#--------------------------
# MobSF MITM PROXY SETTINGS
#--------------------------
#================HOST/PROXY SETTINGS ===============
PROXY_IP = '192.168.56.1' # Host/Server/Proxy IP
PORT = 1337 # Proxy Port
ROOT_CA = '0026aabb.0'
SCREEN_IP = PROXY_IP # ScreenCast IP
SCREEN_PORT = 9339 # ScreenCast Port(Do not Change)
#===================================================
#========UPSTREAM PROXY SETTINGS ==============
# If you are behind a Proxy
UPSTREAM_PROXY_ENABLED = False
UPSTREAM_PROXY_SSL_VERIFY = True
UPSTREAM_PROXY_TYPE = "http"
UPSTREAM_PROXY_IP = "127.0.0.1"
UPSTREAM_PROXY_PORT = 3128
UPSTREAM_PROXY_USERNAME = ""
UPSTREAM_PROXY_PASSWORD = ""
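    # Illustrative example (assumed values, not part of the original config): to route
    # MobSF's traffic through a corporate proxy you would set something like
    #   UPSTREAM_PROXY_ENABLED = True
    #   UPSTREAM_PROXY_IP = "proxy.corp.example.com"
    #   UPSTREAM_PROXY_PORT = 8080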
#==============================================
#--------------------------
# MALWARE ANALYZER SETTINGS
#--------------------------
DOMAIN_MALWARE_SCAN = True
#========DISABLED COMPONENTS===================
#----------VirusTotal--------------------------
VT_ENABLED = False
VT_API_KEY = 'XXXXXXXXXXXXXX'
VT_UPLOAD = False
# Before setting VT_ENABLED to True,
# Make sure VT_API_KEY is set to your VirusTotal API key
# register at: https://www.virustotal.com/#/join-us
# You can get your API KEY from https://www.virustotal.com/en/user/<username>/apikey/
    # VT has premium features, but the free account is just enough for personal use
    # BE AWARE - if you enable VT and the file wasn't already uploaded to VirusTotal,
    # it will be uploaded if you set VT_UPLOAD to True!
#==============================================
#----------APKiD-------------------------------
APKID_ENABLED = False
# Before setting APKID_ENABLED to True,
# Install rednaga fork of Yara Python
# git clone --recursive https://github.com/rednaga/yara-python-1 yara-python
# cd yara-python
# python3 setup.py build --enable-dex install
# pip install apkid
#==============================================
#^CONFIG-END^: Do not edit this line
# The below code should be loaded last.
#============JAVA SETTINGS======================
JAVA_PATH = utils.FindJava(False)
#===============================================
#================VirtualBox Settings============
VBOX = utils.FindVbox(False)
#===============================================
DJANGO_LOG_LEVEL = DEBUG
# Better logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': "[%(levelname)s] %(asctime)-15s - %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'color': {
'()': 'colorlog.ColoredFormatter',
'format': '%(log_color)s[%(levelname)s] %(asctime)-15s - %(message)s',
'datefmt': "%d/%b/%Y %H:%M:%S",
'log_colors': {
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white',
},
},
},
'handlers': {
'logfile': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(MobSF_HOME, 'logs', 'debug.log'),
'formatter': 'standard',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'color',
},
},
'loggers': {
'django': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
'propagate': True,
},
'django.db.backends': {
'handlers': ['console', 'logfile'],
            # DEBUG would log every query, so keep this at INFO or higher.
'level': 'INFO',
'propagate': False, # Don't propagate to other handlers
},
'MobSF': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
'propagate': False,
},
'StaticAnalyzer': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
'propagate': False,
},
'MalwareAnalyzer': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
'propagate': False,
},
'DynamicAnalyzer': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
'propagate': False,
},
}
}
|
jnovinger/django | refs/heads/master | tests/utils_tests/test_glob.py | 331 | from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils.glob import glob_escape
class TestUtilsGlob(SimpleTestCase):
def test_glob_escape(self):
filename = '/my/file?/name[with special chars*'
expected = '/my/file[?]/name[[]with special chars[*]'
filename_b = b'/my/file?/name[with special chars*'
expected_b = b'/my/file[?]/name[[]with special chars[*]'
self.assertEqual(glob_escape(filename), expected)
self.assertEqual(glob_escape(filename_b), expected_b)
|
googleapis/googleapis-gen | refs/heads/master | google/cloud/bigquery/migration/v2alpha/bigquery-migration-v2alpha-py/tests/unit/gapic/migration_v2alpha/test_migration_service.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.bigquery.migration_v2alpha.services.migration_service import MigrationServiceAsyncClient
from google.cloud.bigquery.migration_v2alpha.services.migration_service import MigrationServiceClient
from google.cloud.bigquery.migration_v2alpha.services.migration_service import pagers
from google.cloud.bigquery.migration_v2alpha.services.migration_service import transports
from google.cloud.bigquery.migration_v2alpha.services.migration_service.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.bigquery.migration_v2alpha.types import migration_entities
from google.cloud.bigquery.migration_v2alpha.types import migration_error_details
from google.cloud.bigquery.migration_v2alpha.types import migration_metrics
from google.cloud.bigquery.migration_v2alpha.types import migration_service
from google.oauth2 import service_account
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import error_details_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert MigrationServiceClient._get_default_mtls_endpoint(None) is None
assert MigrationServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert MigrationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert MigrationServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
MigrationServiceClient,
MigrationServiceAsyncClient,
])
def test_migration_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'bigquerymigration.googleapis.com:443'
@pytest.mark.parametrize("client_class", [
MigrationServiceClient,
MigrationServiceAsyncClient,
])
def test_migration_service_client_service_account_always_use_jwt(client_class):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
client = client_class(credentials=creds)
use_jwt.assert_not_called()
@pytest.mark.parametrize("transport_class,transport_name", [
(transports.MigrationServiceGrpcTransport, "grpc"),
(transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_migration_service_client_service_account_always_use_jwt_true(transport_class, transport_name):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
@pytest.mark.parametrize("client_class", [
MigrationServiceClient,
MigrationServiceAsyncClient,
])
def test_migration_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'bigquerymigration.googleapis.com:443'
def test_migration_service_client_get_transport_class():
transport = MigrationServiceClient.get_transport_class()
available_transports = [
transports.MigrationServiceGrpcTransport,
]
assert transport in available_transports
transport = MigrationServiceClient.get_transport_class("grpc")
assert transport == transports.MigrationServiceGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"),
(MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient))
@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient))
def test_migration_service_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "true"),
(MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "false"),
(MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient))
@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_migration_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"),
(MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_migration_service_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"),
(MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_migration_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_migration_service_client_client_options_from_dict():
with mock.patch('google.cloud.bigquery.migration_v2alpha.services.migration_service.transports.MigrationServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = MigrationServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_create_migration_workflow(transport: str = 'grpc', request_type=migration_service.CreateMigrationWorkflowRequest):
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = migration_entities.MigrationWorkflow(
name='name_value',
display_name='display_name_value',
state=migration_entities.MigrationWorkflow.State.DRAFT,
)
response = client.create_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.CreateMigrationWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, migration_entities.MigrationWorkflow)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.state == migration_entities.MigrationWorkflow.State.DRAFT
def test_create_migration_workflow_from_dict():
test_create_migration_workflow(request_type=dict)
def test_create_migration_workflow_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_migration_workflow),
'__call__') as call:
client.create_migration_workflow()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.CreateMigrationWorkflowRequest()
@pytest.mark.asyncio
async def test_create_migration_workflow_async(transport: str = 'grpc_asyncio', request_type=migration_service.CreateMigrationWorkflowRequest):
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_entities.MigrationWorkflow(
name='name_value',
display_name='display_name_value',
state=migration_entities.MigrationWorkflow.State.DRAFT,
))
response = await client.create_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.CreateMigrationWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, migration_entities.MigrationWorkflow)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.state == migration_entities.MigrationWorkflow.State.DRAFT
@pytest.mark.asyncio
async def test_create_migration_workflow_async_from_dict():
await test_create_migration_workflow_async(request_type=dict)
def test_create_migration_workflow_field_headers():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.CreateMigrationWorkflowRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_migration_workflow),
'__call__') as call:
call.return_value = migration_entities.MigrationWorkflow()
client.create_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_migration_workflow_field_headers_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.CreateMigrationWorkflowRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_migration_workflow),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_entities.MigrationWorkflow())
await client.create_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_migration_workflow_flattened():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = migration_entities.MigrationWorkflow()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_migration_workflow(
parent='parent_value',
migration_workflow=migration_entities.MigrationWorkflow(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].migration_workflow == migration_entities.MigrationWorkflow(name='name_value')
def test_create_migration_workflow_flattened_error():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_migration_workflow(
migration_service.CreateMigrationWorkflowRequest(),
parent='parent_value',
migration_workflow=migration_entities.MigrationWorkflow(name='name_value'),
)
@pytest.mark.asyncio
async def test_create_migration_workflow_flattened_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_entities.MigrationWorkflow())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_migration_workflow(
parent='parent_value',
migration_workflow=migration_entities.MigrationWorkflow(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].migration_workflow == migration_entities.MigrationWorkflow(name='name_value')
@pytest.mark.asyncio
async def test_create_migration_workflow_flattened_error_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_migration_workflow(
migration_service.CreateMigrationWorkflowRequest(),
parent='parent_value',
migration_workflow=migration_entities.MigrationWorkflow(name='name_value'),
)
def test_get_migration_workflow(transport: str = 'grpc', request_type=migration_service.GetMigrationWorkflowRequest):
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = migration_entities.MigrationWorkflow(
name='name_value',
display_name='display_name_value',
state=migration_entities.MigrationWorkflow.State.DRAFT,
)
response = client.get_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.GetMigrationWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, migration_entities.MigrationWorkflow)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.state == migration_entities.MigrationWorkflow.State.DRAFT
def test_get_migration_workflow_from_dict():
test_get_migration_workflow(request_type=dict)
def test_get_migration_workflow_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_workflow),
'__call__') as call:
client.get_migration_workflow()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.GetMigrationWorkflowRequest()
@pytest.mark.asyncio
async def test_get_migration_workflow_async(transport: str = 'grpc_asyncio', request_type=migration_service.GetMigrationWorkflowRequest):
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_entities.MigrationWorkflow(
name='name_value',
display_name='display_name_value',
state=migration_entities.MigrationWorkflow.State.DRAFT,
))
response = await client.get_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.GetMigrationWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, migration_entities.MigrationWorkflow)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.state == migration_entities.MigrationWorkflow.State.DRAFT
@pytest.mark.asyncio
async def test_get_migration_workflow_async_from_dict():
await test_get_migration_workflow_async(request_type=dict)
def test_get_migration_workflow_field_headers():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.GetMigrationWorkflowRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_workflow),
'__call__') as call:
call.return_value = migration_entities.MigrationWorkflow()
client.get_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_migration_workflow_field_headers_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.GetMigrationWorkflowRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_workflow),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_entities.MigrationWorkflow())
await client.get_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_migration_workflow_flattened():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = migration_entities.MigrationWorkflow()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_migration_workflow(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_migration_workflow_flattened_error():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_migration_workflow(
migration_service.GetMigrationWorkflowRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_migration_workflow_flattened_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_entities.MigrationWorkflow())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_migration_workflow(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_migration_workflow_flattened_error_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_migration_workflow(
migration_service.GetMigrationWorkflowRequest(),
name='name_value',
)
def test_list_migration_workflows(transport: str = 'grpc', request_type=migration_service.ListMigrationWorkflowsRequest):
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_workflows),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = migration_service.ListMigrationWorkflowsResponse(
next_page_token='next_page_token_value',
)
response = client.list_migration_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.ListMigrationWorkflowsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListMigrationWorkflowsPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_migration_workflows_from_dict():
test_list_migration_workflows(request_type=dict)
def test_list_migration_workflows_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_workflows),
'__call__') as call:
client.list_migration_workflows()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.ListMigrationWorkflowsRequest()
@pytest.mark.asyncio
async def test_list_migration_workflows_async(transport: str = 'grpc_asyncio', request_type=migration_service.ListMigrationWorkflowsRequest):
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_workflows),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.ListMigrationWorkflowsResponse(
next_page_token='next_page_token_value',
))
response = await client.list_migration_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.ListMigrationWorkflowsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListMigrationWorkflowsAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_migration_workflows_async_from_dict():
await test_list_migration_workflows_async(request_type=dict)
def test_list_migration_workflows_field_headers():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.ListMigrationWorkflowsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_workflows),
'__call__') as call:
call.return_value = migration_service.ListMigrationWorkflowsResponse()
client.list_migration_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_migration_workflows_field_headers_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.ListMigrationWorkflowsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_workflows),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.ListMigrationWorkflowsResponse())
await client.list_migration_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_migration_workflows_flattened():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_workflows),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = migration_service.ListMigrationWorkflowsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_migration_workflows(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_migration_workflows_flattened_error():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_migration_workflows(
migration_service.ListMigrationWorkflowsRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_migration_workflows_flattened_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_workflows),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.ListMigrationWorkflowsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_migration_workflows(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_migration_workflows_flattened_error_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_migration_workflows(
migration_service.ListMigrationWorkflowsRequest(),
parent='parent_value',
)
def test_list_migration_workflows_pager():
client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_workflows),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
],
next_page_token='abc',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[],
next_page_token='def',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
],
next_page_token='ghi',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
],
),
RuntimeError,
)
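        # The trailing RuntimeError above is a sentinel: it makes the test fail
        # loudly if the pager ever requests more pages than were provided.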
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', ''),
)),
)
pager = client.list_migration_workflows(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, migration_entities.MigrationWorkflow)
for i in results)
def test_list_migration_workflows_pages():
client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_workflows),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
],
next_page_token='abc',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[],
next_page_token='def',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
],
next_page_token='ghi',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
],
),
RuntimeError,
)
pages = list(client.list_migration_workflows(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_migration_workflows_async_pager():
client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_workflows),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
],
next_page_token='abc',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[],
next_page_token='def',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
],
next_page_token='ghi',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
],
),
RuntimeError,
)
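        # Awaiting the list call fetches the first page, so the pager's
        # next_page_token should already reflect that page before iteration.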
async_pager = await client.list_migration_workflows(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, migration_entities.MigrationWorkflow)
for i in responses)
@pytest.mark.asyncio
async def test_list_migration_workflows_async_pages():
client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_workflows),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
],
next_page_token='abc',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[],
next_page_token='def',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
],
next_page_token='ghi',
),
migration_service.ListMigrationWorkflowsResponse(
migration_workflows=[
migration_entities.MigrationWorkflow(),
migration_entities.MigrationWorkflow(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_migration_workflows(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_delete_migration_workflow(transport: str = 'grpc', request_type=migration_service.DeleteMigrationWorkflowRequest):
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.DeleteMigrationWorkflowRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_migration_workflow_from_dict():
test_delete_migration_workflow(request_type=dict)
def test_delete_migration_workflow_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_migration_workflow),
'__call__') as call:
client.delete_migration_workflow()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.DeleteMigrationWorkflowRequest()
@pytest.mark.asyncio
async def test_delete_migration_workflow_async(transport: str = 'grpc_asyncio', request_type=migration_service.DeleteMigrationWorkflowRequest):
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.DeleteMigrationWorkflowRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_migration_workflow_async_from_dict():
await test_delete_migration_workflow_async(request_type=dict)
def test_delete_migration_workflow_field_headers():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.DeleteMigrationWorkflowRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_migration_workflow),
'__call__') as call:
call.return_value = None
client.delete_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_migration_workflow_field_headers_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.DeleteMigrationWorkflowRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_migration_workflow),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_migration_workflow_flattened():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_migration_workflow(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_migration_workflow_flattened_error():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_migration_workflow(
migration_service.DeleteMigrationWorkflowRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_delete_migration_workflow_flattened_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_migration_workflow(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_migration_workflow_flattened_error_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_migration_workflow(
migration_service.DeleteMigrationWorkflowRequest(),
name='name_value',
)
def test_start_migration_workflow(transport: str = 'grpc', request_type=migration_service.StartMigrationWorkflowRequest):
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.start_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.StartMigrationWorkflowRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_start_migration_workflow_from_dict():
test_start_migration_workflow(request_type=dict)
def test_start_migration_workflow_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_migration_workflow),
'__call__') as call:
client.start_migration_workflow()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.StartMigrationWorkflowRequest()
@pytest.mark.asyncio
async def test_start_migration_workflow_async(transport: str = 'grpc_asyncio', request_type=migration_service.StartMigrationWorkflowRequest):
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.start_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.StartMigrationWorkflowRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_start_migration_workflow_async_from_dict():
await test_start_migration_workflow_async(request_type=dict)
def test_start_migration_workflow_field_headers():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.StartMigrationWorkflowRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_migration_workflow),
'__call__') as call:
call.return_value = None
client.start_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_start_migration_workflow_field_headers_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.StartMigrationWorkflowRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_migration_workflow),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.start_migration_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_start_migration_workflow_flattened():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.start_migration_workflow(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_start_migration_workflow_flattened_error():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.start_migration_workflow(
migration_service.StartMigrationWorkflowRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_start_migration_workflow_flattened_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_migration_workflow),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.start_migration_workflow(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_start_migration_workflow_flattened_error_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.start_migration_workflow(
migration_service.StartMigrationWorkflowRequest(),
name='name_value',
)
def test_get_migration_subtask(transport: str = 'grpc', request_type=migration_service.GetMigrationSubtaskRequest):
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_subtask),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = migration_entities.MigrationSubtask(
name='name_value',
task_id='task_id_value',
type_='type__value',
state=migration_entities.MigrationSubtask.State.ACTIVE,
resource_error_count=2169,
)
response = client.get_migration_subtask(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.GetMigrationSubtaskRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, migration_entities.MigrationSubtask)
assert response.name == 'name_value'
assert response.task_id == 'task_id_value'
assert response.type_ == 'type__value'
assert response.state == migration_entities.MigrationSubtask.State.ACTIVE
assert response.resource_error_count == 2169
def test_get_migration_subtask_from_dict():
test_get_migration_subtask(request_type=dict)
def test_get_migration_subtask_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_subtask),
'__call__') as call:
client.get_migration_subtask()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.GetMigrationSubtaskRequest()
@pytest.mark.asyncio
async def test_get_migration_subtask_async(transport: str = 'grpc_asyncio', request_type=migration_service.GetMigrationSubtaskRequest):
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_subtask),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_entities.MigrationSubtask(
name='name_value',
task_id='task_id_value',
type_='type__value',
state=migration_entities.MigrationSubtask.State.ACTIVE,
resource_error_count=2169,
))
response = await client.get_migration_subtask(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.GetMigrationSubtaskRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, migration_entities.MigrationSubtask)
assert response.name == 'name_value'
assert response.task_id == 'task_id_value'
assert response.type_ == 'type__value'
assert response.state == migration_entities.MigrationSubtask.State.ACTIVE
assert response.resource_error_count == 2169
@pytest.mark.asyncio
async def test_get_migration_subtask_async_from_dict():
await test_get_migration_subtask_async(request_type=dict)
def test_get_migration_subtask_field_headers():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.GetMigrationSubtaskRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_subtask),
'__call__') as call:
call.return_value = migration_entities.MigrationSubtask()
client.get_migration_subtask(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_migration_subtask_field_headers_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.GetMigrationSubtaskRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_subtask),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_entities.MigrationSubtask())
await client.get_migration_subtask(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_migration_subtask_flattened():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_subtask),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = migration_entities.MigrationSubtask()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_migration_subtask(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_migration_subtask_flattened_error():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_migration_subtask(
migration_service.GetMigrationSubtaskRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_migration_subtask_flattened_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_migration_subtask),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_entities.MigrationSubtask())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_migration_subtask(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_migration_subtask_flattened_error_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_migration_subtask(
migration_service.GetMigrationSubtaskRequest(),
name='name_value',
)
def test_list_migration_subtasks(transport: str = 'grpc', request_type=migration_service.ListMigrationSubtasksRequest):
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_subtasks),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = migration_service.ListMigrationSubtasksResponse(
next_page_token='next_page_token_value',
)
response = client.list_migration_subtasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.ListMigrationSubtasksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListMigrationSubtasksPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_migration_subtasks_from_dict():
test_list_migration_subtasks(request_type=dict)
def test_list_migration_subtasks_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_subtasks),
'__call__') as call:
client.list_migration_subtasks()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.ListMigrationSubtasksRequest()
@pytest.mark.asyncio
async def test_list_migration_subtasks_async(transport: str = 'grpc_asyncio', request_type=migration_service.ListMigrationSubtasksRequest):
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_subtasks),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.ListMigrationSubtasksResponse(
next_page_token='next_page_token_value',
))
response = await client.list_migration_subtasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.ListMigrationSubtasksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListMigrationSubtasksAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_migration_subtasks_async_from_dict():
await test_list_migration_subtasks_async(request_type=dict)
def test_list_migration_subtasks_field_headers():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.ListMigrationSubtasksRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_subtasks),
'__call__') as call:
call.return_value = migration_service.ListMigrationSubtasksResponse()
client.list_migration_subtasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_migration_subtasks_field_headers_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = migration_service.ListMigrationSubtasksRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_subtasks),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.ListMigrationSubtasksResponse())
await client.list_migration_subtasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_migration_subtasks_flattened():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_subtasks),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = migration_service.ListMigrationSubtasksResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_migration_subtasks(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_migration_subtasks_flattened_error():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_migration_subtasks(
migration_service.ListMigrationSubtasksRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_migration_subtasks_flattened_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_subtasks),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.ListMigrationSubtasksResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_migration_subtasks(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_migration_subtasks_flattened_error_async():
client = MigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_migration_subtasks(
migration_service.ListMigrationSubtasksRequest(),
parent='parent_value',
)
def test_list_migration_subtasks_pager():
client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_subtasks),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
],
next_page_token='abc',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[],
next_page_token='def',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
],
next_page_token='ghi',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
],
),
RuntimeError,
)
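        # The trailing RuntimeError above is a sentinel: it makes the test fail
        # loudly if the pager ever requests more pages than were provided.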
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', ''),
)),
)
pager = client.list_migration_subtasks(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, migration_entities.MigrationSubtask)
for i in results)
def test_list_migration_subtasks_pages():
client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_subtasks),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
],
next_page_token='abc',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[],
next_page_token='def',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
],
next_page_token='ghi',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
],
),
RuntimeError,
)
pages = list(client.list_migration_subtasks(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_migration_subtasks_async_pager():
client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_subtasks),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
],
next_page_token='abc',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[],
next_page_token='def',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
],
next_page_token='ghi',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
],
),
RuntimeError,
)
async_pager = await client.list_migration_subtasks(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, migration_entities.MigrationSubtask)
for i in responses)
@pytest.mark.asyncio
async def test_list_migration_subtasks_async_pages():
client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_migration_subtasks),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
],
next_page_token='abc',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[],
next_page_token='def',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
],
next_page_token='ghi',
),
migration_service.ListMigrationSubtasksResponse(
migration_subtasks=[
migration_entities.MigrationSubtask(),
migration_entities.MigrationSubtask(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_migration_subtasks(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.MigrationServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.MigrationServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = MigrationServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.MigrationServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = MigrationServiceClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.MigrationServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = MigrationServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.MigrationServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.MigrationServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.MigrationServiceGrpcTransport,
transports.MigrationServiceGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.MigrationServiceGrpcTransport,
)
def test_migration_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.MigrationServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_migration_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.cloud.bigquery.migration_v2alpha.services.migration_service.transports.MigrationServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.MigrationServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'create_migration_workflow',
'get_migration_workflow',
'list_migration_workflows',
'delete_migration_workflow',
'start_migration_workflow',
'get_migration_subtask',
'list_migration_subtasks',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_migration_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.bigquery.migration_v2alpha.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.MigrationServiceTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_migration_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.bigquery.migration_v2alpha.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.MigrationServiceTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
def test_migration_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.bigquery.migration_v2alpha.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.MigrationServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_migration_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
MigrationServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_migration_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
MigrationServiceClient()
adc.assert_called_once_with(
scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.MigrationServiceGrpcTransport,
transports.MigrationServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_migration_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.MigrationServiceGrpcTransport,
transports.MigrationServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_migration_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.MigrationServiceGrpcTransport, grpc_helpers),
(transports.MigrationServiceGrpcAsyncIOTransport, grpc_helpers_async)
],
)
def test_migration_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"bigquerymigration.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
scopes=["1", "2"],
default_host="bigquerymigration.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport])
def test_migration_service_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
def test_migration_service_host_no_port():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='bigquerymigration.googleapis.com'),
)
assert client.transport._host == 'bigquerymigration.googleapis.com:443'
def test_migration_service_host_with_port():
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='bigquerymigration.googleapis.com:8000'),
)
assert client.transport._host == 'bigquerymigration.googleapis.com:8000'
def test_migration_service_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.MigrationServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_migration_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.MigrationServiceGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport])
def test_migration_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport])
def test_migration_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_migration_subtask_path():
project = "squid"
location = "clam"
workflow = "whelk"
subtask = "octopus"
expected = "projects/{project}/locations/{location}/workflows/{workflow}/subtasks/{subtask}".format(project=project, location=location, workflow=workflow, subtask=subtask, )
actual = MigrationServiceClient.migration_subtask_path(project, location, workflow, subtask)
assert expected == actual
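    # With the sample values above the assembled path is
    # 'projects/squid/locations/clam/workflows/whelk/subtasks/octopus'.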
def test_parse_migration_subtask_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"workflow": "cuttlefish",
"subtask": "mussel",
}
path = MigrationServiceClient.migration_subtask_path(**expected)
# Check that the path construction is reversible.
actual = MigrationServiceClient.parse_migration_subtask_path(path)
assert expected == actual
def test_migration_workflow_path():
project = "winkle"
location = "nautilus"
workflow = "scallop"
expected = "projects/{project}/locations/{location}/workflows/{workflow}".format(project=project, location=location, workflow=workflow, )
actual = MigrationServiceClient.migration_workflow_path(project, location, workflow)
assert expected == actual
def test_parse_migration_workflow_path():
expected = {
"project": "abalone",
"location": "squid",
"workflow": "clam",
}
path = MigrationServiceClient.migration_workflow_path(**expected)
# Check that the path construction is reversible.
actual = MigrationServiceClient.parse_migration_workflow_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = MigrationServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = MigrationServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = MigrationServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder, )
actual = MigrationServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = MigrationServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = MigrationServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization, )
actual = MigrationServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = MigrationServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = MigrationServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project, )
actual = MigrationServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = MigrationServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = MigrationServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = MigrationServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = MigrationServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = MigrationServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep:
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = MigrationServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
|
rversteegen/commandergenius | refs/heads/sdl_android | project/jni/python/src/Lib/encodings/iso8859_11.py | 593 | """ Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-11',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
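# Usage sketch (assuming the codec is registered under the name 'iso8859-11'
# via the encodings package):
#   u'\u0e01'.encode('iso8859-11')  # -> '\xa1' (THAI CHARACTER KO KAI)
#   '\xa1'.decode('iso8859-11')     # -> u'\u0e01'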
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
CourseTalk/edx-platform | refs/heads/master | lms/djangoapps/mobile_api/__init__.py | 218 | """
Mobile API
"""
|
ssbarnea/ansible | refs/heads/devel | lib/ansible/cli/__init__.py | 10 | # Copyright: (c) 2012-2014, Michael DeHaan <[email protected]>
# Copyright: (c) 2016, Toshio Kuratomi <[email protected]>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import subprocess
import sys
from abc import ABCMeta, abstractmethod
from ansible.cli.arguments import option_helpers as opt_help
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.release import __version__
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
from ansible.utils.unsafe_proxy import to_unsafe_text
from ansible.vars.manager import VariableManager
try:
import argcomplete
HAS_ARGCOMPLETE = True
except ImportError:
HAS_ARGCOMPLETE = False
display = Display()
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
SKIP_INVENTORY_DEFAULTS = False
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
if not args:
raise ValueError('A non-empty list for args is required')
self.args = args
self.parser = None
self.callback = callback
if C.DEVEL_WARNING and __version__.endswith('dev0'):
display.warning(
'You are running the development version of Ansible. You should only run Ansible from "devel" if '
'you are modifying the Ansible engine, or trying out features under development. This is a rapidly '
'changing source of code and can become unstable at any point.'
)
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
self.parse()
display.vv(to_text(opt_help.version(self.parser.prog)))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
# warn about deprecated config options
for deprecated in C.config.DEPRECATED:
name = deprecated[0]
why = deprecated[1]['why']
if 'alternatives' in deprecated[1]:
alt = ', use %s instead' % deprecated[1]['alternatives']
else:
alt = ''
ver = deprecated[1].get('version')
date = deprecated[1].get('date')
collection_name = deprecated[1].get('collection_name')
display.deprecated("%s option, %s %s" % (name, why, alt),
version=ver, date=date, collection_name=collection_name)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
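    # For example, split_vault_id(u'dev@prompt') returns (u'dev', u'prompt'),
    # while split_vault_id(u'prompt') returns (None, u'prompt').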
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=None,
auto_prompt=True):
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
# note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
        # if an action needs an encrypt password (create_new_password=True) and we don't
        # have other secrets setup, then automatically add a password prompt as well.
        # prompts can't/shouldn't work without a tty, so don't add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
# TODO: remove the now unused args
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
        # certain vault password prompt format, so 'prompt_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
create_new_password,
auto_prompt=auto_prompt)
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
                # an empty or invalid password from the prompt will warn and continue to the next
# without erroring globally
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id=vault_id_name,
loader=loader)
# an invalid password file will error globally
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, to_text(exc)))
raise
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
return vault_secrets
@staticmethod
def ask_passwords():
''' prompt for connection and become passwords if needed '''
op = context.CLIARGS
sshpass = None
becomepass = None
become_prompt = ''
become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()
try:
if op['ask_pass']:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
else:
become_prompt = "%s password: " % become_prompt_method
if op['become_ask_pass']:
becomepass = getpass.getpass(prompt=become_prompt)
if op['ask_pass'] and becomepass == '':
becomepass = sshpass
except EOFError:
pass
# we 'wrap' the passwords to prevent templating as
# they can contain special chars and trigger it incorrectly
if sshpass:
sshpass = to_unsafe_text(sshpass)
if becomepass:
becomepass = to_unsafe_text(becomepass)
return (sshpass, becomepass)
def validate_conflicts(self, op, runas_opts=False, fork_opts=False):
''' check for conflicting options '''
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
return op
@abstractmethod
def init_parser(self, usage="", desc=None, epilog=None):
"""
Create an options parser for most ansible scripts
Subclasses need to implement this method. They will usually call the base class's
init_parser to create a basic version and then add their own options on top of that.
An implementation will look something like this::
def init_parser(self):
super(MyCLI, self).init_parser(usage="My Ansible CLI", inventory_opts=True)
ansible.arguments.option_helpers.add_runas_options(self.parser)
self.parser.add_option('--my-option', dest='my_option', action='store')
"""
self.parser = opt_help.create_base_parser(os.path.basename(self.args[0]), usage=usage, desc=desc, epilog=epilog, )
@abstractmethod
def post_process_args(self, options):
"""Process the command line args
Subclasses need to implement this method. This method validates and transforms the command
line arguments. It can be used to check whether conflicting values were given, whether filenames
exist, etc.
An implementation will look something like this::
def post_process_args(self, options):
options = super(MyCLI, self).post_process_args(options)
if options.addition and options.subtraction:
raise AnsibleOptionsError('Only one of --addition and --subtraction can be specified')
if isinstance(options.listofhosts, string_types):
options.listofhosts = string_types.split(',')
return options
"""
# process tags
if hasattr(options, 'tags') and not options.tags:
# optparse defaults does not do what's expected
# More specifically, we want `--tags` to be additive. So we cannot
# simply change C.TAGS_RUN's default to ["all"] because then passing
# --tags foo would cause us to have ['all', 'foo']
options.tags = ['all']
if hasattr(options, 'tags') and options.tags:
tags = set()
for tag_set in options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
options.tags = list(tags)
# process skip_tags
if hasattr(options, 'skip_tags') and options.skip_tags:
skip_tags = set()
for tag_set in options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
options.skip_tags = list(skip_tags)
# process inventory options except for CLIs that require their own processing
if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
if options.inventory:
# should always be list
if isinstance(options.inventory, string_types):
options.inventory = [options.inventory]
# Ensure full paths when needed
options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in options.inventory]
else:
options.inventory = C.DEFAULT_HOST_LIST
# Dup args set on the root parser and sub parsers results in the root parser ignoring the args. e.g. doing
# 'ansible-galaxy -vvv init' has no verbosity set but 'ansible-galaxy init -vvv' sets a level of 3. To preserve
# back compat with pre-argparse changes we manually scan and set verbosity based on the argv values.
if self.parser.prog in ['ansible-galaxy', 'ansible-vault'] and not options.verbosity:
verbosity_arg = next(iter([arg for arg in self.args if arg.startswith('-v')]), None)
if verbosity_arg:
display.deprecated("Setting verbosity before the arg sub command is deprecated, set the verbosity "
"after the sub command", "2.13", collection_name='ansible.builtin')
options.verbosity = verbosity_arg.count('v')
return options
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
context.CLIARGS.
Subclasses need to implement two helper methods, init_parser() and post_process_args() which
are called from this function before and after parsing the arguments.
"""
self.init_parser()
if HAS_ARGCOMPLETE:
argcomplete.autocomplete(self.parser)
try:
options = self.parser.parse_args(self.args[1:])
except SystemExit as e:
if(e.code != 0):
self.parser.exit(status=2, message=" \n%s " % self.parser.format_help())
raise
options = self.post_process_args(options)
context._init_global_context(options)
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
            # expensive call, use with care
ansible_version_string = opt_help.version()
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except Exception:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
@staticmethod
def pager(text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
CLI.pager_pipe(text, os.environ['PAGER'])
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
CLI.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@staticmethod
def _play_prereqs():
options = context.CLIARGS
# all needs loader
loader = DataLoader()
basedir = options.get('basedir', False)
if basedir:
loader.set_basedir(basedir)
add_all_plugin_dirs(basedir)
AnsibleCollectionConfig.playbook_paths = basedir
default_collection = _get_collection_name_from_path(basedir)
if default_collection:
display.warning(u'running with default collection {0}'.format(default_collection))
AnsibleCollectionConfig.default_collection = default_collection
vault_ids = list(options['vault_ids'])
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
vault_secrets = CLI.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=list(options['vault_password_files']),
ask_vault_pass=options['ask_vault_pass'],
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options['inventory'])
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
@staticmethod
def get_host_list(inventory, subset, pattern='all'):
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
if C.LOCALHOST_WARNING and pattern not in C.LOCALHOST:
display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
no_hosts = True
inventory.subset(subset)
hosts = inventory.list_hosts(pattern)
if not hosts and no_hosts is False:
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
return hosts
|
mcgettin/githubLabNMG | refs/heads/master | cloudComp/euler/eu6.py | 1 | """
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten
natural numbers and the square of the sum is 3025 − 385 = 2640.
Find the difference between the sum of the squares of the first
one hundred natural numbers and the square of the sum.
"""
#function for the sum of the squares of numbers from 1 to max
def sumSqu(maxi):
tally=0 #for counting up the squares
for i in range(maxi):
tally+=((i+1)**2) #add squared number to tally
    return tally #return the sum of all the squares
#function for the square of the sum of numbers from 1 to max
def sqSum(maxi):
tally=0 #for getting the sum of the numbers
for i in range(maxi):
tally+=(i+1)
    return (tally**2) #return the square of the sum of numbers
print("Difference is:",sqSum(100)-sumSqu(100)) #result
|
hslee16/ansible-modules-extras | refs/heads/devel | packaging/os/pkg5.py | 75 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pkg5
author: "Peter Oliver (@mavit)"
short_description: Manages packages with the Solaris 11 Image Packaging System
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
notes:
- The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
options:
name:
description:
      - An FMRI of the package(s) to be installed/removed/updated.
- Multiple packages may be specified, separated by C(,).
required: true
state:
description:
- Whether to install (I(present), I(latest)), or remove (I(absent)) a
package.
required: false
default: present
choices: [ present, latest, absent ]
accept_licenses:
description:
- Accept any licences.
required: false
default: false
choices: [ true, false ]
aliases: [ accept_licences, accept ]
'''
EXAMPLES = '''
# Install Vim:
- pkg5: name=editor/vim
# Remove finger daemon:
- pkg5: name=service/network/finger state=absent
# Install several packages at once:
- pkg5:
name:
- /file/gnu-findutils
- /text/gnu-grep
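# Install a package and accept any licences it requires
# (the package name here is only illustrative):
- pkg5: name=runtime/java accept_licenses=yes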
'''
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='list'),
state=dict(
default='present',
choices=[
'present',
'installed',
'latest',
'absent',
'uninstalled',
'removed',
]
),
accept_licenses=dict(
type='bool',
default=False,
aliases=['accept_licences', 'accept'],
),
)
)
params = module.params
packages = []
    # pkg(5) FMRIs include a comma before the release number, but
# AnsibleModule will have split this into multiple items for us.
# Try to spot where this has happened and fix it.
for fragment in params['name']:
if (
re.search('^\d+(?:\.\d+)*', fragment)
and packages and re.search('@[^,]*$', packages[-1])
):
packages[-1] += ',' + fragment
else:
packages.append(fragment)
if params['state'] in ['present', 'installed']:
ensure(module, 'present', packages, params)
elif params['state'] in ['latest']:
ensure(module, 'latest', packages, params)
elif params['state'] in ['absent', 'uninstalled', 'removed']:
ensure(module, 'absent', packages, params)
def ensure(module, state, packages, params):
response = {
'results': [],
'msg': '',
}
behaviour = {
'present': {
'filter': lambda p: not is_installed(module, p),
'subcommand': 'install',
},
'latest': {
'filter': lambda p: not is_latest(module, p),
'subcommand': 'install',
},
'absent': {
'filter': lambda p: is_installed(module, p),
'subcommand': 'uninstall',
},
}
if params['accept_licenses']:
accept_licenses = ['--accept']
else:
accept_licenses = []
    to_modify = list(filter(behaviour[state]['filter'], packages))
if to_modify:
rc, out, err = module.run_command(
[
'pkg', behaviour[state]['subcommand']
]
+ accept_licenses
+ [
'-q', '--'
] + to_modify
)
response['rc'] = rc
response['results'].append(out)
response['msg'] += err
response['changed'] = True
if rc != 0:
module.fail_json(**response)
module.exit_json(**response)
def is_installed(module, package):
rc, out, err = module.run_command(['pkg', 'list', '--', package])
return not bool(int(rc))
def is_latest(module, package):
rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
return bool(int(rc))
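# Both helpers rely on the pkg(5) exit codes: 'pkg list' exits 0 when the
# package is installed, and 'pkg list -u' exits 0 when an update is available.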
from ansible.module_utils.basic import *
main()
|
Tivix/django-rest-auth | refs/heads/master | rest_auth/tests/test_api.py | 2 | from django.test import TestCase, override_settings
from django.contrib.auth import get_user_model
from django.core import mail
from django.conf import settings
from django.utils.encoding import force_text
from allauth.account import app_settings as account_app_settings
from rest_framework import status
from rest_framework.test import APIRequestFactory
from rest_auth.registration.views import RegisterView
from rest_auth.registration.app_settings import register_permission_classes
from .mixins import TestsMixin, CustomPermissionClass
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
@override_settings(ROOT_URLCONF="tests.urls")
class APIBasicTests(TestsMixin, TestCase):
"""
Case #1:
- user profile: defined
- custom registration: backend defined
"""
# urls = 'tests.urls'
USERNAME = 'person'
PASS = 'person'
EMAIL = "[email protected]"
NEW_PASS = 'new-test-pass'
REGISTRATION_VIEW = 'rest_auth.runtests.RegistrationView'
# data without user profile
REGISTRATION_DATA = {
"username": USERNAME,
"password1": PASS,
"password2": PASS
}
REGISTRATION_DATA_WITH_EMAIL = REGISTRATION_DATA.copy()
REGISTRATION_DATA_WITH_EMAIL['email'] = EMAIL
BASIC_USER_DATA = {
'first_name': "John",
'last_name': 'Smith',
'email': EMAIL
}
USER_DATA = BASIC_USER_DATA.copy()
USER_DATA['newsletter_subscribe'] = True
def setUp(self):
self.init()
def _generate_uid_and_token(self, user):
result = {}
from django.utils.encoding import force_bytes
from django.contrib.auth.tokens import default_token_generator
from django.utils.http import urlsafe_base64_encode
result['uid'] = urlsafe_base64_encode(force_bytes(user.pk))
result['token'] = default_token_generator.make_token(user)
return result
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=account_app_settings.AuthenticationMethod.EMAIL)
def test_login_failed_email_validation(self):
payload = {
"email": '',
"password": self.PASS
}
resp = self.post(self.login_url, data=payload, status_code=400)
self.assertEqual(resp.json['non_field_errors'][0], u'Must include "email" and "password".')
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=account_app_settings.AuthenticationMethod.USERNAME)
def test_login_failed_username_validation(self):
payload = {
"username": '',
"password": self.PASS
}
resp = self.post(self.login_url, data=payload, status_code=400)
self.assertEqual(resp.json['non_field_errors'][0], u'Must include "username" and "password".')
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=account_app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_login_failed_username_email_validation(self):
payload = {
"password": self.PASS
}
resp = self.post(self.login_url, data=payload, status_code=400)
self.assertEqual(resp.json['non_field_errors'][0], u'Must include either "username" or "email" and "password".')
def test_allauth_login_with_username(self):
payload = {
"username": self.USERNAME,
"password": self.PASS
}
        # there are no users in the db, so it should return an error (400)
self.post(self.login_url, data=payload, status_code=400)
self.post(self.password_change_url, status_code=403)
# create user
user = get_user_model().objects.create_user(self.USERNAME, '', self.PASS)
self.post(self.login_url, data=payload, status_code=200)
self.assertEqual('key' in self.response.json.keys(), True)
self.token = self.response.json['key']
self.post(self.password_change_url, status_code=400)
# test inactive user
user.is_active = False
user.save()
self.post(self.login_url, data=payload, status_code=400)
# test wrong username/password
payload = {
"username": self.USERNAME + '?',
"password": self.PASS
}
self.post(self.login_url, data=payload, status_code=400)
# test empty payload
self.post(self.login_url, data={}, status_code=400)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=account_app_settings.AuthenticationMethod.EMAIL)
def test_allauth_login_with_email(self):
payload = {
"email": self.EMAIL,
"password": self.PASS
}
        # there are no users in the db, so it should return an error (400)
self.post(self.login_url, data=payload, status_code=400)
self.post(self.password_change_url, status_code=403)
# create user
get_user_model().objects.create_user(self.EMAIL, email=self.EMAIL, password=self.PASS)
self.post(self.login_url, data=payload, status_code=200)
@override_settings(REST_USE_JWT=True)
def test_login_jwt(self):
payload = {
"username": self.USERNAME,
"password": self.PASS
}
get_user_model().objects.create_user(self.USERNAME, '', self.PASS)
self.post(self.login_url, data=payload, status_code=200)
self.assertEqual('token' in self.response.json.keys(), True)
self.token = self.response.json['token']
def test_login_by_email(self):
# starting test without allauth app
settings.INSTALLED_APPS.remove('allauth')
payload = {
"email": self.EMAIL.lower(),
"password": self.PASS
}
        # there are no users in the db, so it should return an error (400)
self.post(self.login_url, data=payload, status_code=400)
self.post(self.password_change_url, status_code=403)
# create user
user = get_user_model().objects.create_user(self.USERNAME, self.EMAIL, self.PASS)
# test auth by email
self.post(self.login_url, data=payload, status_code=200)
self.assertEqual('key' in self.response.json.keys(), True)
self.token = self.response.json['key']
# test auth by email in different case
payload = {
"email": self.EMAIL.upper(),
"password": self.PASS
}
self.post(self.login_url, data=payload, status_code=200)
self.assertEqual('key' in self.response.json.keys(), True)
self.token = self.response.json['key']
# test inactive user
user.is_active = False
user.save()
self.post(self.login_url, data=payload, status_code=400)
# test wrong email/password
payload = {
"email": 't' + self.EMAIL,
"password": self.PASS
}
self.post(self.login_url, data=payload, status_code=400)
# test empty payload
self.post(self.login_url, data={}, status_code=400)
# bring back allauth
settings.INSTALLED_APPS.append('allauth')
def test_password_change(self):
login_payload = {
"username": self.USERNAME,
"password": self.PASS
}
get_user_model().objects.create_user(self.USERNAME, '', self.PASS)
self.post(self.login_url, data=login_payload, status_code=200)
self.token = self.response.json['key']
new_password_payload = {
"new_password1": "new_person",
"new_password2": "new_person"
}
self.post(
self.password_change_url,
data=new_password_payload,
status_code=200
)
# user should not be able to login using old password
self.post(self.login_url, data=login_payload, status_code=400)
# new password should work
login_payload['password'] = new_password_payload['new_password1']
self.post(self.login_url, data=login_payload, status_code=200)
# pass1 and pass2 are not equal
new_password_payload = {
"new_password1": "new_person1",
"new_password2": "new_person"
}
self.post(
self.password_change_url,
data=new_password_payload,
status_code=400
)
# send empty payload
self.post(self.password_change_url, data={}, status_code=400)
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_password_change_with_old_password(self):
login_payload = {
"username": self.USERNAME,
"password": self.PASS
}
get_user_model().objects.create_user(self.USERNAME, '', self.PASS)
self.post(self.login_url, data=login_payload, status_code=200)
self.token = self.response.json['key']
new_password_payload = {
"old_password": "%s!" % self.PASS, # wrong password
"new_password1": "new_person",
"new_password2": "new_person"
}
self.post(
self.password_change_url,
data=new_password_payload,
status_code=400
)
new_password_payload = {
"old_password": self.PASS,
"new_password1": "new_person",
"new_password2": "new_person"
}
self.post(
self.password_change_url,
data=new_password_payload,
status_code=200
)
# user should not be able to login using old password
self.post(self.login_url, data=login_payload, status_code=400)
# new password should work
login_payload['password'] = new_password_payload['new_password1']
self.post(self.login_url, data=login_payload, status_code=200)
def test_password_reset(self):
user = get_user_model().objects.create_user(self.USERNAME, self.EMAIL, self.PASS)
# call password reset
mail_count = len(mail.outbox)
payload = {'email': self.EMAIL}
self.post(self.password_reset_url, data=payload, status_code=200)
self.assertEqual(len(mail.outbox), mail_count + 1)
url_kwargs = self._generate_uid_and_token(user)
url = reverse('rest_password_reset_confirm')
# wrong token
data = {
'new_password1': self.NEW_PASS,
'new_password2': self.NEW_PASS,
'uid': force_text(url_kwargs['uid']),
'token': '-wrong-token-'
}
self.post(url, data=data, status_code=400)
# wrong uid
data = {
'new_password1': self.NEW_PASS,
'new_password2': self.NEW_PASS,
'uid': '-wrong-uid-',
'token': url_kwargs['token']
}
self.post(url, data=data, status_code=400)
# wrong token and uid
data = {
'new_password1': self.NEW_PASS,
'new_password2': self.NEW_PASS,
'uid': '-wrong-uid-',
'token': '-wrong-token-'
}
self.post(url, data=data, status_code=400)
# valid payload
data = {
'new_password1': self.NEW_PASS,
'new_password2': self.NEW_PASS,
'uid': force_text(url_kwargs['uid']),
'token': url_kwargs['token']
}
url = reverse('rest_password_reset_confirm')
self.post(url, data=data, status_code=200)
payload = {
"username": self.USERNAME,
"password": self.NEW_PASS
}
self.post(self.login_url, data=payload, status_code=200)
def test_password_reset_with_email_in_different_case(self):
get_user_model().objects.create_user(self.USERNAME, self.EMAIL.lower(), self.PASS)
# call password reset in upper case
mail_count = len(mail.outbox)
payload = {'email': self.EMAIL.upper()}
self.post(self.password_reset_url, data=payload, status_code=200)
self.assertEqual(len(mail.outbox), mail_count + 1)
def test_password_reset_with_invalid_email(self):
"""
        An invalid email should not raise an error, as this would leak which users exist
"""
get_user_model().objects.create_user(self.USERNAME, self.EMAIL, self.PASS)
# call password reset
mail_count = len(mail.outbox)
payload = {'email': '[email protected]'}
self.post(self.password_reset_url, data=payload, status_code=200)
self.assertEqual(len(mail.outbox), mail_count)
def test_user_details(self):
user = get_user_model().objects.create_user(self.USERNAME, self.EMAIL, self.PASS)
payload = {
"username": self.USERNAME,
"password": self.PASS
}
self.post(self.login_url, data=payload, status_code=200)
self.token = self.response.json['key']
self.get(self.user_url, status_code=200)
self.patch(self.user_url, data=self.BASIC_USER_DATA, status_code=200)
user = get_user_model().objects.get(pk=user.pk)
self.assertEqual(user.first_name, self.response.json['first_name'])
self.assertEqual(user.last_name, self.response.json['last_name'])
self.assertEqual(user.email, self.response.json['email'])
@override_settings(REST_USE_JWT=True)
def test_user_details_using_jwt(self):
user = get_user_model().objects.create_user(self.USERNAME, self.EMAIL, self.PASS)
payload = {
"username": self.USERNAME,
"password": self.PASS
}
self.post(self.login_url, data=payload, status_code=200)
self.token = self.response.json['token']
self.get(self.user_url, status_code=200)
self.patch(self.user_url, data=self.BASIC_USER_DATA, status_code=200)
user = get_user_model().objects.get(pk=user.pk)
self.assertEqual(user.email, self.response.json['email'])
def test_registration(self):
user_count = get_user_model().objects.all().count()
# test empty payload
self.post(self.register_url, data={}, status_code=400)
result = self.post(self.register_url, data=self.REGISTRATION_DATA, status_code=201)
self.assertIn('key', result.data)
self.assertEqual(get_user_model().objects.all().count(), user_count + 1)
new_user = get_user_model().objects.latest('id')
self.assertEqual(new_user.username, self.REGISTRATION_DATA['username'])
self._login()
self._logout()
@override_settings(REST_AUTH_REGISTER_PERMISSION_CLASSES=(CustomPermissionClass,))
def test_registration_with_custom_permission_class(self):
class CustomRegisterView(RegisterView):
permission_classes = register_permission_classes()
authentication_classes = ()
factory = APIRequestFactory()
request = factory.post('/customer/details', self.REGISTRATION_DATA, format='json')
response = CustomRegisterView.as_view()(request)
self.assertEqual(response.data['detail'], CustomPermissionClass.message)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@override_settings(REST_USE_JWT=True)
def test_registration_with_jwt(self):
user_count = get_user_model().objects.all().count()
self.post(self.register_url, data={}, status_code=400)
result = self.post(self.register_url, data=self.REGISTRATION_DATA, status_code=201)
self.assertIn('token', result.data)
self.assertEqual(get_user_model().objects.all().count(), user_count + 1)
self._login()
self._logout()
def test_registration_with_invalid_password(self):
data = self.REGISTRATION_DATA.copy()
data['password2'] = 'foobar'
self.post(self.register_url, data=data, status_code=400)
@override_settings(
ACCOUNT_EMAIL_VERIFICATION='mandatory',
ACCOUNT_EMAIL_REQUIRED=True,
ACCOUNT_EMAIL_CONFIRMATION_HMAC=False
)
def test_registration_with_email_verification(self):
user_count = get_user_model().objects.all().count()
mail_count = len(mail.outbox)
# test empty payload
self.post(
self.register_url,
data={},
status_code=status.HTTP_400_BAD_REQUEST
)
result = self.post(
self.register_url,
data=self.REGISTRATION_DATA_WITH_EMAIL,
status_code=status.HTTP_201_CREATED
)
self.assertNotIn('key', result.data)
self.assertEqual(get_user_model().objects.all().count(), user_count + 1)
self.assertEqual(len(mail.outbox), mail_count + 1)
new_user = get_user_model().objects.latest('id')
self.assertEqual(new_user.username, self.REGISTRATION_DATA['username'])
# email is not verified yet
payload = {
"username": self.USERNAME,
"password": self.PASS
}
self.post(
self.login_url,
data=payload,
            status_code=status.HTTP_400_BAD_REQUEST
)
# verify email
email_confirmation = new_user.emailaddress_set.get(email=self.EMAIL)\
.emailconfirmation_set.order_by('-created')[0]
self.post(
self.verify_email_url,
data={"key": email_confirmation.key},
status_code=status.HTTP_200_OK
)
# try to login again
self._login()
self._logout()
@override_settings(ACCOUNT_LOGOUT_ON_GET=True)
def test_logout_on_get(self):
payload = {
"username": self.USERNAME,
"password": self.PASS
}
# create user
get_user_model().objects.create_user(self.USERNAME, '', self.PASS)
self.post(self.login_url, data=payload, status_code=200)
        self.get(self.logout_url, status_code=status.HTTP_200_OK)
@override_settings(ACCOUNT_LOGOUT_ON_GET=False)
def test_logout_on_post_only(self):
payload = {
"username": self.USERNAME,
"password": self.PASS
}
# create user
get_user_model().objects.create_user(self.USERNAME, '', self.PASS)
self.post(self.login_url, data=payload, status_code=status.HTTP_200_OK)
self.get(self.logout_url, status_code=status.HTTP_405_METHOD_NOT_ALLOWED)
|
nhippenmeyer/django | refs/heads/master | django/contrib/sites/migrations/0002_alter_domain_unique.py | 170 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.sites.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='site',
name='domain',
field=models.CharField(max_length=100, unique=True, validators=[django.contrib.sites.models._simple_domain_name_validator], verbose_name='domain name'),
),
]
|
XeCycle/indico | refs/heads/master | indico/web/http_api/metadata/jsonp.py | 2 | # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.web.http_api.metadata.json import JSONSerializer
class JSONPSerializer(JSONSerializer):
"""
    Just adds the JSONP callback prefix
"""
_mime = 'application/javascript'
def _execute(self, results):
return "// fetched from Indico\n%s(%s);" % \
(self._query_params.get('jsonp', 'read'),
super(JSONPSerializer, self)._execute(results))
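# Illustrative note (not part of the original Indico module): given the code
# above, a request without a 'jsonp' query parameter and a payload such as
# {"count": 1} would be serialized roughly as:
#
#   // fetched from Indico
#   read({"count": 1});
#
# i.e. the JSON produced by JSONSerializer is wrapped in the requested callback.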
|
Mark24Code/python | refs/heads/master | NKUCodingCat/0015/0015.py | 40 | #coding=utf-8
import json, xlwt, os
# read the GBK-encoded JSON mapping from city.txt and dump it into an .xls sheet
f = open(os.path.split(os.path.realpath(__file__))[0]+"/city.txt")
city = json.loads(f.read().decode("GBK"))
f.close()
xls = xlwt.Workbook()
sheet = xls.add_sheet("city")
for row, (key, value) in enumerate(city.items()):
    sheet.write(row, 0, key)
    sheet.write(row, 1, value)
xls.save(os.path.split(os.path.realpath(__file__))[0]+"/city.xls") |
heimdalerp/heimdalerp | refs/heads/master | invoice/serializers.py | 2 | from decimal import Decimal
from contact.models import Contact
from contact.serializers import ContactSerializer
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from invoice import models
from persons.models import Company, PhysicalAddress
from persons.serializers import CompanySerializer, PhysicalAddressSerializer
from rest_framework.serializers import (HyperlinkedIdentityField,
HyperlinkedModelSerializer,
ValidationError)
class FiscalPositionSerializer(HyperlinkedModelSerializer):
companies = HyperlinkedIdentityField(
view_name='api:invoice:fiscalposition-companies'
)
contacts = HyperlinkedIdentityField(
view_name='api:invoice:fiscalposition-contacts'
)
class Meta:
model = models.FiscalPosition
fields = (
'url',
'id',
'name',
'code',
'companies',
'contacts'
)
extra_kwargs = {
'url': {
'view_name': 'api:invoice:fiscalposition-detail'
}
}
class ContactInvoiceSerializer(HyperlinkedModelSerializer):
contact_contact = ContactSerializer()
fiscal_address = PhysicalAddressSerializer()
invoices = HyperlinkedIdentityField(
view_name='api:invoice:contactinvoice-invoices'
)
class Meta:
model = models.ContactInvoice
fields = (
'url',
'id',
'contact_contact',
'legal_name',
'fiscal_position',
'fiscal_address',
'invoices'
)
extra_kwargs = {
'url': {
'view_name': 'api:invoice:contactinvoice-detail'
},
'fiscal_position': {
'view_name': 'api:invoice:fiscalposition-detail'
}
}
@transaction.atomic
def create(self, validated_data): # TODO: Use existing objects.
contact_contact_data = validated_data.pop('contact_contact')
home_address_data = contact_contact_data.pop('home_address')
home_address = PhysicalAddress.objects.create(
**home_address_data
)
contact_contact_data['home_address'] = home_address
contact_contact = Contact.objects.create(
**contact_contact_data
)
validated_data['contact_contact'] = contact_contact
fiscal_address_data = validated_data.pop('fiscal_address')
fiscal_address = PhysicalAddress.objects.create(
**fiscal_address_data
)
validated_data['fiscal_address'] = fiscal_address
invoice_contact = models.ContactInvoice.objects.create(
**validated_data
)
return invoice_contact
@transaction.atomic
def update(self, instance, validated_data):
contact_contact_data = validated_data.pop('contact_contact')
home_address_data = contact_contact_data.pop('home_address')
instance.contact_contact.home_address.street_address = (
home_address_data.get(
'street_address',
instance.contact_contact.home_address.street_address
)
)
instance.contact_contact.home_address.floor_number = (
home_address_data.get(
'floor_number',
instance.contact_contact.home_address.floor_number
)
)
instance.contact_contact.home_address.apartment_number = (
home_address_data.get(
'apartment_number',
instance.contact_contact.home_address.apartment_number
)
)
instance.contact_contact.home_address.locality = (
home_address_data.get(
'locality',
instance.contact_contact.home_address.locality
)
)
instance.contact_contact.home_address.postal_code = (
home_address_data.get(
'postal_code',
instance.contact_contact.home_address.postal_code
)
)
instance.contact_contact.home_address.save()
instance.contact_contact.birth_date = contact_contact_data.get(
'birth_date',
instance.contact_contact.birth_date
)
instance.contact_contact.born_in = contact_contact_data.get(
'born_in',
instance.contact_contact.born_in
)
instance.contact_contact.phone_numbers = contact_contact_data.get(
'phone_numbers',
instance.contact_contact.phone_numbers
)
instance.contact_contact.extra_emails = contact_contact_data.get(
'extra_emails',
instance.contact_contact.extra_emails
)
instance.contact_contact.name = contact_contact_data.get(
'name',
instance.contact_contact.name
)
instance.contact_contact.contact_type = contact_contact_data.get(
'contact_type',
instance.contact_contact.contact_type
)
instance.contact_contact.save()
fiscal_address_data = validated_data.pop('fiscal_address')
instance.fiscal_address.street_address = fiscal_address_data.get(
'street_address', instance.fiscal_address.street_address
)
instance.fiscal_address.floor_number = fiscal_address_data.get(
'floor_number', instance.fiscal_address.floor_number
)
instance.fiscal_address.apartment_number = fiscal_address_data.get(
'apartment_number', instance.fiscal_address.apartment_number
)
instance.fiscal_address.locality = fiscal_address_data.get(
'locality', instance.fiscal_address.locality
)
instance.fiscal_address.postal_code = fiscal_address_data.get(
'postal_code', instance.fiscal_address.postal_code
)
instance.fiscal_address.save()
instance.fiscal_position = validated_data.get(
'fiscal_position', instance.fiscal_position
)
instance.legal_name = validated_data.get(
'legal_name', instance.legal_name
)
instance.save()
return instance
class CompanyInvoiceSerializer(HyperlinkedModelSerializer):
persons_company = CompanySerializer()
fiscal_address = PhysicalAddressSerializer()
products = HyperlinkedIdentityField(
view_name='api:invoice:companyinvoice-products'
)
invoices = HyperlinkedIdentityField(
view_name='api:invoice:companyinvoice-invoices'
)
class Meta:
model = models.CompanyInvoice
fields = (
'url',
'id',
'persons_company',
'legal_name',
'initiated_activities',
'fiscal_position',
'fiscal_address',
'products',
'default_invoice_debit_account',
'default_invoice_credit_account',
'invoices'
)
extra_kwargs = {
'url': {
'view_name': 'api:invoice:companyinvoice-detail'
},
'fiscal_position': {
'view_name': 'api:invoice:fiscalposition-detail',
'required': False,
'allow_null': True
},
'fiscal_address': {
'required': False
},
'default_invoice_debit_account': {
'view_name': 'api:accounting:account-detail',
'required': False,
'allow_null': True
},
'default_invoice_credit_account': {
'view_name': 'api:accounting:account-detail',
'required': False,
'allow_null': True
}
}
@transaction.atomic
def create(self, validated_data): # TODO: Use existing objects.
persons_company_data = validated_data.pop('persons_company')
persons_company = Company.objects.create(
**persons_company_data
)
validated_data['persons_company'] = persons_company
fiscal_address_data = validated_data.pop('fiscal_address')
fiscal_address = PhysicalAddress.objects.create(
**fiscal_address_data
)
validated_data['fiscal_address'] = fiscal_address
company = models.CompanyInvoice.objects.create(**validated_data)
return company
@transaction.atomic
def update(self, instance, validated_data):
persons_company_data = validated_data.pop('persons_company')
instance.persons_company.fantasy_name = persons_company_data.get(
'fantasy_name',
instance.persons_company.fantasy_name
)
instance.persons_company.slogan = persons_company_data.get(
'slogan',
instance.persons_company.slogan
)
instance.persons_company.save()
instance.legal_name = validated_data.get(
'legal_name',
instance.legal_name
)
instance.initiated_activities = (
validated_data.get(
'initiated_activities',
instance.initiated_activities
)
)
fiscal_address_data = validated_data.pop('fiscal_address')
instance.fiscal_address.street_address = fiscal_address_data.get(
'street_address', instance.fiscal_address.street_address
)
instance.fiscal_address.floor_number = fiscal_address_data.get(
'floor_number', instance.fiscal_address.floor_number
)
instance.fiscal_address.apartment_number = fiscal_address_data.get(
'apartment_number', instance.fiscal_address.apartment_number
)
instance.fiscal_address.locality = fiscal_address_data.get(
'locality', instance.fiscal_address.locality
)
instance.fiscal_address.postal_code = fiscal_address_data.get(
'postal_code', instance.fiscal_address.postal_code
)
instance.fiscal_address.save()
instance.fiscal_position = validated_data.get(
'fiscal_position',
instance.fiscal_position
)
instance.default_invoice_debit_account = validated_data.get(
'default_invoice_debit_account',
instance.default_invoice_debit_account
)
instance.default_invoice_credit_account = validated_data.get(
'default_invoice_credit_account',
instance.default_invoice_credit_account
)
instance.save()
return instance
class VATSerializer(HyperlinkedModelSerializer):
products = HyperlinkedIdentityField(
view_name='api:invoice:vat-products'
)
class Meta:
model = models.VAT
fields = (
'url',
'id',
'name',
'tax',
'code',
'products'
)
extra_kwargs = {
'url': {
'view_name': 'api:invoice:vat-detail'
},
'tax': {
'coerce_to_string': False
}
}
class ProductSerializer(HyperlinkedModelSerializer):
invoice_lines = HyperlinkedIdentityField(
view_name='api:invoice:product-invoicelines'
)
class Meta:
model = models.Product
fields = (
'url',
'id',
'invoice_company',
'name',
'current_price',
'vat',
'invoice_lines'
)
extra_kwargs = {
'url': {
'view_name': 'api:invoice:product-detail'
},
'invoice_company': {
'view_name': 'api:invoice:companyinvoice-detail'
},
'vat': {
'view_name': 'api:invoice:vat-detail'
},
'current_price': {
'coerce_to_string': False
}
}
class InvoiceLineSerializer(HyperlinkedModelSerializer):
class Meta:
model = models.InvoiceLine
fields = (
'url',
'id',
'product',
'price_sold',
'discount',
'quantity',
'description'
)
extra_kwargs = {
'url': {
'view_name': 'api:invoice:invoiceline-detail'
},
'product': {
'view_name': 'api:invoice:product-detail'
},
'price_sold': {
'coerce_to_string': False
}
}
class InvoiceTypeSerializer(HyperlinkedModelSerializer):
invoices = HyperlinkedIdentityField(
view_name='api:invoice:invoicetype-invoices'
)
class Meta:
model = models.InvoiceType
fields = (
'url',
'id',
'name',
'invoice_type_class',
'code',
'invoices'
)
extra_kwargs = {
'url': {
'view_name': 'api:invoice:invoicetype-detail'
}
}
class InvoiceSerializer(HyperlinkedModelSerializer):
invoice_lines = InvoiceLineSerializer(many=True)
class Meta:
model = models.Invoice
fields = (
'url',
'id',
'invoice_company',
'invoice_contact',
'related_invoice',
'number',
'invoice_lines',
'invoice_type',
'invoice_date',
'notes',
'subtotal',
'total',
'status',
'transaction'
)
extra_kwargs = {
'url': {
'view_name': 'api:invoice:invoice-detail'
},
'related_invoice': {
'view_name': 'api:invoice:invoice-detail'
},
'invoice_type': {
'view_name': 'api:invoice:invoicetype-detail',
'required': False,
'allow_null': True
},
'invoice_company': {
'view_name': 'api:invoice:companyinvoice-detail'
},
'invoice_contact': {
'view_name': 'api:invoice:contactinvoice-detail'
},
'number': {
'required': False
},
'subtotal': {
'read_only': True,
'coerce_to_string': False
},
'total': {
'read_only': True,
'coerce_to_string': False
},
'status': {
'read_only': True
},
'transaction': {
'view_name': 'api:accounting:transaction-detail',
'read_only': True
}
}
@transaction.atomic
def create(self, validated_data):
validated_data['status'] = models.INVOICE_STATUSTYPE_DRAFT
number = validated_data.get('number')
if number is None or number == 0:
validated_data['number'] = 0
invoice_lines_data = validated_data.pop('invoice_lines')
invoice = models.Invoice.objects.create(
**validated_data
)
if invoice_lines_data is not None:
subtotal = Decimal('0.00')
total = Decimal('0.00')
for l_data in invoice_lines_data:
l = models.InvoiceLine.objects.create(**l_data)
if l.discount > 0.00:
price_aux = Decimal(
l.quantity * (
l.price_sold - (l.price_sold * l.discount)
)
)
subtotal += price_aux
total += (
price_aux + (price_aux * l.product.vat.tax)
)
else:
subtotal += l.price_sold * l.quantity
total += l.quantity * (
l.price_sold + (l.price_sold * l.product.vat.tax)
)
invoice.invoice_lines.add(l)
invoice.subtotal = subtotal
invoice.total = total
related_invoice = validated_data.get('related_invoice')
if related_invoice is not None:
if subtotal > related_invoice.subtotal:
raise ValidationError(_(
"This invoice can't exceed related invoice's subtotal."
))
else:
invoice.total = subtotal
invoice.save()
return invoice
@transaction.atomic
def update(self, instance, validated_data):
if instance.status is models.INVOICE_STATUSTYPE_DRAFT:
instance.invoice_company = validated_data.get(
'invoice_company',
instance.invoice_company
)
instance.invoice_contact = validated_data.get(
'invoice_contact',
instance.invoice_contact
)
instance.related_invoice = validated_data.get(
'related_invoice',
instance.related_invoice
)
instance.invoice_date = validated_data.get(
'invoice_date',
instance.invoice_date
)
instance.invoice_type = validated_data.get(
'invoice_type',
instance.invoice_type
)
instance.notes = validated_data.get(
'notes',
instance.notes
)
instance.number = validated_data.get(
'number',
instance.number
)
invoice_lines_data = validated_data.get('invoice_lines')
if invoice_lines_data is not None:
instance.invoice_lines.clear()
subtotal = Decimal('0.00')
total = Decimal('0.00')
for l_data in invoice_lines_data:
l = models.InvoiceLine.objects.create(**l_data)
instance.invoice_lines.add(l)
if l.discount > 0.00:
price_aux = Decimal(
l.quantity * (
l.price_sold - (l.price_sold * l.discount)
)
)
subtotal += price_aux
total += (
price_aux + (price_aux * l.product.vat.tax)
)
else:
subtotal += l.quantity * l.price_sold
total += l.quantity * (
l.price_sold + (
l.price_sold * l.product.vat.tax
)
)
instance.subtotal = subtotal
instance.total = total
if instance.related_invoice is not None:
if subtotal > instance.related_invoice.subtotal:
raise ValidationError(_(
"This invoice can't exceed related invoice's subtotal."
))
else:
instance.total = subtotal
instance.save()
return instance
class FiscalPositionHasInvoiceTypeAllowedSerializer(
HyperlinkedModelSerializer
):
class Meta:
model = models.FiscalPositionHasInvoiceTypeAllowed
read_only_fields = (
'url',
'id',
'fiscal_position_issuer',
'invoice_type',
'fiscal_position_receiver'
)
extra_kwargs = {
'url': {
'view_name': (
'api:invoice:fiscalpositionhasinvoicetypeallowed-detail'
)
},
'fiscal_position_issuer': {
'view_name': 'api:invoice:fiscalposition-detail'
},
'invoice_type': {
'view_name': 'api:invoice:invoicetype-detail'
},
'fiscal_position_receiver': {
'view_name': 'api:invoice:fiscalposition-detail'
}
}
|
google-research/evoflow | refs/heads/master | tests/backend/test_random.py | 1 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from termcolor import cprint
def arr2d():
return [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
def arr2dlarge():
return [[1, 2, 3, 10, 11, 12], [4, 5, 6, 13, 14, 15],
[7, 8, 9, 16, 17, 18]]
def test_full_shuffle_shape(backends):
    # use shapes where each dimension is different to make sure we get back
    # the same shape. This might seem odd, but there is a real risk it is
    # broken due to how the tf op is implemented
SHAPES = [(10, ), (10, 20), (20, 10), (10, 20, 30), (30, 10, 20),
(10, 20, 30, 40), (40, 30, 20, 10)]
for B in backends:
print('\n==[Backend]==\n')
for shape in SHAPES:
t = B.normal(shape)
assert t.shape == shape
t = B.full_shuffle(t)
assert t.shape == shape
print('shape', shape, 't.shape', t.shape)
def test_shuffle_axis0(backends):
for B in backends:
t = B.tensor(arr2d())
# give it 20 tries to ensure consistency
for _ in range(20):
t = B.shuffle(t)
if t[0][0] != 1 or t[1][0] != 4:
break
assert t[0][0] in [1, 4, 7]
assert t[1][0] in [1, 4, 7]
assert t[2][0] in [1, 4, 7]
assert t[0][0] != 1 or t[1][0] != 4
def test_shuffle_axis1(backends):
for B in backends:
t = B.tensor(arr2d())
cprint(t, 'blue')
# give it 20 tries to ensure consistency
for _ in range(20):
t = B.shuffle(t, axis=1)
if t[0][0] != 1 or t[1][0] != 4:
break
cprint(t, 'green')
assert t[0][0] in [1, 2, 3]
assert t[1][0] in [4, 5, 6]
assert t[2][0] in [7, 8, 9]
assert t[0][0] != 1 or t[1][0] != 4
def test_full_shuffle(backends):
for B in backends:
t = B.tensor(arr2d())
cprint(t, 'blue')
        # give it multiple tries, as identity is a valid shuffle
for _ in range(20):
t = B.full_shuffle(t)
if (t[0][0] != 1 or t[1][0] != 4) and (t[0][1] != 2
or t[1][1] != 5): # noqa
break
cprint(t, 'green')
assert (t[0][0] != 1 or t[1][0] != 4)
assert (t[0][1] != 2 or t[1][1] != 5)
def test_randint_1D(backends):
for B in backends:
print(B)
t = B.randint(0, 11, shape=10)
print(t)
assert len(t) == 10
assert B.max(t) <= 10
assert B.min(t) >= 0
def test_single_number(backends):
for B in backends:
t = B.randint(11)
assert t <= 10
assert t >= 0
t = B.randint(5, 11)
assert t <= 10
assert t >= 5
def test_randint_2SD(backends):
for B in backends:
t = B.randint(0, 11, shape=(10, 20))
assert t.shape == (10, 20)
assert B.max(t) <= 10
assert B.min(t) >= 0
|
nash-x/hws | refs/heads/master | neutron/agent/l3_proxy.py | 1 | '''
Created on 2014-5-23
'''
import sys
import datetime
import eventlet
eventlet.monkey_patch()
import netaddr
import os
from oslo.config import cfg
from oslo import messaging
import Queue
import random
import socket
import time
from neutron.agent.common import config
from neutron.agent import l3_ha_agent
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent import rpc as agent_rpc
from neutron.common import config as common_config
from neutron.common import constants as l3_constants
from neutron.common import ipv6_utils
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils as common_utils
from neutron import context
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.openstack.common import service
from neutron.openstack.common import timeutils
from neutron.openstack.common import jsonutils
from neutron import service as neutron_service
from neutron.services.firewall.agents.l3reference import firewall_l3_proxy
from neutron.plugins.l2_proxy.agent import neutron_proxy_context
from neutron.plugins.l2_proxy.agent import clients
from neutronclient.common import exceptions
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'
INTERNAL_DEV_PREFIX = 'qr-'
EXTERNAL_DEV_PREFIX = 'qg-'
SNAT_INT_DEV_PREFIX = 'sg-'
FIP_NS_PREFIX = 'fip-'
SNAT_NS_PREFIX = 'snat-'
FIP_2_ROUTER_DEV_PREFIX = 'fpr-'
ROUTER_2_FIP_DEV_PREFIX = 'rfp-'
FIP_EXT_DEV_PREFIX = 'fg-'
FIP_LL_SUBNET = '169.254.30.0/23'
# Route Table index for FIPs
FIP_RT_TBL = 16
# Rule priority range for FIPs
FIP_PR_START = 32768
FIP_PR_END = FIP_PR_START + 40000
RPC_LOOP_INTERVAL = 1
FLOATING_IP_CIDR_SUFFIX = '/32'
# Lower value is higher priority
PRIORITY_RPC = 0
PRIORITY_SYNC_ROUTERS_TASK = 1
DELETE_ROUTER = 1
AGENTS_SCHEDULER_OPTS = [
cfg.IntOpt('dhcp_agents_per_network', default=2,
help=_('Number of DHCP agents scheduled to host a network.')),
]
class L3PluginApi(n_rpc.RpcProxy):
"""Agent side of the l3 agent RPC API.
API version history:
1.0 - Initial version.
1.1 - Floating IP operational status updates
1.2 - DVR support: new L3 plugin methods added.
- get_ports_by_subnet
- get_agent_gateway_port
Needed by the agent when operating in DVR/DVR_SNAT mode
1.3 - Get the list of activated services
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic, host):
super(L3PluginApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.host = host
def get_routers(self, context, router_ids=None):
"""Make a remote process call to retrieve the sync data for routers."""
return self.call(context,
self.make_msg('sync_routers', host=self.host,
router_ids=router_ids))
def get_external_network_id(self, context):
"""Make a remote process call to retrieve the external network id.
@raise n_rpc.RemoteError: with TooManyExternalNetworks as
                                   exc_type if there is more than one
external network
"""
return self.call(context,
self.make_msg('get_external_network_id',
host=self.host))
def update_floatingip_statuses(self, context, router_id, fip_statuses):
"""Call the plugin update floating IPs's operational status."""
return self.call(context,
self.make_msg('update_floatingip_statuses',
router_id=router_id,
fip_statuses=fip_statuses),
version='1.1')
def get_ports_by_subnet(self, context, subnet_id):
"""Retrieve ports by subnet id."""
return self.call(context,
self.make_msg('get_ports_by_subnet', host=self.host,
subnet_id=subnet_id),
topic=self.topic,
version='1.2')
def get_agent_gateway_port(self, context, fip_net):
"""Get or create an agent_gateway_port."""
return self.call(context,
self.make_msg('get_agent_gateway_port',
network_id=fip_net, host=self.host),
topic=self.topic,
version='1.2')
def get_service_plugin_list(self, context):
"""Make a call to get the list of activated services."""
return self.call(context,
self.make_msg('get_service_plugin_list'),
topic=self.topic,
version='1.3')
def update_router_extern_ip_map(self, context, router_id, gateway_ip):
"""update router and extern ip mapping"""
return self.call(context,
self.make_msg('update_router_extern_ip_map',
router_id=router_id, host=self.host,
gateway_ip=gateway_ip),
topic=self.topic,
version='1.2')
def get_extra_routes_by_subnet(self, context, router_id, subnet_id, gw_port_id=None):
"""get extra routes for router by subnet id"""
return self.call(context,
self.make_msg('get_extra_routes_by_subnet',
router_id=router_id, host=self.host,
subnet_id=subnet_id,
gw_port_id=gw_port_id),
topic=self.topic,
version='1.2')
def get_network_binding_host(self, context, router_id, networks):
"""get network binding host by network id"""
return self.call(context,
self.make_msg('get_network_binding_host',
networks=networks, host=self.host,
router_id=router_id),
topic=self.topic,
version='1.2')
class LinkLocalAddressPair(netaddr.IPNetwork):
def __init__(self, addr):
super(LinkLocalAddressPair, self).__init__(addr)
def get_pair(self):
"""Builds an address pair from the first and last addresses. """
return (netaddr.IPNetwork("%s/%s" % (self.network, self.prefixlen)),
netaddr.IPNetwork("%s/%s" % (self.broadcast, self.prefixlen)))
class LinkLocalAllocator(object):
"""Manages allocation of link local IP addresses.
These link local addresses are used for routing inside the fip namespaces.
The associations need to persist across agent restarts to maintain
consistency. Without this, there is disruption in network connectivity
as the agent rewires the connections with the new IP address assocations.
Persisting these in the database is unnecessary and would degrade
performance.
"""
def __init__(self, state_file, subnet):
"""Read the file with previous allocations recorded.
See the note in the allocate method for more detail.
"""
self.state_file = state_file
subnet = netaddr.IPNetwork(subnet)
self.allocations = {}
self.remembered = {}
for line in self._read():
key, cidr = line.strip().split(',')
self.remembered[key] = LinkLocalAddressPair(cidr)
self.pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31))
self.pool.difference_update(self.remembered.values())
def allocate(self, key):
"""Try to allocate a link local address pair.
I expect this to work in all cases because I expect the pool size to be
large enough for any situation. Nonetheless, there is some defensive
programming in here.
Since the allocations are persisted, there is the chance to leak
allocations which should have been released but were not. This leak
could eventually exhaust the pool.
So, if a new allocation is needed, the code first checks to see if
there are any remembered allocations for the key. If not, it checks
the free pool. If the free pool is empty then it dumps the remembered
allocations to free the pool. This final desparate step will not
happen often in practice.
"""
if key in self.remembered:
self.allocations[key] = self.remembered.pop(key)
return self.allocations[key]
if not self.pool:
            # Desperate times. Try to get more into the pool.
self.pool.update(self.remembered.values())
self.remembered.clear()
if not self.pool:
# More than 256 routers on a compute node!
raise RuntimeError(_("Cannot allocate link local address"))
self.allocations[key] = self.pool.pop()
self._write_allocations()
return self.allocations[key]
def release(self, key):
self.pool.add(self.allocations.pop(key))
self._write_allocations()
def _write_allocations(self):
current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()]
remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()]
current.extend(remembered)
self._write(current)
def _write(self, lines):
with open(self.state_file, "w") as f:
f.writelines(lines)
def _read(self):
if not os.path.exists(self.state_file):
return []
with open(self.state_file) as f:
return f.readlines()
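# Illustrative sketch (not part of the original agent; the state-file path and
# variable names below are assumptions for the example only): the allocator
# hands out /31 link-local pairs from FIP_LL_SUBNET and remembers them across
# restarts via its state file.
#
#   allocator = LinkLocalAllocator('/tmp/fip-linklocal.txt', FIP_LL_SUBNET)
#   pair = allocator.allocate(router_id)    # e.g. 169.254.30.0/31
#   rfp_ip, fpr_ip = pair.get_pair()        # the two ends of the /31 pair
#   ...
#   allocator.release(router_id)            # returns the /31 to the free pool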
class CascadedRouterInfo():
def __init__(self, router):
self.router = router
self.router_port = []
class RouterInfo(l3_ha_agent.RouterMixin):
def __init__(self, router_id, root_helper, use_namespaces, router,
use_ipv6=False):
self.router_id = router_id
self.cascaded_router_id = None
self.extern_extra_routes = {}
self.extra_routes_is_update = False
self.local_internal_ports = []
self.cascaded_router = None
self.cascaded_extern_net_id = None
self.cascaded_gateway_subnets = {'tunnel_subnet_id': None,
'extern_subnet_id': None}
self.ex_gw_port = None
self._snat_enabled = None
self._snat_action = None
self.internal_ports = []
self.snat_ports = []
self.floating_ips = set()
self.floating_ips_dict = {}
self.root_helper = root_helper
# Invoke the setter for establishing initial SNAT action
self.router = router
self.routes = []
# DVR Data
self.dist_fip_count = 0
super(RouterInfo, self).__init__()
@property
def router(self):
return self._router
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
# enable_snat by default if it wasn't specified by plugin
self._snat_enabled = self._router.get('enable_snat', True)
# Set a SNAT action for the router
if self._router.get('gw_port'):
self._snat_action = ('add_rules' if self._snat_enabled
else 'remove_rules')
elif self.ex_gw_port:
# Gateway port was removed, remove rules
self._snat_action = 'remove_rules'
def perform_snat_action(self, snat_callback, *args):
# Process SNAT rules for attached subnets
if self._snat_action:
snat_callback(self, self._router.get('gw_port'),
*args, action=self._snat_action)
self._snat_action = None
class RouterUpdate(object):
"""Encapsulates a router update
An instance of this object carries the information necessary to prioritize
and process a request to update a router.
"""
def __init__(self, router_id, priority,
action=None, router=None, timestamp=None):
self.priority = priority
self.timestamp = timestamp
if not timestamp:
self.timestamp = timeutils.utcnow()
self.id = router_id
self.action = action
self.router = router
def __lt__(self, other):
"""Implements priority among updates
        Lower numerical priority always gets precedence. When comparing two
        updates of the same priority, the one with the earlier timestamp
        gets precedence. In the unlikely event that the timestamps are also
        equal, it falls back to a simple comparison of ids, meaning the
precedence is essentially random.
"""
if self.priority != other.priority:
return self.priority < other.priority
if self.timestamp != other.timestamp:
return self.timestamp < other.timestamp
return self.id < other.id
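# Illustrative note (not part of the original agent): given the ordering above,
# an RPC-triggered update (PRIORITY_RPC == 0) always sorts ahead of a full-sync
# update (PRIORITY_SYNC_ROUTERS_TASK == 1) in the priority queue, and two
# updates with equal priority are processed oldest-first, e.g.:
#
#   RouterUpdate('r1', PRIORITY_RPC) < RouterUpdate('r1', PRIORITY_SYNC_ROUTERS_TASK)  # True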
class ExclusiveRouterProcessor(object):
"""Manager for access to a router for processing
This class controls access to a router in a non-blocking way. The first
instance to be created for a given router_id is granted exclusive access to
the router.
Other instances may be created for the same router_id while the first
instance has exclusive access. If that happens then it doesn't block and
wait for access. Instead, it signals to the master instance that an update
came in with the timestamp.
This way, a thread will not block to wait for access to a router. Instead
it effectively signals to the thread that is working on the router that
something has changed since it started working on it. That thread will
simply finish its current iteration and then repeat.
This class keeps track of the last time that a router data was fetched and
processed. The timestamp that it keeps must be before when the data used
to process the router last was fetched from the database. But, as close as
possible. The timestamp should not be recorded, however, until the router
has been processed using the fetch data.
"""
_masters = {}
_router_timestamps = {}
def __init__(self, router_id):
self._router_id = router_id
if router_id not in self._masters:
self._masters[router_id] = self
self._queue = []
self._master = self._masters[router_id]
def _i_am_master(self):
return self == self._master
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self._i_am_master():
del self._masters[self._router_id]
def _get_router_data_timestamp(self):
return self._router_timestamps.get(self._router_id,
datetime.datetime.min)
def fetched_and_processed(self, timestamp):
"""Records the data timestamp after it is used to update the router"""
new_timestamp = max(timestamp, self._get_router_data_timestamp())
self._router_timestamps[self._router_id] = new_timestamp
def queue_update(self, update):
"""Queues an update from a worker
This is the queue used to keep new updates that come in while a router
is being processed. These updates have already bubbled to the front of
the RouterProcessingQueue.
"""
self._master._queue.append(update)
def updates(self):
"""Processes the router until updates stop coming
Only the master instance will process the router. However, updates may
come in from other workers while it is in progress. This method loops
until they stop coming.
"""
if self._i_am_master():
while self._queue:
# Remove the update from the queue even if it is old.
update = self._queue.pop(0)
# Process the update only if it is fresh.
if self._get_router_data_timestamp() < update.timestamp:
yield update
class RouterProcessingQueue(object):
"""Manager of the queue of routers to process."""
def __init__(self):
self._queue = Queue.PriorityQueue()
def add(self, update):
self._queue.put(update)
def each_update_to_next_router(self):
"""Grabs the next router from the queue and processes
This method uses a for loop to process the router repeatedly until
updates stop bubbling to the front of the queue.
"""
next_update = self._queue.get()
with ExclusiveRouterProcessor(next_update.id) as rp:
# Queue the update whether this worker is the master or not.
rp.queue_update(next_update)
# Here, if the current worker is not the master, the call to
# rp.updates() will not yield and so this will essentially be a
# noop.
for update in rp.updates():
yield (rp, update)
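# Illustrative sketch (not part of the original agent; the helper names in the
# loop body are assumptions for the example only): a worker typically drains
# the queue like this, re-processing a router until no fresher update remains.
#
#   for rp, update in self._queue.each_update_to_next_router():
#       router = fetch_router_data(update.id)          # hypothetical fetch
#       process_router(router)                         # hypothetical processing
#       rp.fetched_and_processed(update.timestamp)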
class L3NATAgent(firewall_l3_proxy.FWaaSL3AgentRpcCallback,
l3_ha_agent.AgentMixin,
manager.Manager):
"""Manager for L3NatAgent
API version history:
1.0 initial Version
1.1 changed the type of the routers parameter
to the routers_updated method.
It was previously a list of routers in dict format.
It is now a list of router IDs only.
Per rpc versioning rules, it is backwards compatible.
1.2 - DVR support: new L3 agent methods added.
- add_arp_entry
- del_arp_entry
Needed by the L3 service when dealing with DVR
"""
RPC_API_VERSION = '1.2'
OPTS = [
cfg.StrOpt('agent_mode', default='legacy',
help=_("The working mode for the agent. Allowed modes are: "
"'legacy' - this preserves the existing behavior "
"where the L3 agent is deployed on a centralized "
"networking node to provide L3 services like DNAT, "
"and SNAT. Use this mode if you do not want to "
"adopt DVR. 'dvr' - this mode enables DVR "
"functionality and must be used for an L3 agent "
"that runs on a compute host. 'dvr_snat' - this "
"enables centralized SNAT support in conjunction "
"with DVR. This mode must be used for an L3 agent "
"running on a centralized node (or in single-host "
"deployments, e.g. devstack)")),
cfg.StrOpt('external_network_bridge', default='br-ex',
help=_("Name of bridge used for external network "
"traffic.")),
cfg.StrOpt('router_id', default='',
help=_("If namespaces is disabled, the l3 agent can only"
" configure a router that has the matching router "
"ID.")),
cfg.BoolOpt('handle_internal_only_routers',
default=True,
help=_("Agent should implement routers with no gateway")),
cfg.StrOpt('gateway_external_network_id', default='',
help=_("UUID of external network for routers implemented "
"by the agents.")),
cfg.StrOpt('nexthop_over_tunneling',
default="none",
help=_('The mode for nexthop over tunneling '
'The Allowed values are:none or gre')),
cfg.BoolOpt('is_notify_l2proxy', default=False,
help=_("is_notify_l2proxy, default value is False, "
"means it will not notify l2proxy when creating"
" router ports")),
cfg.StrOpt('l2proxy_sock_path', default='/var/l2proxysock',
help=_("socket path when query ports from nova_proxy")),
cfg.IntOpt('pagination_limit', default=2,
help=_("list ports pagination limit, if value is -1,"
"means no pagination")),
cfg.StrOpt('cascaded_extern_subnet_cidr',
default='100.64.1.0/24',
help=_("cascaded_extern_subnet_cidr")),
cfg.StrOpt('cascaded_start_extern_ip',
default='100.64.1.2',
help=_("cascaded_start_extern_ip")),
cfg.StrOpt('cascaded_end_extern_ip',
default='100.64.1.254',
help=_("cascaded_end_extern_ip")),
cfg.StrOpt('cascaded_extern_network_type',
default='flat',
help=_("cascaded_extern_net_type")),
cfg.StrOpt('cascaded_extern_physical_network',
default='external',
help=_("cascaded_extern_physical_net")),
cfg.StrOpt('proxy_router_distributed', default='False',
help=_("Setting the 'proxy_router_distributed' flag "
"to 'False' will default to the creation "
"of distributed tenant routers in cascaded.")),
cfg.BoolOpt('is_public_cloud', default=False,
help=_("Setting True when its public_cloud sense")),
cfg.StrOpt('internal_relay_network_name',
default='default',
                   help=_('Allow the port to have duplicate floating IPs')),
cfg.StrOpt('proxy_router_enable_snat', default='',
help=_("Setting the 'proxy_router_enable_snat' flag "
"to 'False' will default to the creation "
"of router in cascaded."))
]
AGENT_OPTS = [
cfg.StrOpt('region_name', default=None,
help=_("cascading neutron_region name to use")),
cfg.StrOpt('neutron_region_name', default=None,
help=_("cascaded neutron_region name to use")),
cfg.StrOpt('neutron_admin_auth_url',
default='http://127.0.0.1:35357/v2.0',
help=_("keystone auth url to use")),
cfg.StrOpt('neutron_admin_user',
help=_("access neutron user name to use"),
secret=True),
cfg.StrOpt('neutron_admin_tenant_name',
help=_("access neutron tenant to use"),
secret=True),
cfg.BoolOpt('auth_insecure',
default=False,
help=_("Turn off verification of the certificate for"
" ssl")),
cfg.StrOpt('admin_password',
help=_("access neutron password to use"),
secret=True),
]
def __init__(self, host, conf=None):
if conf:
self.conf = conf
else:
self.conf = cfg.CONF
self.root_helper = config.get_root_helper(self.conf)
self.router_info = {}
self._check_config_params()
self.context = context.get_admin_context_without_session()
self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
self.fullsync = True
self.sync_progress = False
# Get the list of service plugins from Neutron Server
# This is the first place where we contact neutron-server on startup
# so retry in case its not ready to respond.
retry_count = 5
while True:
retry_count = retry_count - 1
try:
self.neutron_service_plugins = (
self.plugin_rpc.get_service_plugin_list(self.context))
except n_rpc.RemoteError as e:
with excutils.save_and_reraise_exception() as ctx:
ctx.reraise = False
LOG.warning(_LW('l3-agent cannot check service plugins '
'enabled at the neutron server when '
'startup due to RPC error. It happens '
'when the server does not support this '
'RPC API. If the error is '
'UnsupportedVersion you can ignore this '
'warning. Detail message: %s'), e)
self.neutron_service_plugins = None
except messaging.MessagingTimeout as e:
with excutils.save_and_reraise_exception() as ctx:
if retry_count > 0:
ctx.reraise = False
LOG.warning(_LW('l3-agent cannot check service '
'plugins enabled on the neutron '
'server. Retrying. '
'Detail message: %s'), e)
continue
break
        self.cascaded_router_map = {}  # key is the cascaded router_name, the same as the cascading router_id
        self.network_map = {}  # key is the cascaded network_name, the same as the cascading network_id
self.subnet_map = {}
self.csd_router_port_map = {}
self.cascaded_extern_net_id = None
self.l2proxy_sock_path = self.conf.l2proxy_sock_path
self.csg_client = clients.CascadeNeutronClient(clients.CASCADING)
self.csd_client = clients.CascadeNeutronClient(clients.CASCADED)
#must after csg_client and csd_client init
self.cache_object_map_info()
self._queue = RouterProcessingQueue()
super(L3NATAgent, self).__init__(conf=self.conf)
self.target_ex_net_id = None
self.use_ipv6 = ipv6_utils.is_enabled()
def _check_config_params(self):
"""Check items in configuration files.
Check for required and invalid configuration items.
The actual values are not verified for correctness.
"""
if not self.conf.interface_driver:
msg = _('An interface driver must be specified')
LOG.error(msg)
raise SystemExit(1)
if not self.conf.use_namespaces and not self.conf.router_id:
msg = _('Router id is required if not using namespaces.')
LOG.error(msg)
raise SystemExit(1)
def _fetch_external_net_id(self, force=False):
"""Find UUID of single external network for this agent."""
if self.conf.gateway_external_network_id:
return self.conf.gateway_external_network_id
# L3 agent doesn't use external_network_bridge to handle external
# networks, so bridge_mappings with provider networks will be used
# and the L3 agent is able to handle any external networks.
if not self.conf.external_network_bridge:
return
if not force and self.target_ex_net_id:
return self.target_ex_net_id
try:
self.target_ex_net_id = self.plugin_rpc.get_external_network_id(
self.context)
return self.target_ex_net_id
except n_rpc.RemoteError as e:
with excutils.save_and_reraise_exception() as ctx:
if e.exc_type == 'TooManyExternalNetworks':
ctx.reraise = False
msg = _(
"The 'gateway_external_network_id' option must be "
"configured for this agent as Neutron has more than "
"one external network.")
raise Exception(msg)
def _router_added(self, router_id, router):
ri = RouterInfo(router_id, self.root_helper,
self.conf.use_namespaces, router,
use_ipv6=self.use_ipv6)
self.router_info[router_id] = ri
def _router_removed(self, router_id):
ri = self.router_info.get(router_id)
if ri is None:
LOG.warn(_("Info for router %s were not found. "
"Skipping router removal"), router_id)
return
ri.router['gw_port'] = None
ri.router[l3_constants.INTERFACE_KEY] = []
ri.router[l3_constants.FLOATINGIP_KEY] = []
self.process_router(ri)
del self.router_info[router_id]
        #hyp: delete fip_agent_gw_port and ext_gw_port when the router is deleted
if self.conf.is_public_cloud:
req_props = {'device_id': router_id}
ports_ret = self.csg_client('list_ports', **req_props)
if ports_ret and ports_ret['ports'] and len(ports_ret['ports']):
for port in ports_ret['ports']:
bodyResponse = self.csg_client('delete_port', port['id'])
LOG.debug(_('TRICIRCLE delete port, Response:%s'), str(bodyResponse))
def get_one_compute_port(self, ri, port):
# Get DVR ports for subnet
if 'id' not in port['subnet'] or ri.router['distributed'] is False:
return
subnet_ports = (
self.plugin_rpc.get_ports_by_subnet(self.context,
port['subnet']['id']))
LOG.debug(_('DVR: subnet_ports: %s'), subnet_ports)
for p in subnet_ports:
# TODO: check for multiple subnets on port case
if ('compute' in p['device_owner'] and
p['binding:host_id'] == self.conf.host and
p['binding:profile']):
return p
def _set_subnet_arp_info(self, ri, port):
"""Set ARP info retrieved from Plugin for existing ports."""
if 'id' not in port['subnet'] or not ri.router['distributed']:
return
subnet_id = port['subnet']['id']
subnet_ports = (
self.plugin_rpc.get_ports_by_subnet(self.context,
subnet_id))
for p in subnet_ports:
if (p['device_owner'] not in (
l3_constants.DEVICE_OWNER_ROUTER_INTF,
l3_constants.DEVICE_OWNER_DVR_INTERFACE)):
for fixed_ip in p['fixed_ips']:
self._update_arp_entry(ri, fixed_ip['ip_address'],
p['mac_address'],
subnet_id, 'add')
def _set_subnet_info(self, port):
ips = port['fixed_ips']
if not ips:
raise Exception(_("Router port %s has no IP address") % port['id'])
if len(ips) > 1:
LOG.error(_("Ignoring multiple IPs on router port %s"),
port['id'])
prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
def create_cascaded_router(self, ri, external_gateway_info=None):
distributed = ri.router['distributed']
router_name = self._get_cascaded_router_name(ri.router['id'])
req_props = {'name': router_name,
'tenant_id': ri.router['tenant_id']}
if not self.conf.proxy_router_distributed:
req_props['distributed'] = distributed or False
else:
req_props['distributed'] = self.conf.proxy_router_distributed
if(external_gateway_info):
req_props["external_gateway_info"] = external_gateway_info
router_ret = self.csd_client('create_router', {'router': req_props})
if(not router_ret or
(router_ret and (not router_ret.get('router')))):
LOG.debug(_("cascaded router created failed, "
"router name:%s"), router_name)
return
LOG.debug(_('TRICIRCLE create router, Response:%s'), str(router_ret))
self.cascaded_router_map[router_name] = CascadedRouterInfo(router_ret['router'])
return router_ret['router']['id']
def delete_cascaded_router_sync(self, router_id, csd_router_id):
try:
self.delete_cascaded_router(router_id, csd_router_id)
except Exception, e:
LOG.error("TRICIRCLE delete router failed, clean csd router and try again")
self._delete_cascaded_floating_ips_by_router_id(csd_router_id)
self._delete_cascaded_interface_port(router_id, csd_router_id)
try:
self.delete_cascaded_router(router_id, csd_router_id)
except Exception, e:
LOG.error("TRICIRCLE delete router failed again")
def delete_cascaded_router(self, router_id, csd_router_id):
self.csd_client('delete_router', csd_router_id)
LOG.debug(_('TRICIRCLE delete router,router_id:%s,cascaded_router_id:'
'%s'), str(router_id), str(csd_router_id))
csd_router_name = self.get_router_name(router_id)
csd_router = self.cascaded_router_map.get(csd_router_name, None)
if(not csd_router):
LOG.error('TRICIRCLE Get router failed when delete_cascaded_router'
' %s, router %s', csd_router_id, router_id)
return
if(len(csd_router.router_port) == 0):
self.cascaded_router_map.pop(csd_router_name)
else:
LOG.warn(_('TRICIRCLE The router %s still has some router ports '
'[%s]'), csd_router_name, csd_router.router_port)
def clear_router_port_cache(self, router_id, port_id):
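        """Drop a router port from csd_router_port_map and from the cached cascaded router's port list."""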
csd_router_name = self.get_router_name(router_id)
csd_port_name = self.get_router_port_name(port_id)
csd_router = self.cascaded_router_map.get(csd_router_name, None)
if(not csd_router):
            LOG.error('TRICIRCLE get router failed when removing interface %s '
                      'from router %s', port_id, router_id)
return
csd_port = self.csd_router_port_map.pop(csd_port_name, None)
if(not csd_port):
            LOG.error('TRICIRCLE get cascaded router port failed when removing'
                      ' interface %s from router %s', port_id, router_id)
return
if(csd_port['id'] in csd_router.router_port):
csd_router.router_port.remove(csd_port['id'])
def list_cascaded_network_by_name(self, name):
search_opts = {'name': name}
cascaded_net = self.csd_client('list_networks', **search_opts)
return cascaded_net
def list_cascading_network_by_id(self, id):
search_opts = {'id': id}
cascaded_net = self.csg_client('list_networks', **search_opts)
return cascaded_net
def get_network_req(self, network):
req_network = {'network': {
'admin_state_up': network['admin_state_up'],
'name': self._get_cascaded_network_name(network['id']),
'tenant_id': network['tenant_id'],
'router:external': network['router:external'],
'shared': network['shared'],
}}
if network['provider:network_type'] in ['vxlan', 'gre']:
req_provider = {
'provider:network_type': network['provider:network_type'],
'provider:segmentation_id': network['provider:segmentation_id']}
elif network['provider:network_type'] == 'flat':
req_provider = {
'provider:network_type': network['provider:network_type'],
'provider:physical_network': network['provider:physical_network']}
elif network['provider:network_type'] == 'local':
req_provider = {
'provider:network_type': network['provider:network_type']}
else:
req_provider = {
'provider:network_type': network['provider:network_type'],
'provider:physical_network': network['provider:physical_network'],
'provider:segmentation_id': network['provider:segmentation_id']}
req_network['network'].update(req_provider)
return req_network
def create_cascaded_network(self, cascading_net_id):
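        """Create the cascading network's counterpart in the cascaded OpenStack; on a name conflict, reuse the existing network or clean it up and retry."""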
network_ret = self.list_cascading_network_by_id(cascading_net_id)
if(not network_ret or
(network_ret and (not network_ret.get('networks')))):
LOG.debug(_("cascading network list failed, "
"network id:%s"), cascading_net_id)
return
network_req = self.get_network_req(network_ret['networks'][0])
try:
bodyResponse = self.csd_client('create_network', network_req)
LOG.debug(_('TRICIRCLE Create network, Response:%s'),
str(bodyResponse))
return bodyResponse
except exceptions.Conflict:
LOG.debug(_('TRICIRCLE create network Conflicted, so list'))
name = self.get_network_name(cascading_net_id)
csd_net_ret = self.list_cascaded_network_by_name(name)
if(not csd_net_ret or
(csd_net_ret and (not csd_net_ret.get('networks')))):
LOG.debug(_("TRICIRCLE Cascading network list failed, name:%s, "
"try to delete conflict network"), name)
if self.delete_cascaded_conflict_network(network_req):
LOG.debug(_("TRICIRCLE Create network again"))
try:
bodyResponse = self.csd_client('create_network', network_req)
LOG.debug(_('TRICIRCLE Create network, Response:%s'),
str(bodyResponse))
return bodyResponse
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('TRICIRCLE Create network failed!Request:%s'), network_req)
return None
return None
LOG.debug(_('TRICIRCLE list network, Response:%s'),
str(csd_net_ret))
return {u'network': csd_net_ret['networks'][0]}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('TRICIRCLE Create network failed!Request:%s'), network_req)
return None
def delete_cascaded_conflict_network(self, cas_network):
cas_network = cas_network['network']
network_req = {}
if cas_network.get('provider:network_type', None):
network_req['provider:network_type'] = cas_network.get('provider:network_type')
if cas_network.get('provider:segmentation_id', None):
network_req['provider:segmentation_id'] = cas_network.get('provider:segmentation_id')
if cas_network.get('provider:physical_network', None):
network_req['provider:physical_network'] = cas_network.get('provider:physical_network')
        if not network_req.get('provider:segmentation_id'):
LOG.debug(_("network has no provider:segmentation_id, can't delete conflict network"))
return False
cad_network_ret = self.csd_client('list_networks', **network_req)
if (not cad_network_ret) or (cad_network_ret and not cad_network_ret.get('networks')):
LOG.debug(_("cannot find conflict network for cas_network_id: %s"), cas_network['id'])
return False
cad_networks = cad_network_ret.get('networks')
LOG.debug("find conflict networks: %s, delete them", cad_networks)
for cad_network in cad_networks:
self.delete_cascaded_network_by_id(cad_network['id'])
return True
def delete_cascaded_network_by_id(self, network_id):
"""
add by data consistency, if modify, modify delete_cascaded_network_by_id in l2_proxy.py too
"""
subnets_ret = self.list_cascaded_subnet_by_network_id(network_id)
if subnets_ret and len(subnets_ret.get('subnets')) > 0:
for subnet in subnets_ret.get('subnets'):
subnet_id = subnet['id']
ports_ret = self.list_cascaded_port_by_subnet_id(subnet_id)
if ports_ret and len(ports_ret.get('ports')) > 0:
for port in ports_ret.get('ports'):
if port['device_owner'] in [l3_constants.DEVICE_OWNER_DVR_INTERFACE,
l3_constants.DEVICE_OWNER_ROUTER_SNAT,
l3_constants.DEVICE_OWNER_AGENT_GW,
l3_constants.DEVICE_OWNER_ROUTER_INTF]:
LOG.info(_("Subnet %s is bound on a router"), subnet_id)
router_id = port['device_id']
self.delete_interface_for_cascaded_router_sync(router_id, subnet_id)
if self.validate_router_port_name(port.get('name')):
self.clear_router_port_cache(router_id, port.get('name').split('@')[1])
else:
try:
self.csd_client('delete_port', port['id'])
LOG.debug(_("Port %s was deleted successfully."), port['id'])
except Exception as e:
LOG.error('Delete cascaded port %s failed! Exception:%s',
port['id'], str(e))
continue
try:
self.csd_client('delete_subnet', subnet_id)
LOG.debug(_("Subnet %s was deleted successfully."), subnet_id)
except Exception as e:
LOG.error('Delete cascaded subnet %s failed! Exception:%s',
subnet_id, str(e))
continue
for i in range(l3_constants.DESTROY_RETRY):
try:
self.csd_client('delete_network', network_id)
LOG.debug(_("Network %s was deleted successfully."), network_id)
break
except Exception as e:
LOG.error('Delete cascaded network %s failed! Exception:%s',
network_id, str(e))
continue
def get_cascaded_network_id(self, csg_network_id):
cascaded_net_name = self.get_network_name(csg_network_id)
cascaded_network = self.network_map.get(cascaded_net_name)
if cascaded_network and not self.conf.is_public_cloud:
LOG.debug(_("TRICIRCLE cascaded network is get, "
"network:%s"), cascaded_network)
return cascaded_network['id']
network_ret = self.list_cascaded_network_by_name(cascaded_net_name)
if(network_ret and network_ret.get('networks')):
if(len(network_ret['networks']) > 1):
                LOG.warn(_('TRICIRCLE There is more than one network in'
                           ' cascaded neutron, all networks:%s'), network_ret)
            cascaded_network = network_ret['networks'][0]
            self.network_map[cascaded_net_name] = cascaded_network
            LOG.debug(_("TRICIRCLE cascaded network listed from cascaded neutron, "
                        "network:%s"), cascaded_network)
return cascaded_network['id']
return None
def get_cascaded_network(self, csg_network_id):
cascaded_net_name = self.get_network_name(csg_network_id)
cascaded_network = self.network_map.get(cascaded_net_name)
if cascaded_network:
LOG.debug(_("TRICIRCLE cascaded network is get, "
"network:%s"), cascaded_network)
return cascaded_network
network_ret = self.list_cascaded_network_by_name(cascaded_net_name)
if(network_ret and network_ret.get('networks')):
if(len(network_ret['networks']) > 1):
                LOG.warn(_('TRICIRCLE There is more than one network in'
                           ' cascaded neutron, all networks:%s'), network_ret)
            cascaded_network = network_ret['networks'][0]
            self.network_map[cascaded_net_name] = cascaded_network
            LOG.debug(_("TRICIRCLE cascaded network listed from cascaded neutron, "
                        "network:%s"), cascaded_network)
return cascaded_network
return None
def get_or_create_cascaded_net(self, csg_network_id):
        '''Get the cascaded network from cascaded neutron, or create it
        there if it does not exist yet.'''
csd_network = self.get_cascaded_network(csg_network_id)
if(csd_network):
return csd_network
cascaded_net_name = self.get_network_name(csg_network_id)
network_ret = self.create_cascaded_network(cascaded_net_name[8:])
if(not network_ret or
(network_ret and (not network_ret.get('network')))):
LOG.error(_("TRICIRCLE cascaded network created failed, "
"network name:%s"), cascaded_net_name)
return
cascaded_net = network_ret.get('network')
self.network_map[cascaded_net_name] = cascaded_net
return cascaded_net
def list_cascaded_port_by_subnet_id(self, id):
search_opts = {'fixed_ips': 'subnet_id=%s' % id}
cascaded_ports = self.csd_client('list_ports', **search_opts)
return cascaded_ports
def list_cascaded_subnet_by_network_id(self, id):
search_opts = {'network_id': [id]}
cascaded_subnet = self.csd_client('list_subnets', **search_opts)
return cascaded_subnet
def list_cascaded_subnet_by_name(self, name):
search_opts = {'name': name}
cascaded_subnet = self.csd_client('list_subnets', **search_opts)
return cascaded_subnet
def list_cascading_subnet_by_name(self, sub_name):
search_opts = {'name': sub_name}
cascading_subnet = self.csg_client('list_subnets', **search_opts)
return cascading_subnet
def list_cascading_subnet_by_id(self, sub_id):
search_opts = {'id': sub_id}
cascading_subnet = self.csg_client('list_subnets', **search_opts)
return cascading_subnet
def get_subnet_req(self, subnet):
csg_network_id = subnet['network_id']
csd_network_id = self.get_cascaded_network_id(csg_network_id)
if(not csd_network_id):
LOG.error(_("TRICIRCLE cascaded network get failed, "
"csg network id:%s"), csg_network_id)
return
subnet_req = {'subnet': {
'name': self._get_cascaded_subnet_name(subnet['id']),
'cidr': subnet['cidr'],
'enable_dhcp': False,
'allocation_pools': subnet['allocation_pools'],
'host_routes': subnet['host_routes'],
'dns_nameservers': subnet['dns_nameservers'],
'gateway_ip': subnet['gateway_ip'],
'ip_version': subnet['ip_version'],
'network_id': csd_network_id,
'tenant_id': subnet['tenant_id']}}
return subnet_req
def create_cascaded_subnet(self, cascading_subnet_id, cascaded_sub_name):
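        """Create the cascading subnet's counterpart in the cascaded OpenStack, removing the duplicate if one with the same name already exists."""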
subnet_ret = self.list_cascading_subnet_by_id(cascading_subnet_id)
if(not subnet_ret or
(subnet_ret and (not subnet_ret.get('subnets')))):
LOG.debug(_("TRICIRCLE cascading subnet list failed, "
"subnet id:%s"), cascading_subnet_id)
return
subnet_req = self.get_subnet_req(subnet_ret['subnets'][0])
try:
bodyResponse = self.csd_client('create_subnet', subnet_req)
LOG.debug(_('TRICIRCLE Create subnet, Response:%s'),
str(bodyResponse))
try:
subnet_ret = self.list_cascaded_subnet_by_name(cascaded_sub_name)
if(len(subnet_ret['subnets']) > 1):
subs = []
for subnet in subnet_ret['subnets']:
if subnet.get('id') == bodyResponse['subnet'].get('id'):
self.csd_client('delete_subnet', subnet.get('id'))
subs.append(subnet)
final_sub = [sub for sub in subnet_ret['subnets'] if sub not in subs]
return {u'subnet': final_sub[0]}
except Exception as e:
LOG.error('Delete cascaded subnet %s failed! Exception:%s',
bodyResponse.get('subnet'), str(e))
return bodyResponse
except exceptions.BadRequest:
LOG.debug(_('TRICIRCLE create subnet failed, so list!'))
name = self.get_subnet_name(cascading_subnet_id)
csd_sub_ret = self.list_cascaded_subnet_by_name(name)
if(not csd_sub_ret or
(csd_sub_ret and (not csd_sub_ret.get('subnets')))):
LOG.debug(_("TRICIRCLE Cascading subnet list failed, name:%s"), name)
return None
LOG.debug(_('TRICIRCLE list subnet, Response:%s'),
str(csd_sub_ret))
return {u'subnet': csd_sub_ret['subnets'][0]}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('TRICIRCLE Create subnet failed! Request:%s'), subnet_req)
return None
def get_cascaded_subnet(self, csg_subnet_id):
cascaded_sub_name = self.get_subnet_name(csg_subnet_id)
cascaded_subnet = self.subnet_map.get(cascaded_sub_name)
if cascaded_subnet:
LOG.debug(_("TRICIRCLE cascaded subnet is get, "
"subnet:%s"), cascaded_subnet)
return cascaded_subnet
subnet_ret = self.list_cascaded_subnet_by_name(cascaded_sub_name)
if(subnet_ret and subnet_ret.get('subnets')):
if(len(subnet_ret['subnets']) > 1):
                LOG.warn(_('TRICIRCLE There is more than one subnet in'
                           ' cascaded neutron, all subnets:%s'), subnet_ret)
            cascaded_subnet = subnet_ret['subnets'][0]
            self.subnet_map[cascaded_sub_name] = cascaded_subnet
            LOG.debug(_("TRICIRCLE cascaded subnet listed from cascaded neutron, "
                        "subnet:%s"), cascaded_subnet)
return cascaded_subnet
return None
def get_or_create_cascaded_subnet(self, csg_subnet_id):
cascaded_subnet = self.get_cascaded_subnet(csg_subnet_id)
if cascaded_subnet:
return cascaded_subnet
cascaded_sub_name = self.get_subnet_name(csg_subnet_id)
subnet_ret = self.create_cascaded_subnet(csg_subnet_id, cascaded_sub_name)
if(not subnet_ret or
(subnet_ret and (not subnet_ret.get('subnet')))):
LOG.error(_("TRICIRCLE cascaded subnet created failed, "
"cascading subnet id:%s"), cascaded_sub_name)
return
cascaded_subnet = subnet_ret.get('subnet')
self.subnet_map[cascaded_sub_name] = cascaded_subnet
return cascaded_subnet
def get_network_name(self, network_id):
return ('network@' + network_id)
def get_subnet_name(self, subnet_id):
return ('subnet@' + subnet_id)
def get_router_name(self, router_id):
return ('router@' + router_id)
def get_router_port_name(self, port_id):
return ('router_port@' + port_id)
def get_or_create_cascaded_router_port(self, cascaded_net_id, port):
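        """Return the id of the cascaded router port that mirrors the given cascading port, creating it with the same MAC and IP if it is not cached."""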
cascaded_port_name = self.get_router_port_name(port['id'])
cascaded_port = self.csd_router_port_map.get(cascaded_port_name)
if cascaded_port:
LOG.debug(_("TRICIRCLE cascaded router port is get, "
"port:%s"), cascaded_port)
return cascaded_port['id']
mac_address = port['mac_address']
ip_address = port['fixed_ips'][0]['ip_address']
tenant_id = port['tenant_id']
profile = {'cascading_port_id': port['id']}
name = self.get_router_port_name(port['id'])
req_props = {'network_id': cascaded_net_id,
'name': name,
'admin_state_up': True,
'fixed_ips': [{'ip_address': ip_address}],
'mac_address': mac_address,
'binding:profile': profile,
'device_owner': l3_constants.DEVICE_OWNER_DVR_INTERFACE
}
if tenant_id:
req_props['tenant_id'] = tenant_id
port_ret = self.csd_client('create_port', {'port': req_props})
if(not port_ret or
(port_ret and (not port_ret.get('port')))):
LOG.error(_("ERR:router port created failed, "
"ip_address:%s, mac_address:%s"),
ip_address, mac_address)
return
LOG.debug(_('TRICIRCLE create router port, Response:%s'),
str(port_ret))
cascaded_port = port_ret['port']
self.csd_router_port_map[cascaded_port_name] = cascaded_port
return port_ret['port'].get('id')
def delete_cascaded_router_port(self, cascaded_port_id):
try:
bodyResponse = self.csd_client('delete_port', cascaded_port_id)
LOG.debug(_('TRICIRCLE delete port, Response:%s'), str(bodyResponse))
return bodyResponse
except Exception, e:
LOG.error(_("TRICIRCLE delete port: %s failed: %s"), cascaded_port_id, e)
def validate_network_name(self, name):
if(name):
return True
return False
def validate_subnet_name(self, name):
if(name):
return True
return False
def validate_router_name(self, name):
if(name and name.startswith('router@')):
return True
return False
def validate_router_port_name(self, name):
if(name and name.startswith('router_port@')):
return True
return False
def get_params_limit(self):
pagination_limit = self.conf.pagination_limit
if(pagination_limit > 0):
params = {'limit': pagination_limit}
else:
params = None
return params
def cache_network_map(self):
params_limit = self.get_params_limit()
if(params_limit):
net_ret = self.csd_client('list_networks', params_limit)
else:
net_ret = self.csd_client('list_networks')
if(not net_ret or
(net_ret and (not net_ret.get('networks')))):
LOG.error(_("ERR:cascaded networks list failed!"))
return
net_info = net_ret.get('networks')
for net in net_info:
if(not self.validate_network_name(net['name'])):
continue
self.network_map[net['name']] = net
def cache_subnet_map(self):
params_limit = self.get_params_limit()
if(params_limit):
subnet_ret = self.csd_client('list_subnets', params_limit)
else:
subnet_ret = self.csd_client('list_subnets')
if(not subnet_ret or
(subnet_ret and (not subnet_ret.get('subnets')))):
LOG.error(_("ERR:cascaded subnets list failed!"))
return
subnet_info = subnet_ret.get('subnets')
for subnet in subnet_info:
if(not self.validate_subnet_name(subnet['name'])):
continue
self.subnet_map[subnet['name']] = subnet
def cache_router_map(self):
params_limit = self.get_params_limit()
if(params_limit):
router_ret = self.csd_client('list_routers', params_limit)
else:
router_ret = self.csd_client('list_routers')
if(not router_ret or
(router_ret and (not router_ret.get('routers')))):
return
routers_info = router_ret.get('routers')
for router in routers_info:
if(not self.validate_router_name(router['name'])):
continue
self.cascaded_router_map[router['name']] = CascadedRouterInfo(router)
def cache_router_port_map(self):
params_limit = self.get_params_limit()
if(params_limit):
port_ret = self.csd_client('list_ports', params_limit)
else:
port_ret = self.csd_client('list_ports')
if(not port_ret or
(port_ret and (not port_ret.get('ports')))):
LOG.error(_("TRICIRCLE:cascaded ports list failed!"))
return
ports_info = port_ret.get('ports')
associated_router_port = {}
for port in ports_info:
if(not self.validate_router_port_name(port['name'])):
continue
self.csd_router_port_map[port['name']] = port
if(port['device_id']):
router_id = port['device_id']
p_list = associated_router_port.get(router_id, [])
p_list.append(port['id'])
associated_router_port[router_id] = p_list
for router_id, port_list in associated_router_port.items():
router_exists = False
for csd_name, csd_router_info in self.cascaded_router_map.items():
if(csd_router_info.router['id'] == router_id):
router_exists = True
csd_router_info.router_port.extend(port_list)
if(not router_exists):
LOG.error(_("TRICIRCLE:cascaded ports %s has been associated "
"to router %s, but not find router in "
"cascaded_router_map!"), port_list, router_id)
def print_all_object_map(self):
LOG.debug(_('TRICIRCLE network_map: %s'), str(self.network_map))
LOG.debug(_('TRICIRCLE subnet_map: %s'), str(self.subnet_map))
LOG.debug(_('TRICIRCLE csd_router_port_map: %s'),
str(self.csd_router_port_map))
csd_router_map = {}
for csd_router_name, router_info in self.cascaded_router_map.items():
csd_router_map[csd_router_name] = {'router': router_info.router,
'router_port': router_info.router_port}
LOG.debug(_('TRICIRCLE cascaded_router_map: %s'),
str(csd_router_map))
def cache_object_map_info(self):
self.cache_network_map()
self.cache_subnet_map()
self.cache_router_map()
self.cache_router_port_map()
self.print_all_object_map()
def notify_l2_proxy(self, action, cascaded_port_id):
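        """Notify the local l2 proxy over its unix socket that a cascaded router port was added or removed, retrying up to three times."""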
if(not self.conf.is_notify_l2proxy):
return
retry = 3
while retry:
try:
retry = retry - 1
port_data = {"ports": {action: [cascaded_port_id]}}
port_data_str = jsonutils.dumps(port_data)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.l2proxy_sock_path)
sock.send(str(port_data_str))
sock.close()
LOG.debug(_('TRICIRCLE Notify the router port info %s to l2proxy!'),
port_data_str)
break
except socket.error as e:
                LOG.error(_('TRICIRCLE Notifying l2proxy of the router port raised an exception! %s'), e)
time.sleep(1)
def add_interface_for_cascaded_router(self, router_id,
cascaded_router_id,
cascaded_subnet_id,
cascaded_port_id):
router_name = self.get_router_name(router_id)
        csd_router_info = self.cascaded_router_map.get(router_name)
        if(not csd_router_info):
            LOG.error(_('TRICIRCLE add router interface failed, can not get '
                        'cascaded router, router_name: %s'), str(router_name))
            return
        if(cascaded_port_id in csd_router_info.router_port):
            LOG.debug(_('TRICIRCLE router %s has been added interface %s'),
                      csd_router_info.router['id'], str(cascaded_port_id))
            return
        req_props = {'port_id': cascaded_port_id}
        ret = self.csd_client('add_interface_router', cascaded_router_id, req_props)
        csd_router_info.router_port.append(cascaded_port_id)
LOG.debug(_("TRICIRCLE Add interface for cascaded router, router:"
"%s, cascaded_subnet_id:%s, cascaded_port_id:%s, Ret:%s"),
cascaded_router_id, cascaded_subnet_id,
cascaded_port_id, str(ret))
self.notify_l2_proxy("add", cascaded_port_id)
def _delete_router_routes_for_interface(self, csd_router, subnet_cidr):
subnet_cidr = netaddr.IPNetwork(subnet_cidr)
extra_routes = csd_router.get('routes')
final_routes = [route for route in extra_routes
if not netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr])]
req_props = {"routes": final_routes}
LOG.debug("update router: %s", req_props)
self.csd_client('update_router', csd_router.get('id'), {'router': req_props})
if self.validate_router_name(csd_router.get('name')):
csg_router_id = csd_router.get('name').split('@')[1]
ri = self.router_info.get(csg_router_id)
if ri:
ri.routes = final_routes
LOG.debug("final_routes: %s", ri.routes)
def delete_interface_for_cascaded_router_sync(self, cascaded_router_id, cascaded_subnet_id):
"""
add for data consistency, if modify, modify function clear_cascaded_port_for_subnet in l2_proxy.py too
"""
try:
self.delete_interface_for_cascaded_router(cascaded_router_id, cascaded_subnet_id)
except Exception, e:
LOG.error(_("Disassociate subnet %s from router %s failed: %s, try to delete FIP & rotes and try again"),
cascaded_subnet_id, cascaded_router_id, e)
self._delete_cascaded_floating_ips_by_subnet_id(cascaded_subnet_id)
router_ret = self.csd_client('show_router', cascaded_router_id)
if not router_ret or not router_ret.get('router'):
return
subnet_ret = self.csd_client('show_subnet', cascaded_subnet_id)
cidr = subnet_ret.get('subnet', {}).get('cidr')
if cidr:
LOG.debug(_("Update Router routes to delete router_interface"))
self._delete_router_routes_for_interface(router_ret.get('router'), cidr)
self.delete_interface_for_cascaded_router(cascaded_router_id, cascaded_subnet_id)
def delete_interface_for_cascaded_router(self, cascaded_router_id,
cascaded_subnet_id):
req_props = {'subnet_id': cascaded_subnet_id}
ret = self.csd_client('remove_interface_router', cascaded_router_id, req_props)
LOG.debug(_('TRICIRCLE Disassociate subnet %s from router %s, Ret:'
'%s'), cascaded_subnet_id, cascaded_router_id, str(ret))
return
def get_cascaded_router_gateway_ip(self, router_id):
search_opts = {'device_id': router_id,
'device_owner': 'network:router_gateway'}
port_ret = self.csd_client('list_ports', **search_opts)
if(not port_ret or
(port_ret and (not port_ret.get('ports')))):
LOG.debug(_("cascaded router gateway_ip get failed, "
"router id:%s"), router_id)
return
port = port_ret['ports']
if(len(port) == 0):
return
for fixed_ip in port[0]['fixed_ips']:
if fixed_ip['subnet_id'] == self.cascaded_gateway_subnets.get('tunnel_subnet_id'):
return fixed_ip['ip_address']
return None
def create_fip_gw_port_in_csd_and_csg(self, port_ret, cascading_fip, cascaded_net_id):
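        """In public cloud mode, ensure a floating IP agent gateway port for the VM's host exists in both the cascading and the cascaded OpenStack, creating whichever side is missing."""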
        # in public cloud mode, create the cascaded floating IP agent gateway port based on the VM port
if self.conf.is_public_cloud:
vm_port_host_id = port_ret.get('ports')[0]['binding:host_id']
            # get the cascading fip_gw_port; create it if it does not exist
csg_fip_gw_port_exist = False
req_props = {'name': 'fip_gw_port@' + vm_port_host_id}
port_ret = self.csg_client('list_ports', **req_props).get('ports')
if port_ret:
port_ret = port_ret[0]
csg_fip_gw_port_exist = True
if not csg_fip_gw_port_exist:
#get cascading internal_relay_subnet by host
req_props = {'name' : self.conf.host}
subnet_ret = self.csg_client('list_subnets', **req_props).get('subnets')
subnet_id = subnet_ret[0].get('id', None)
subnet_network_id = subnet_ret[0].get('network_id', None)
#create vm host fip gateway port in cascading
req_props = {'tenant_id': cascading_fip['tenant_id'],
'network_id': subnet_network_id,
'name': 'fip_gw_port@' + vm_port_host_id,
'admin_state_up': True,
'device_id' : cascading_fip['router_id'],
'device_owner' : 'network:floatingip_agent_gateway',
'fixed_ips': [{'subnet_id': subnet_id}],
}
port_ret = self.csg_client('create_port', {'port': req_props}).get('port')
fip_gw_ip_address = port_ret.get('fixed_ips')[0].get('ip_address')
fip_gw_mac_address = port_ret.get('mac_address')
fip_gw_ip_id = port_ret.get('id')
#get cascaded l3_agent id by vm port host_id as device_id
req_props = {'host' : vm_port_host_id, 'binary': 'neutron-l3-agent'}
agent_ret = self.csd_client('list_agents', **req_props).get('agents')
if not agent_ret:
                LOG.error('cascaded l3 agent does not exist on host %s', vm_port_host_id)
return
agent_id = agent_ret[0].get('id', None)
            # get the cascaded fip_gw_port; create it if it does not exist
csd_fip_gw_port_exist = False
req_props = {'device_id': agent_id, 'device_owner':'network:floatingip_agent_gateway', 'network_id': cascaded_net_id}
port_ret = self.csd_client('list_ports', **req_props).get('ports')
if port_ret:
csd_fip_gw_port_exist = True
if not csd_fip_gw_port_exist:
#create vm host fip gateway port in cascaded
req_props = {'name': 'port@' + fip_gw_ip_id,
'binding:host_id' :vm_port_host_id,
'admin_state_up': True,
'fixed_ips': [{'ip_address': fip_gw_ip_address}],
'mac_address': fip_gw_mac_address,
#fip_gw_port constants
'network_id': cascaded_net_id,
'device_owner' : 'network:floatingip_agent_gateway',
'device_id' : agent_id
}
try:
port_ret = self.csd_client('create_port', {'port': req_props})
except Exception, e:
LOG.error(_("create fip_gw_port failed:%s, clean csd FIP and try again"), e)
self.clean_cascaded_floatingip(cascaded_net_id, fip_gw_ip_address)
req_filters = {'mac_address': fip_gw_mac_address}
self.delete_cascaded_ports(req_filters)
port_ret = self.csd_client('create_port', {'port': req_props})
if not port_ret:
LOG.error('create fip_gw_port failed in csd')
def delete_cascaded_ports(self, filters):
port_ret = self.csd_client('list_ports', **filters)
if port_ret and len(port_ret.get('ports')) > 0:
ports = port_ret.get('ports')
port_name = str(ports[0]['name'])
if (len(port_name) > 36 and port_name.startswith("port@"))\
or port_name == l3_constants.REMOTE_PORT_KEY:
self._destroy_port(ports[0]['id'])
def _destroy_port(self, port_id):
if not port_id:
LOG.error(_("No port id is specified, cannot destroy port"))
return
for retry in range(l3_constants.DESTROY_RETRY):
try:
body_response = self.csd_client('delete_port', port_id)
LOG.debug(_('destroy port, Response:%s'), str(body_response))
return body_response
except Exception as e:
LOG.error('Delete port %s failed! Exception:%s',
port_id, str(e))
continue
return
def create_cascaded_external_net_for_gateway(self):
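        """Ensure the internal relay network and its per-host subnet exist in the cascaded OpenStack and return the cascaded network id."""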
#get cascading_internal_relay_network
csg_network_ret = self.get_cascading_network_by_name(self.conf.internal_relay_network_name)
if(not csg_network_ret):
LOG.error(_("TRICIRCLE get cascading_network failed, "
"network name:%s"), self.conf.internal_relay_network_name)
return
cascading_fip_net = csg_network_ret.get('id', None)
#create cascaded_internal_network
network_ret = self.get_or_create_cascaded_net(cascading_fip_net)
if not network_ret:
LOG.error(_("TRICIRCLE cascaded network created failed, "
"network name:%s"), cascading_fip_net)
return
#get cascading_internal_subnet id
cascaded_net = network_ret
cascading_subnet = self.list_cascading_subnet_by_name(self.conf.host)
if(not cascading_subnet or
(cascading_subnet and (not cascading_subnet.get('subnets')))):
LOG.error(_("TRICIRCLE get cascading subnet failed, "
"cascading subnet name: %s"), self.conf.host)
return
cascading_subnet_id = cascading_subnet.get('subnets')[0].get('id', None)
        # create the cascaded external subnet for the per-host cascading subnet
subnet_ret = self.get_or_create_cascaded_subnet(cascading_subnet_id)
if not subnet_ret:
LOG.error(_("TRICIRCLE cascaded subnet created failed, "
"cascading subnet id:%s"), cascading_subnet_id)
return
return cascaded_net.get('id', None)
def set_cascaded_router_gw_by_cascading_ip(self, ri, cascaded_net_id):
#set tenant router gateway on csd
enable_snat = 'False'
if self.conf.proxy_router_enable_snat:
enable_snat = self.conf.proxy_router_enable_snat
router_name = self._get_cascaded_router_name(ri.router['id'])
router_ret = self.csd_client('list_routers', **{'name': router_name}).get('routers')
if router_ret and len(router_ret):
router_external_gateway_info = router_ret[0].get('external_gateway_info', None)
if not router_external_gateway_info:
router_id = router_ret[0].get('id')
tenant_id = router_ret[0].get('tenant_id')
                # get a fixed IP in the internal relay network by host (e.g. az2.dc1)
req_props = {'name' : self.conf.host}
subnet_ret = self.csg_client('list_subnets', **req_props).get('subnets')
subnet_ret_id = subnet_ret[0].get('id', None)
gw_relay_net_id = subnet_ret[0].get('network_id', None)
if not gw_relay_net_id:
                    LOG.error(_('Set router gw in csd failed, cannot get gw_relay_net by host (%s)'), self.conf.host)
return
                # get a fixed IP to use as the gateway port IP in the cascaded OpenStack
req_props = {'network_id': gw_relay_net_id,
'name': 'ext_gw_port@' + cascaded_net_id,
'device_id' : router_id,
'device_owner' : router_name,
'fixed_ips': [{'subnet_id': subnet_ret_id}],
'admin_state_up': True
}
if tenant_id:
req_props['tenant_id'] = tenant_id
                # create the gateway port in csg to obtain its fixed IP
port_ret = self.csg_client('create_port', {'port': req_props}).get('port')
extern_ip_address = port_ret.get('fixed_ips')[0].get('ip_address')
LOG.debug(_('add gateway by specified fix_ip(%s) for router(%s) '), extern_ip_address, router_id)
req_props = {"external_gateway_info": {
"network_id": cascaded_net_id,
"enable_snat":enable_snat,
"external_fixed_ips":
[{"ip_address": extern_ip_address}]}}
try:
self.csd_client('update_router', router_id, {'router': req_props})
except Exception, e:
LOG.error(_("update router failed: %s, clean cascaded FIP and try again"), e)
self.clean_cascaded_floatingip(cascaded_net_id, extern_ip_address)
try:
self.csd_client('update_router', router_id, {'router': req_props})
                    except Exception:
                        LOG.debug(_('set gateway info by specified fix_ip(%s) failed'), extern_ip_address)
try:
self.csd_client('delete_port', port_ret['id'])
LOG.debug(_("Delete port %s successfully."), port_ret['id'])
except Exception as e:
LOG.error('Delete cascaded port %s failed! Exception:%s',
port_ret['id'], e)
else:
LOG.debug(_('Router(%s) is deleted already'), router_name)
def update_extra_routes_for_cascaded_router(self, router_id, extra_routes, snat_ports=None):
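        """Replace the cascaded router's extra routes with one route per destination via each SNAT port's fixed IP and return the router id."""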
LOG.debug("The extra_routes is %s, and the sg_ports is %s"
% (extra_routes, snat_ports))
routes = []
if snat_ports:
for d in extra_routes.keys():
routes.extend([
{
'destination': d,
'nexthop': sg['fixed_ips'][0]['ip_address']
} for sg in snat_ports
])
req_props = {"routes": routes}
try:
router_ret = self.csd_client('update_router', router_id,
{'router': req_props})
if(not router_ret or
(router_ret and (not router_ret.get('router')))):
LOG.debug(_("cascaded router update failed, "
"router id:%s"), router_id)
return
        except Exception:
LOG.error(_("cascaded router update failed, "
"router id:%s"), router_id)
return
LOG.debug(_('update router, Response:%s'), str(router_ret))
return router_ret['router']['id']
def get_or_create_cascaded_router(self, existing_port_ids,
internal_ports, ri):
if(len(existing_port_ids) == 0 and len(internal_ports) > 0 and
not ri.cascaded_router_id):
cascaded_name = self.get_router_name(ri.router['id'])
cascaded_router = self.cascaded_router_map.get(cascaded_name, None)
if(cascaded_router and cascaded_router.router):
ri.cascaded_router_id = cascaded_router.router['id']
LOG.debug(_('TRICIRCLE get router:%s'), str(cascaded_router))
return
router_id = self.create_cascaded_router(ri)
if(not router_id):
                LOG.error(_('ERR: can not create cascaded router: router@%s'),
                          ri.router['id'])
return
ri.cascaded_router_id = router_id
def _update_extern_extra_routes_for_snat(self, ri, ex_gw_port, snat_ports):
if self.conf.nexthop_over_tunneling == 'gre' and snat_ports:
snat_networks = dict((s['network_id'], s['subnet']['cidr']) for s in snat_ports)
network_binding = self.plugin_rpc.get_network_binding_host(self.context, ri.router_id,
snat_networks.keys())
for n in network_binding.keys():
next_hop = network_binding[n]
dest_cidr = snat_networks[n]
ri.extern_extra_routes[dest_cidr] = next_hop
ri.extra_routes_is_update = True
if ri.router['gw_port_host'] != self.host:
network_binding = self.plugin_rpc.get_network_binding_host(self.context, ri.router_id,
[ex_gw_port['network_id']])
if network_binding:
ri.extern_extra_routes[ex_gw_port['subnet']['cidr']] = network_binding[ex_gw_port['network_id']]
ri.extra_routes_is_update = True
def _delete_extra_routes_for_snat(self, ri):
        if self.conf.nexthop_over_tunneling == 'gre' and ri.ex_gw_port['binding:host_id'] != self.host and \
                ri.ex_gw_port['subnet']['cidr'] in ri.extern_extra_routes:
ri.extern_extra_routes.pop(ri.ex_gw_port['subnet']['cidr'])
ri.extra_routes_is_update = True
def _update_extern_extra_routes(self, ri, port, ex_gw_port):
if self.conf.nexthop_over_tunneling == 'gre':
extra_routes = self.plugin_rpc.get_extra_routes_by_subnet(
self.context,
ri.router['id'],
port['fixed_ips'][0]['subnet_id'],
ex_gw_port and ex_gw_port['id'])
LOG.debug(_("Cascade Info, new ports, extra_routes:%s from "
"plugin_rpc.get_extra_routes_by_subnet"), extra_routes)
if('not_bound_network' in extra_routes):
return False
if ('big2Layer' not in extra_routes and
'local_network' not in extra_routes):
next_hop = extra_routes[0][0]
dest_cidr = extra_routes[0][1]
if(not next_hop):
return False
ri.extern_extra_routes[dest_cidr] = next_hop
ri.extra_routes_is_update = True
ri.internal_ports.append(port)
return False
return True
def _delete_extern_extra_routes(self, ri, port, ex_gw_port):
if self.conf.nexthop_over_tunneling == 'gre':
extra_routes = self.plugin_rpc.get_extra_routes_by_subnet(
self.context,
ri.router['id'],
port['fixed_ips'][0]['subnet_id'],
ex_gw_port and ex_gw_port['id'])
LOG.debug(_("Cascade Info, old ports, extra_routes:%s from "
"plugin_rpc.get_extra_routes_by_subnet"), extra_routes)
if('not_bound_network' in extra_routes):
return False
if ('big2Layer' not in extra_routes and
'local_network' not in extra_routes):
next_hop = extra_routes[0][0]
dest_cidr = extra_routes[0][1]
ri.extern_extra_routes.pop(dest_cidr, None)
ri.extra_routes_is_update = True
ri.internal_ports.remove(port)
return False
return True
def _get_cascaded_router_name(self, id):
return ('router@' + id)
def _get_cascaded_network_name(self, id):
return ('network@' + id)
def _get_cascaded_subnet_name(self, id):
return ('subnet@' + id)
def _get_cascaded_port_name(self, id):
return ('port@' + id)
@common_utils.exception_logger()
def process_router(self, ri):
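        """Synchronize a cascading router into the cascaded OpenStack: add and remove internal interfaces, process the external gateway, floating IPs and extra routes, and delete the cascaded router when it is no longer needed on this host."""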
#TODO(mrsmith) - we shouldn't need to check here
if 'distributed' not in ri.router:
ri.router['distributed'] = False
ex_gw_port = self._get_ex_gw_port(ri)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
snat_ports = ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
existing_port_ids = set([p['id'] for p in ri.internal_ports])
current_port_ids = set([p['id'] for p in internal_ports
if p['admin_state_up']])
new_ports = [p for p in internal_ports if
p['id'] in current_port_ids and
p['id'] not in existing_port_ids]
old_ports = [p for p in ri.internal_ports if
p['id'] not in current_port_ids]
LOG.debug(_("process_router: internal_ports: %s"), internal_ports)
LOG.debug(_("process_router: existing_port_ids: %s"), existing_port_ids)
LOG.debug(_("process_router: current_port_ids: %s"), current_port_ids)
LOG.debug(_("process_router: new_ports: %s"), new_ports)
LOG.debug(_("process_router: old_ports: %s"), old_ports)
cascaded_router = self._get_cascaded_router(ri)
if cascaded_router:
ri.cascaded_router_id = cascaded_router['id']
for p in old_ports:
try:
LOG.debug("process_router: start delete interface: cas_subnet:%s, port_id:%s",
p['fixed_ips'][0]['subnet_id'], p['id'])
if not self._delete_extern_extra_routes(ri, p, ex_gw_port):
LOG.debug("process_router: _delete_extern_extra_routes return False, delete failed ")
continue
cascaded_subnet_id = self.get_cascaded_subnet(
p['fixed_ips'][0]['subnet_id'])['id']
if(not cascaded_subnet_id):
LOG.error(_('ERR: can not delete interface for cascaded'
' router, not find cascaded_subnet_id!'))
continue
self.delete_interface_for_cascaded_router_sync(ri.cascaded_router_id,
cascaded_subnet_id)
self.clear_router_port_cache(ri.router['id'], p['id'])
ri.internal_ports.remove(p)
ri.local_internal_ports.remove(p)
except Exception, e:
LOG.error(_("delete interface %s failed: %s"), p, e)
for p in new_ports:
try:
if not self._update_extern_extra_routes(ri, p, ex_gw_port):
continue
local_existing_port_ids = set([pt['id']
for pt in ri.local_internal_ports])
self.get_or_create_cascaded_router(local_existing_port_ids,
internal_ports, ri)
cascaded_net = self.get_or_create_cascaded_net(p['network_id'])
cascaded_net_id = cascaded_net['id']
if not cascaded_net_id:
LOG.error(_('ERR: can not get cascaded net_id from port'
' %s by get_or_create_cascaded_net_id!'), p)
continue
cascaded_subnet_id = self.get_or_create_cascaded_subnet(
p['fixed_ips'][0]['subnet_id'])['id']
if not cascaded_subnet_id:
LOG.error(_('ERR: can not get cascaded subnet_id from port'
' %s by get_or_create_cascaded_subnet_id!'), p)
continue
#modify by data consistency, clean cascaded router port if create port failed
cascaded_port_id = None
try:
cascaded_port_id = self.get_or_create_cascaded_router_port(cascaded_net_id, p)
if not cascaded_port_id:
raise Exception
except Exception, e:
LOG.warn(_("create rotuer port failed: %s, try to clean cascaded router port"), e)
router = ri.router
self._delete_cascaded_illegal_interface(router['id'], router.get(l3_constants.INTERFACE_KEY, []),
ri.internal_ports)
req_props_list = {'network_id': cascaded_net_id,
'fixed_ips': "ip_address=" + p['fixed_ips'][0]['ip_address'],
}
cad_router_ports = self.csd_client('list_ports', **req_props_list)
if cad_router_ports and cad_router_ports.get('ports'):
for cad_router_port in cad_router_ports.get('ports'):
self.delete_cascaded_router_port(cad_router_port['id'])
cascaded_port_id = self.get_or_create_cascaded_router_port(cascaded_net_id, p)
if not cascaded_port_id:
continue
p['cascaded_port_id'] = cascaded_port_id
if not ri.cascaded_router_id:
LOG.error(_('ERR: can not create cascaded router port from'
                                ' port %s by create_cascaded_router_port!'), p)
self.delete_cascaded_router_port(cascaded_port_id)
continue
#modify by data consistency, delete illegal router interface if add interface failed
try:
self.add_interface_for_cascaded_router(ri.router['id'],
ri.cascaded_router_id,
cascaded_subnet_id,
cascaded_port_id)
except Exception, e:
LOG.error(_("add interface for cascaded router failed: %s, "
"csg_router: %s, csd_router: %s, csd_subnet: %s, csd_port: %s, "
"clean cascaded router interface and try again")
, e, ri.router['id'], ri.cascaded_router_id, cascaded_subnet_id, cascaded_port_id)
router = ri.router
self._delete_cascaded_illegal_interface(router['id'], router.get(l3_constants.INTERFACE_KEY, []),
ri.internal_ports)
try:
self.add_interface_for_cascaded_router(ri.router['id'],
ri.cascaded_router_id,
cascaded_subnet_id,
cascaded_port_id)
except Exception, e:
LOG.error(_("add interface for cascaded router failed again: %s, "
"csg_router: %s, csd_router: %s, csd_subnet: %s, csd_port: %s")
, e, ri.router['id'], ri.cascaded_router_id, cascaded_subnet_id, cascaded_port_id)
self.delete_cascaded_router_port(cascaded_port_id)
raise e
ri.internal_ports.append(p)
ri.local_internal_ports.append(p)
except Exception, e:
LOG.error(_("add interface %s failed: %s"), p, e)
# Process external gateway
try:
ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
ri.ex_gw_port and ri.ex_gw_port['id'])
ext_port_exists = False
if ex_gw_port_id and not ri.ex_gw_port:
ext_port_exists = self._check_external_port_exists(
ri.router_id, ex_gw_port_id)
elif ex_gw_port_id and ri.ex_gw_port:
ext_port_exists = True
if ex_gw_port:
def _gateway_ports_equal(port1, port2):
def _get_filtered_dict(d, ignore):
return dict((k, v) for k, v in d.iteritems()
if k not in ignore)
keys_to_ignore = set(['binding:host_id'])
port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
return port1_filtered == port2_filtered
self._set_subnet_info(ex_gw_port)
if (not ri.ex_gw_port or (ri.router['distributed'] and
not ext_port_exists)) or (not _gateway_ports_equal(ex_gw_port, ri.ex_gw_port) or \
(ri.router['gw_port_host'] is None and ext_port_exists) or \
(ri.enable_snat != ri.router.get('enable_snat'))):
self._update_extern_extra_routes_for_snat(ri, ex_gw_port, snat_ports)
self.external_gateway_added(ri, ex_gw_port)
elif not ex_gw_port and ri.ex_gw_port:
self._delete_extra_routes_for_snat(ri)
self.external_gateway_removed(ri)
except Exception, e:
LOG.error(_("process external gateway failed: %s"), e)
# Process static routes for router
try:
self.routes_updated(ri)
except Exception, e:
LOG.error(_("process static routes failed: %s"), e)
# Process floatingip
fip_statuses = {}
existing_floating_ips = ri.floating_ips
try:
if ex_gw_port and self.conf.agent_mode == 'dvr_snat' and ri.router['gw_port_host'] == self.host:
cascading_floating_ips = self.get_cascading_floating_ips(ri)
cascaded_floating_ips = self.get_cascaded_floating_ips(ri.cascaded_router_id)
cas_fip = []
cad_fip = []
#handle floating_ips
for cascading_fip in cascading_floating_ips:
for fip in cascaded_floating_ips:
if fip['fixed_ip_address'] == cascading_fip['fixed_ip_address'] \
and fip['floating_ip_address'] == cascading_fip['floating_ip_address']:
fip_statuses[cascading_fip['id']] = fip['status']
cas_fip.append(cascading_fip)
cad_fip.append(fip)
break
LOG.debug("cas_fip is %s", cas_fip)
for fip_port in cas_fip:
cascading_floating_ips.remove(fip_port)
LOG.debug("cad_fip is %s", cad_fip)
for fip_port in cad_fip:
cascaded_floating_ips.remove(fip_port)
#delete floating_ip
for fip in cascaded_floating_ips:
floating_ip_ret = self.csd_client('delete_floatingip', fip['id'])
LOG.debug(_('delete cascaded_floatingip for %s, Response:%s') %
(fip['id'], str(floating_ip_ret)))
#add floating_ip
ext_net_map = {}
for cascading_fip in cascading_floating_ips:
try:
cascaded_net_id = ext_net_map.get(cascading_fip['floating_network_id'], None)
if not cascaded_net_id:
cascaded_net = self.get_cascaded_network_by_cascading(cascading_fip['floating_network_id'])
if cascaded_net:
cascaded_net_id = cascaded_net['id']
else:
fip_statuses[cascading_fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
LOG.error(_("cascaded ext_net for %s get failed"), cascading_fip['floating_network_id'])
continue
ext_net_map[cascading_fip['floating_network_id']] = cascaded_net_id
if self.host != cascading_fip['host']:
result = self._create_cascaded_fip_with_no_port(cascading_fip, cascaded_net_id, ri)
else:
result = self._create_cascaded_fip_with_port(cascading_fip, cascaded_net_id)
if result:
fip_statuses[cascading_fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
else:
fip_statuses[cascading_fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
except Exception, e:
fip_statuses[cascading_fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
LOG.error(_("create cascaded floatingip for %s failed: %s"),
cascading_fip, e)
continue
            # handle the dvr mode router
elif self.conf.agent_mode == 'dvr' and self.conf.is_public_cloud:
cascaded_floating_ips = self.get_cascaded_floating_ips(ri.cascaded_router_id)
cascading_floating_ips = self.get_floating_ips(ri)
pc_cas_fip = []
pc_cad_fip = []
#handle floating_ips
for cascading_fip in cascading_floating_ips:
for fip in cascaded_floating_ips:
if fip['fixed_ip_address'] == cascading_fip['fixed_ip_address'] \
and fip['floating_ip_address'] == cascading_fip['floating_ip_address']:
fip_statuses[cascading_fip['id']] = fip['status']
pc_cas_fip.append(cascading_fip)
pc_cad_fip.append(fip)
break
LOG.debug("pc_cas_fip is %s", pc_cas_fip)
for fip_port in pc_cas_fip:
cascading_floating_ips.remove(fip_port)
LOG.debug("pc_cad_fip is %s", pc_cad_fip)
for fip_port in pc_cad_fip:
cascaded_floating_ips.remove(fip_port)
#delete floating_ip
for fip in cascaded_floating_ips:
floating_ip_ret = self.csd_client('delete_floatingip', fip['id'])
LOG.debug(_('delete cascaded_floatingip for %s, Response:%s') %
(fip['id'], str(floating_ip_ret)))
#add floating_ip
ext_net_map = {}
for cascading_fip in cascading_floating_ips:
try:
cascaded_net_id = ext_net_map.get(cascading_fip['floating_network_id'], None)
if not cascaded_net_id:
cascaded_net = self.get_cascaded_network_by_cascading_name(self.conf.internal_relay_network_name)
if cascaded_net:
cascaded_net_id = cascaded_net['id']
elif self.conf.is_public_cloud and not cascaded_net:
                                # create the external relay network in csd so the tenant router can set its gateway
cascaded_net_id = self.create_cascaded_external_net_for_gateway()
else:
fip_statuses[cascading_fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
LOG.error(_("cascaded ext_net for %s get failed"), cascading_fip['floating_network_id'])
continue
                        # set the tenant router gateway on csd
self.set_cascaded_router_gw_by_cascading_ip(ri, cascaded_net_id)
ext_net_map[cascading_fip['floating_network_id']] = cascaded_net_id
result = self._create_cascaded_fip_with_port(cascading_fip, cascaded_net_id)
if result:
fip_statuses[cascading_fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
else:
fip_statuses[cascading_fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
except Exception, e:
fip_statuses[cascading_fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
LOG.error(_("create cascaded floatingip for %s failed: %s"),
cascading_fip, e)
continue
# Update floating IP status on the neutron server
self.plugin_rpc.update_floatingip_statuses(
self.context, ri.router_id, fip_statuses)
# Identify floating IPs which were disabled
ri.floating_ips = set(fip_statuses.keys())
for fip_id in existing_floating_ips - ri.floating_ips:
fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN
# Update floating IP status on the neutron server
self.plugin_rpc.update_floatingip_statuses(
self.context, ri.router_id, fip_statuses)
except Exception, e:
# TODO(salv-orlando): Less broad catching
# All floating IPs must be put in error state
for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []):
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
LOG.error(_("process floatingip failed: %s"), e)
try:
if ex_gw_port and self.conf.agent_mode == 'dvr_snat' and ri.router['gw_port_host'] == self.host:
# Identify floating IPs which were disabled
ri.floating_ips = set(fip_statuses.keys())
for fip_id in existing_floating_ips - ri.floating_ips:
fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN
# Update floating IP status on the neutron server
self.plugin_rpc.update_floatingip_statuses(
self.context, ri.router_id, fip_statuses)
except Exception, e:
LOG.error(_("update fip failed: %s"), e)
try:
if ri.cascaded_router_id:
self.update_extra_routes_for_cascaded_router(
ri.cascaded_router_id,
ri.extern_extra_routes,
ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []))
ri.extra_routes_is_update = False
except Exception, e:
LOG.error(_("update cascaded_router %s failed: %s"), ri.cascaded_router_id, e)
# Update ex_gw_port and enable_snat on the router info cache
ri.ex_gw_port = ex_gw_port
ri.snat_ports = snat_ports
ri.enable_snat = ri.router.get('enable_snat')
# Process if delete router
try:
LOG.info("local_internal_ports:%s, cascaded_router_id:%s, ex_gw_port:%s, gw_port_host:%s" % (
len(ri.local_internal_ports), ri.cascaded_router_id, ri.ex_gw_port, ri.router.get('gw_port_host')
))
if(len(ri.local_internal_ports) == 0 and ri.cascaded_router_id and
(not ri.ex_gw_port or ri.router.get('gw_port_host') != self.host)):
ri.internal_ports = []
ri.local_internal_ports = []
ri.extern_extra_routes = {}
ri.routes = []
ri.extra_routes_is_update = False
self.delete_cascaded_router_sync(ri.router['id'], ri.cascaded_router_id)
if self.conf.nexthop_over_tunneling == 'gre':
self.plugin_rpc.update_router_extern_ip_map(self.context,
ri.router['id'],
None)
ri.cascaded_router_id = None
except Exception, e:
LOG.error(_("process whether delete router failed: %s"), e)
def clean_cascaded_floatingip(self, csd_network_id, csd_fip_ip):
filters = {'floating_network_id': csd_network_id,
'floating_ip_address': csd_fip_ip}
floating_ips_ret = self.csd_client('list_floatingips', **filters)
if floating_ips_ret and floating_ips_ret.get('floatingips'):
for fip in floating_ips_ret.get('floatingips'):
try:
self.csd_client('delete_floatingip', fip['id'])
except Exception, e:
LOG.error(_("Delete floatingip failed: %s"), e)
def _create_floatingip_sync(self, floating_ip):
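        """Create a floating IP in the cascaded OpenStack; on failure, clean up conflicting cascaded floating IPs and retry once."""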
try:
floating_ip_ret = self.csd_client('create_floatingip', {'floatingip': floating_ip})
if (not floating_ip_ret) or not floating_ip_ret.get('floatingip'):
raise Exception
return floating_ip_ret
except Exception, e:
LOG.error(_("Create floatingip failed: %s, clean cascaded floatingip and try again"), e)
self.clean_cascaded_floatingip(floating_ip['floating_network_id'], floating_ip['floating_ip_address'])
floating_ip_ret = self.csd_client('create_floatingip', {'floatingip': floating_ip})
return floating_ip_ret
def _create_cascaded_fip_with_no_port(self, cascading_fip, cascaded_net_id, ri):
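        """Create a cascaded floating IP without a port association, tag its floating port name with the cascading router id, then set the fixed IP address."""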
try:
floating_ip = {'floating_network_id': cascaded_net_id,
'tenant_id': cascading_fip['tenant_id'],
'floating_ip_address': cascading_fip['floating_ip_address']
}
# create_floatingip
floating_ip_ret = self._create_floatingip_sync(floating_ip)
if (not floating_ip_ret) or not floating_ip_ret.get('floatingip'):
return False
else:
cascaded_floating_ip = floating_ip_ret.get('floatingip')
req_props = {'fixed_ips': 'ip_address=%s' % cascaded_floating_ip.get('floating_ip_address'),
'network': cascaded_net_id}
# search floatingip_port
port_ret = self.csd_client('list_ports', **req_props)
if not port_ret or not port_ret.get('ports'):
self.csd_client('delete_floatingip', floating_ip_ret.get('floatingip')['id'])
return False
else:
                    # update the floating IP port, writing the cascading router_id into its name
floating_port = port_ret.get('ports')[0]
port_props = {'name': ri.router['id']}
port_ret = self.csd_client('update_port', floating_port.get('id'), \
{'port': port_props})
if not port_ret or not port_ret.get('port'):
self.csd_client('delete_floatingip', floating_ip_ret.get('floatingip')['id'])
return False
floating_ip = {'fixed_ip_address': cascading_fip['fixed_ip_address']}
floating_ip_ret = self.csd_client('update_floatingip', floating_ip_ret.get('floatingip')['id'], \
{'floatingip': floating_ip})
if not floating_ip_ret or not floating_ip_ret.get('floatingip'):
self.csd_client('delete_floatingip', floating_ip_ret.get('floatingip')['id'])
LOG.error(_("update cascaded floatingip for %s failed, Response is %s") %
                                  (cascading_fip, str(floating_ip_ret)))
return False
return True
except Exception:
LOG.error(_("create cascaded floatingip with no port_id for %s failed, Response is %s") %
                      (cascading_fip, str(floating_ip_ret)))
self.csd_client('delete_floatingip', floating_ip_ret.get('floatingip')['id'])
return False
def _create_cascaded_fip_with_port(self, cascading_fip, cascaded_net_id):
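        """Create a cascaded floating IP bound to the cascaded port ('port@<id>') that mirrors the cascading port, ensuring the FIP agent gateway port exists first."""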
floating_ip = {'floating_network_id': cascaded_net_id,
'tenant_id': cascading_fip['tenant_id'],
'fixed_ip_address': cascading_fip['fixed_ip_address'],
'floating_ip_address': cascading_fip['floating_ip_address']
}
search_opts = {'name': 'port@' + cascading_fip['port_id']}
port_ret = self.csd_client('list_ports', **search_opts)
if not port_ret or not port_ret.get('ports'):
LOG.error(_("cascaded port for %s get failed"), cascading_fip['port_id'])
return False
floating_ip['port_id'] = port_ret.get('ports')[0]['id']
#hyp create floatingip_agent_gateway port
self.create_fip_gw_port_in_csd_and_csg(port_ret, cascading_fip, cascaded_net_id)
floating_ip_ret = self._create_floatingip_sync(floating_ip)
if not floating_ip_ret or not floating_ip_ret.get('floatingip'):
LOG.error(_("create cascaded floatingip for %s failed, Response is %s") %
                      (cascading_fip, str(floating_ip_ret)))
return False
return True
def _get_ex_gw_port(self, ri):
return ri.router.get('gw_port')
def _check_external_port_exists(self, router_id, port_id):
"""Return True if external gateway port is present."""
router_ret = self.csd_client('list_routers', **{'name': self._get_cascaded_router_name(router_id)})
if(not router_ret or
(router_ret and (not router_ret.get('routers')))):
return False
routers_info = router_ret.get('routers')
LOG.debug("_check_external_port_exists routers_info:%s " % routers_info)
if len(routers_info) and not routers_info[0].get('external_gateway_info'):
return False
return True
def get_internal_port(self, ri, subnet_id):
"""Return internal router port based on subnet_id."""
router_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
for port in router_ports:
fips = port['fixed_ips']
for f in fips:
if f['subnet_id'] == subnet_id:
return port
def get_internal_device_name(self, port_id):
return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_fip_ext_device_name(self, port_id):
return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_snat_int_device_name(self, port_id):
return (SNAT_INT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_snat_ns_name(self, router_id):
return (SNAT_NS_PREFIX + router_id)
def get_snat_interfaces(self, ri):
return ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
def get_floating_ips(self, ri):
"""Filter Floating IPs to be hosted on this agent."""
floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, [])
if ri.router['distributed']:
floating_ips = [i for i in floating_ips if i['host'] == self.host]
return floating_ips
def get_cascading_floating_ips(self, ri):
return ri.router.get(l3_constants.FLOATINGIP_KEY, [])
def get_cascaded_floating_ips(self, cascaded_router_id):
filters = {'router_id': cascaded_router_id}
floating_ips_ret = self.csd_client('list_floatingips', **filters)
if (not floating_ips_ret) or (floating_ips_ret and not floating_ips_ret.get('floatingips')):
return []
else:
return floating_ips_ret.get('floatingips')
def get_cascaded_floating_ips_by_port(self, port_id):
filters = {'port_id': port_id}
floating_ips_ret = self.csd_client('list_floatingips', **filters)
if (not floating_ips_ret) or (floating_ips_ret and not floating_ips_ret.get('floatingips')):
return []
else:
return floating_ips_ret.get('floatingips')
def get_cascaded_network_by_cascading(self, cascading_network_id):
filters = {'name': 'network@' + cascading_network_id}
network_ret = self.csd_client('list_networks', **filters)
if (not network_ret) or (network_ret and not network_ret.get('networks')):
return
else:
return network_ret.get('networks')[0]
def get_cascading_network_by_name(self, cascading_network_name):
filters = {'name': cascading_network_name}
network_ret = self.csg_client('list_networks', **filters)
if (not network_ret) or (network_ret and not network_ret.get('networks')):
return
else:
return network_ret.get('networks')[0]
def get_cascaded_network_by_cascading_name(self, cascading_network_name):
filters = {'name': cascading_network_name}
network_ret = self.csg_client('list_networks', **filters)
if (not network_ret) or (network_ret and not network_ret.get('networks')):
return
else:
filters = {'name': 'network@' + network_ret.get('networks')[0].get('id')}
network_ret = self.csd_client('list_networks', **filters)
if (not network_ret) or (network_ret and not network_ret.get('networks')):
return
else:
return network_ret.get('networks')[0]
def _map_internal_interfaces(self, ri, int_port, snat_ports):
"""Return the SNAT port for the given internal interface port."""
fixed_ip = int_port['fixed_ips'][0]
subnet_id = fixed_ip['subnet_id']
match_port = [p for p in snat_ports if
p['fixed_ips'][0]['subnet_id'] == subnet_id]
if match_port:
return match_port[0]
else:
LOG.error(_('DVR: no map match_port found!'))
def _get_cascaded_router(self, ri):
router_ret = self.csd_client('list_routers', **{'name': self._get_cascaded_router_name(ri.router['id'])})
if len(router_ret['routers']):
return router_ret['routers'][0]
return None
def _get_cascaded_router_interface(self, cad_router_id, distributed):
if distributed:
filter = {'device_id': cad_router_id,
'device_owner': l3_constants.DEVICE_OWNER_DVR_INTERFACE}
ports_ret = self.csd_client('list_ports', **filter)
if ports_ret and ports_ret.get('ports'):
return ports_ret.get('ports')
else:
filter = {'device_id': cad_router_id,
'device_owner': l3_constants.DEVICE_OWNER_ROUTER_INTF}
ports_ret = self.csd_client('list_ports', **filter)
if ports_ret and ports_ret.get('ports'):
return ports_ret.get('ports')
return []
def _get_cascaded_router_port(self, cad_router_id):
filter = {'device_id': cad_router_id}
ports_ret = self.csd_client('list_ports', **filter)
if ports_ret and ports_ret.get('ports'):
return ports_ret.get('ports')
return []
    def _delete_cascaded_illegal_interface(self, csg_router_id, cas_router_interfaces, existing_router_interfaces=[]):
        router_int = cas_router_interfaces + existing_router_interfaces
cad_router_names = list(set([self.get_router_port_name(p.get('id')) for p in router_int]))
cad_router = None
router_ret = self.csd_client('list_routers', **{'name': self._get_cascaded_router_name(csg_router_id)})
if len(router_ret['routers']):
cad_router = router_ret['routers'][0]
if cad_router:
cad_router_interfaces = self._get_cascaded_router_interface(cad_router['id'], cad_router[
'distributed'])
for cad_router_interface in cad_router_interfaces:
if cad_router_interface.get('name') not in cad_router_names:
self.delete_interface_for_cascaded_router_sync(cad_router.get('id'),
cad_router_interface['fixed_ips'][0]['subnet_id'])
if self.validate_router_port_name(cad_router_interface.get('name')):
self.clear_router_port_cache(csg_router_id, cad_router_interface.get('name').split("@")[1])
def _delete_cascaded_interface_port(self, router_id, csd_router_id):
cad_router_ports = self._get_cascaded_router_port(csd_router_id)
for cad_router_port in cad_router_ports:
if cad_router_port.get('device_owner') in [l3_constants.DEVICE_OWNER_DVR_INTERFACE,
l3_constants.DEVICE_OWNER_ROUTER_INTF]:
try:
self.delete_interface_for_cascaded_router_sync(csd_router_id,
cad_router_port['fixed_ips'][0]['subnet_id'])
if self.validate_router_port_name(cad_router_port.get('name')):
self.clear_router_port_cache(router_id, cad_router_port.get('name').split("@")[1])
LOG.debug(_("Delete router_interface %s successfully."), cad_router_port['id'])
                except Exception as e:
LOG.error(_("Delete router_interface %s failed: %s"), cad_router_port['id'], e)
else:
try:
self.csd_client('delete_port', cad_router_port['id'])
LOG.debug(_("Delete port %s successfully."), cad_router_port['id'])
except Exception as e:
LOG.error('Delete cascaded port %s failed! Exception:%s',
cad_router_port['id'], e)
def _delete_cascaded_floating_ips_by_subnet_id(self, csd_subnet_id):
req_props_list = {'fixed_ips': "subnet_id=" + csd_subnet_id}
csd_ports = self.csd_client('list_ports', **req_props_list)
if not csd_ports or not csd_ports.get('ports'):
return
csd_ports = csd_ports.get('ports')
for csd_port in csd_ports:
fips = self.get_cascaded_floating_ips_by_port(csd_port.get('id'))
for fip in fips:
try:
floating_ip_ret = self.csd_client('delete_floatingip', fip['id'])
LOG.debug(_('delete cascaded_floatingip for %s, Response:%s') %
(fip.get('id'), str(floating_ip_ret)))
                except Exception as e:
LOG.error(_("delete cascaded_floatingip for %s, failed: %s"), fip.get('id'), e)
def _delete_cascaded_floating_ips_by_router_id(self, csd_router_id):
csd_floating_ips = self.get_cascaded_floating_ips(csd_router_id)
for csd_floating_ip in csd_floating_ips:
try:
floating_ip_ret = self.csd_client('delete_floatingip', csd_floating_ip['id'])
LOG.debug(_('delete cascaded_floatingip for %s, Response:%s') %
(csd_floating_ip['id'], str(floating_ip_ret)))
            except Exception as e:
LOG.error(_("delete cascaded_floatingip failed: %s"), e)
def _get_cascaded_gateway_subnets(self, ri, network_id):
if ri.cascaded_gateway_subnets.get('tunnel_subnet_id') \
and ri.cascaded_gateway_subnets.get('extern_subnet_id'):
return ri.cascaded_gateway_subnets
external_net_id = self.conf.gateway_external_network_id
if external_net_id:
req_props = {'name': external_net_id,
'router:external': True}
net_ret = self.csd_client('list_networks', **req_props)
subnet_ids = net_ret.get('networks', [])[0].get('subnets')
for subnet_id in subnet_ids:
subnet_ret = self.csd_client('show_subnet', subnet_id)
cidr = subnet_ret.get('subnet', {}).get('cidr')
if cidr == self.conf.cascaded_extern_subnet_cidr:
ri.cascaded_gateway_subnets['tunnel_subnet_id'] = subnet_id
else:
ri.cascaded_gateway_subnets['extern_subnet_id'] = subnet_id
else:
            LOG.error('[_get_cascaded_gateway_subnets] gateway_external_network_id must be specified in l3_proxy.ini')
return ri.cascaded_gateway_subnets
def external_gateway_removed(self, ri):
        # TODO: this is invoked whenever the gateway is removed for any router; it could be improved.
if (not ri.router['distributed'] or
self.conf.agent_mode != 'dvr_snat'):
return
LOG.info('[_external_gateway_removed] remove external gateway port. router:(%s)' % ri.router['id'])
external_net_id = self.conf.gateway_external_network_id
if external_net_id:
cascaded_ext_net_name = self._get_cascaded_network_name(external_net_id)
req_props = {'name': cascaded_ext_net_name,
'router:external': True}
net_ret = self.csd_client('list_networks', **req_props)
if not net_ret or not net_ret['networks']:
LOG.debug(_('Can not find external network(%s) on cascaded'), cascaded_ext_net_name)
return
router_name = self._get_cascaded_router_name(ri.router['id'])
            # check whether the router exists
router_ret = self.csd_client('list_routers', **{'name': router_name}).get('routers')
if router_ret and len(router_ret):
router_id = router_ret[0].get('id')
self.csd_client('remove_gateway_router', router_id)
else:
LOG.debug(_('Router(%s) is deleted already'), router_name)
else:
            LOG.error('[_external_gateway_removed] gateway_external_network_id must be specified in l3_proxy.ini')
def external_gateway_added(self, ri, ex_gw_port):
if (not ri.router['distributed'] or self.conf.agent_mode != 'dvr_snat' or
ri.router['gw_port_host'] != self.host):
return
LOG.info('[external_gateway_added] add external gateway port. ex_gw_port:(%s)' % ex_gw_port)
external_net_id = self.conf.gateway_external_network_id
if external_net_id:
            # get the cascaded subnet_id by matching the cascading subnet CIDR
external_subnet_id = ex_gw_port['subnet']['id']
external_subnet_cidr = ex_gw_port['subnet']['cidr']
req_props = {'cidr' : external_subnet_cidr}
subnet_ret = self.csd_client('list_subnets', **req_props)
if subnet_ret and subnet_ret['subnets'] and len(subnet_ret['subnets']):
cascaded_ext_subnet_id = subnet_ret['subnets'][0]['id']
cascaded_ext_subnet_name = subnet_ret['subnets'][0]['name']
cascaded_ext_net_id = subnet_ret['subnets'][0]['network_id']
if cascaded_ext_subnet_name != ('subnet@' + external_subnet_id):
subnet_ret = self.csd_client('update_subnet', cascaded_ext_subnet_id,
{'subnet': {'name': 'subnet@' + external_subnet_id}})
if not subnet_ret or not subnet_ret.get('subnet'):
LOG.error("update_subnet(%s) failed in cascaded." % cascaded_ext_subnet_id)
return
network_ret = self.csd_client('update_network', cascaded_ext_net_id,
{'network': {'name': 'network@' + external_net_id}})
if not network_ret or not network_ret.get('network'):
LOG.error("update_network(%s) failed in cascaded." % external_net_id)
return
else:
LOG.error("Can not find subnet by cidr(%s) in cascaded." % external_subnet_cidr)
return
extern_ip_address = ex_gw_port['fixed_ips'][0].get('ip_address')
enable_snat = ri.router.get('external_gateway_info', {}).get('enable_snat', True)
router_name = self._get_cascaded_router_name(ri.router['id'])
if self.conf.proxy_router_enable_snat:
enable_snat = self.conf.proxy_router_enable_snat
req_props = {"external_gateway_info": {
"network_id": cascaded_ext_net_id,
"enable_snat":enable_snat,
"external_fixed_ips":
[{"subnet_id": cascaded_ext_subnet_id, "ip_address": extern_ip_address}]}}
            # check the router: update it if it exists, otherwise create it
router_ret = self.csd_client('list_routers', **{'name': router_name}).get('routers')
if router_ret and len(router_ret):
router_id = router_ret[0].get('id')
ri.cascaded_router_id = router_id
external_gateway_info = router_ret[0].get('external_gateway_info', {}) or {}
if (cascaded_ext_net_id == external_gateway_info.get('network_id')) and \
(enable_snat == external_gateway_info.get('enable_snat')) and \
([{"subnet_id": cascaded_ext_subnet_id, "ip_address": extern_ip_address}] == \
external_gateway_info.get('external_fixed_ips')):
                    LOG.debug("External gateway(%s) already exists in cascaded." % cascaded_ext_net_id)
return
router_ret = self.csd_client('update_router', router_id, {'router': req_props})
if router_ret and router_ret.get('router'):
ri.cascaded_router_id = router_ret.get('router', {}).get('id')
else:
LOG.error(_('Update router failed by external network(%s) on cascaded'), str(cascaded_ext_net_id))
return
else:
router_id = self.create_cascaded_router(ri, req_props['external_gateway_info'])
if router_id:
ri.cascaded_router_id = router_id
else:
LOG.error(_('Create router failed by external network(%s) on cascaded'), str(cascaded_ext_net_id))
return
else:
            LOG.error('[external_gateway_added] gateway_external_network_id must be specified in l3_proxy.ini')
def router_deleted(self, context, router_id):
"""Deal with router deletion RPC message."""
LOG.debug(_('Got router deleted notification for %s'), router_id)
update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER)
self._queue.add(update)
def add_arp_entry(self, context, payload):
"""Add arp entry into router namespace. Called from RPC."""
pass
def del_arp_entry(self, context, payload):
"""Delete arp entry from router namespace. Called from RPC."""
pass
def routers_updated(self, context, routers):
"""Deal with routers modification and creation RPC message."""
LOG.debug(_('Got routers updated notification :%s'), routers)
if routers:
# This is needed for backward compatibility
if isinstance(routers[0], dict):
routers = [router['id'] for router in routers]
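            # accepts both a list of router dicts and a plain list of router ids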
for id in routers:
update = RouterUpdate(id, PRIORITY_RPC)
self._queue.add(update)
def router_removed_from_agent(self, context, payload):
LOG.debug(_('Got router removed from agent :%r'), payload)
router_id = payload['router_id']
update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER)
self._queue.add(update)
def router_added_to_agent(self, context, payload):
LOG.debug(_('Got router added to agent :%r'), payload)
self.routers_updated(context, payload)
def _process_routers(self, routers, all_routers=False):
pool = eventlet.GreenPool()
if (self.conf.external_network_bridge and
not ip_lib.device_exists(self.conf.external_network_bridge)):
LOG.error(_("The external network bridge '%s' does not exist"),
self.conf.external_network_bridge)
return
target_ex_net_id = self._fetch_external_net_id()
# if routers are all the routers we have (They are from router sync on
# starting or when error occurs during running), we seek the
# routers which should be removed.
# If routers are from server side notification, we seek them
# from subset of incoming routers and ones we have now.
if all_routers:
prev_router_ids = set(self.router_info)
else:
prev_router_ids = set(self.router_info) & set(
[router['id'] for router in routers])
cur_router_ids = set()
for r in routers:
# If namespaces are disabled, only process the router associated
# with the configured agent id.
if (not self.conf.use_namespaces and
r['id'] != self.conf.router_id):
continue
ex_net_id = (r['external_gateway_info'] or {}).get('network_id')
if not ex_net_id and not self.conf.handle_internal_only_routers:
continue
if (target_ex_net_id and ex_net_id and
ex_net_id != target_ex_net_id):
# Double check that our single external_net_id has not changed
# by forcing a check by RPC.
if (ex_net_id != self._fetch_external_net_id(force=True)):
continue
cur_router_ids.add(r['id'])
if r['id'] not in self.router_info:
self._router_added(r['id'], r)
ri = self.router_info[r['id']]
ri.router = r
pool.spawn_n(self.process_router, ri)
# identify and remove routers that no longer exist
for router_id in prev_router_ids - cur_router_ids:
pool.spawn_n(self._router_removed, router_id)
pool.waitall()
def _process_router_update(self):
for rp, update in self._queue.each_update_to_next_router():
LOG.debug("Starting router update for %s", update.id)
router = update.router
if update.action != DELETE_ROUTER and not router:
try:
update.timestamp = timeutils.utcnow()
routers = self.plugin_rpc.get_routers(self.context,
[update.id])
except Exception:
msg = _("Failed to fetch router information for '%s'")
LOG.exception(msg, update.id)
self.fullsync = True
continue
if routers:
router = routers[0]
if not router:
self._router_removed(update.id)
continue
self._process_routers([router])
LOG.debug("Finished a router update for %s", update.id)
rp.fetched_and_processed(update.timestamp)
def _process_routers_loop(self):
LOG.debug("Starting _process_routers_loop")
pool = eventlet.GreenPool(size=8)
while True:
pool.spawn_n(self._process_router_update)
def _router_ids(self):
if not self.conf.use_namespaces:
return [self.conf.router_id]
def sync_routers(self, routers):
try:
csd_routers_name = [self.get_router_name(r['id']) for r in routers]
csd_routers_info = self.csd_client('list_routers')
if csd_routers_info and len(csd_routers_info.get('routers')) > 0:
for csd_router in csd_routers_info.get('routers'):
if self.validate_router_name(csd_router['name']) and csd_router['name'] not in csd_routers_name:
self.delete_cascaded_router_sync(csd_router['name'].split('@')[1], csd_router['id'])
for csg_router in routers:
if csg_router['id'] not in self.router_info:
self._delete_cascaded_illegal_interface(csg_router['id'], csg_router.get(l3_constants.INTERFACE_KEY, []))
else:
ri = self.router_info[csg_router['id']]
router = ri.router
self._delete_cascaded_illegal_interface(router['id'], router.get(l3_constants.INTERFACE_KEY, []),
ri.internal_ports)
        except Exception as e:
LOG.error(_("sync_routers exception: %s"), e)
@periodic_task.periodic_task
def periodic_sync_routers_task(self, context):
LOG.debug("periodic_sync_routers_task")
self._cascaded_clean_task(context)
self._sync_routers_task(context)
def _sync_routers_task(self, context):
if self.services_sync:
super(L3NATAgent, self).process_services_sync(context)
LOG.debug(_("Starting _sync_routers_task - fullsync:%s"),
self.fullsync)
if not self.fullsync:
return
prev_router_ids = set(self.router_info)
try:
router_ids = self._router_ids()
timestamp = timeutils.utcnow()
routers = self.plugin_rpc.get_routers(
context, router_ids)
LOG.debug(_('Processing :%r'), routers)
self.sync_routers(routers)
for r in routers:
update = RouterUpdate(r['id'],
PRIORITY_SYNC_ROUTERS_TASK,
router=r,
timestamp=timestamp)
self._queue.add(update)
self.fullsync = False
LOG.debug(_("_sync_routers_task successfully completed"))
except n_rpc.RPCException:
LOG.exception(_("Failed synchronizing routers due to RPC error"))
self.fullsync = True
except Exception:
LOG.exception(_("Failed synchronizing routers"))
self.fullsync = True
else:
# Resync is not necessary for the cleanup of stale namespaces
curr_router_ids = set([r['id'] for r in routers])
# Two kinds of stale routers: Routers for which info is cached in
# self.router_info and the others. First, handle the former.
for router_id in prev_router_ids - curr_router_ids:
update = RouterUpdate(router_id,
PRIORITY_SYNC_ROUTERS_TASK,
timestamp=timestamp,
action=DELETE_ROUTER)
self._queue.add(update)
def after_start(self):
eventlet.spawn_n(self._process_routers_loop)
self._sync_routers_task(self.context)
LOG.info(_("L3 agent started"))
def routes_updated(self, ri):
new_routes = ri.router['routes']
old_routes = ri.routes
adds, removes = common_utils.diff_list_of_dict(old_routes, new_routes)
LOG.debug(_("routes_updated: new_routes:%s, old_routes:%s, adds:%s, removes:%s"), new_routes, old_routes,
adds, removes)
if adds or removes:
ri.routes = new_routes
ri.extern_extra_routes = {}
for routes in new_routes:
ri.extern_extra_routes[routes['destination']] = routes['nexthop']
ri.extra_routes_is_update = True
def check_cascaded_service_ready(self):
for retry in range(l3_constants.GET_RETRY):
try:
neutron_extensions = self.csd_client('list_extensions')
if neutron_extensions:
return True
except Exception:
if retry == (l3_constants.GET_RETRY - 1):
self.fullsync = True
return False
class L3NATAgentWithStateReport(L3NATAgent):
def __init__(self, host, conf=None):
super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-l3-proxy',
'host': host,
'topic': topics.L3_AGENT,
'configurations': {
'agent_mode': self.conf.agent_mode,
'use_namespaces': self.conf.use_namespaces,
'router_id': self.conf.router_id,
'handle_internal_only_routers':
self.conf.handle_internal_only_routers,
'external_network_bridge': self.conf.external_network_bridge,
'gateway_external_network_id':
self.conf.gateway_external_network_id,
'interface_driver': self.conf.interface_driver},
'start_flag': True,
'agent_type': l3_constants.AGENT_TYPE_L3}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
LOG.debug(_("Report state task started"))
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
router_infos = self.router_info.values()
num_routers = len(router_infos)
for ri in router_infos:
ex_gw_port = self._get_ex_gw_port(ri)
if ex_gw_port:
num_ex_gw_ports += 1
num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY,
[]))
num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY,
[]))
configurations = self.agent_state['configurations']
configurations['routers'] = num_routers
configurations['ex_gw_ports'] = num_ex_gw_ports
configurations['interfaces'] = num_interfaces
configurations['floating_ips'] = num_floating_ips
try:
csd_neutron_ready = super(L3NATAgentWithStateReport, self).check_cascaded_service_ready()
if csd_neutron_ready:
self.state_rpc.report_state(self.context, self.agent_state,
self.use_call)
self.agent_state.pop('start_flag', None)
self.use_call = False
LOG.debug(_("Report state task successfully completed"))
else:
LOG.error(_("Cascaded neutron service error!"))
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.fullsync = True
LOG.info(_("agent_updated by server side %s!"), payload)
def _register_opts(conf):
conf.register_opts(L3NATAgent.OPTS)
conf.register_opts(L3NATAgent.AGENT_OPTS, 'AGENT')
conf.register_opts(l3_ha_agent.OPTS)
config.register_interface_driver_opts_helper(conf)
config.register_use_namespaces_opts_helper(conf)
config.register_agent_state_opts_helper(conf)
config.register_root_helper(conf)
conf.register_opts(interface.OPTS)
conf.register_opts(external_process.OPTS)
conf.register_opts(AGENTS_SCHEDULER_OPTS)
def main(manager='neutron.agent.l3_proxy.L3NATAgentWithStateReport'):
_register_opts(cfg.CONF)
common_config.init(sys.argv[1:])
config.setup_logging()
server = neutron_service.Service.create(
binary='neutron-l3-proxy',
topic=topics.L3_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager=manager)
service.launch(server).wait()
if __name__ == "__main__":
sys.exit(main())
|
heyf/cloaked-octo-adventure | refs/heads/master | leetcode/053_maximum-subarray.py | 1 | # 53. Maximum Subarray - LeetCode
# https://leetcode.com/problems/maximum-subarray/description/
# For example, given the array [-2,1,-3,4,-1,2,1,-5,4],
# the contiguous subarray [4,-1,2,1] has the largest sum = 6.
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
max_ending_here = max_so_far = nums[0]
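        # Kadane's algorithm: max_ending_here is the best sum of a subarray
        # ending at the current element, max_so_far the best sum seen so far.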
for x in nums[1:]:
max_ending_here = max(x, max_ending_here + x)
max_so_far = max(max_so_far, max_ending_here)
return max_so_far
s = Solution()
pairs = [
([-2,1,-3,4,-1,2,1,-5,4],6),
([-1,-2],-1),
([1],1),
([2,3,4,5],14),
([1,-50,3],3),
([5,-2,4],7)
]
for i in pairs:
print s.maxSubArray(i[0]), i[1] == s.maxSubArray(i[0]) |
beepee14/scikit-learn | refs/heads/master | examples/classification/plot_classifier_comparison.py | 66 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "Linear Discriminant Analysis",
"Quadratic Discriminant Analysis"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LinearDiscriminantAnalysis(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
|
ktbyers/netmiko | refs/heads/develop | netmiko/raisecom/__init__.py | 1 | from netmiko.raisecom.raisecom_roap import RaisecomRoapSSH
from netmiko.raisecom.raisecom_roap import RaisecomRoapTelnet
__all__ = ["RaisecomRoapSSH", "RaisecomRoapTelnet"]
|
moreati/django | refs/heads/master | django/db/migrations/operations/base.py | 356 | from __future__ import unicode_literals
from django.db import router
class Operation(object):
"""
Base class for migration operations.
It's responsible for both mutating the in-memory model state
(see db/migrations/state.py) to represent what it performs, as well
as actually performing it against a live database.
Note that some operations won't modify memory state at all (e.g. data
copying operations), and some will need their modifications to be
optionally specified by the user (e.g. custom Python code snippets)
Due to the way this class deals with deconstruction, it should be
considered immutable.
"""
# If this migration can be run in reverse.
# Some operations are impossible to reverse, like deleting data.
reversible = True
# Can this migration be represented as SQL? (things like RunPython cannot)
reduces_to_sql = True
# Should this operation be forced as atomic even on backends with no
# DDL transaction support (i.e., does it have no DDL, like RunPython)
atomic = False
serialization_expand_args = []
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
self = object.__new__(cls)
self._constructor_args = (args, kwargs)
return self
def deconstruct(self):
"""
Returns a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
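        For example (purely illustrative), an operation constructed as
        MyOperation('app_label', flag=True) deconstructs to
        ('MyOperation', ('app_label',), {'flag': True}).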
"""
return (
self.__class__.__name__,
self._constructor_args[0],
self._constructor_args[1],
)
def state_forwards(self, app_label, state):
"""
Takes the state from the previous migration, and mutates it
so that it matches what this migration would perform.
"""
raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')
def describe(self):
"""
Outputs a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args)
def references_model(self, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
model name (as a string), with an optional app label for accuracy.
Used for optimization. If in doubt, return True;
returning a false positive will merely make the optimizer a little
less efficient, while returning a false negative may result in an
unusable optimized migration.
"""
return True
def references_field(self, model_name, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
field name, with an optional app label for accuracy.
Used for optimization. If in doubt, return True.
"""
return self.references_model(model_name, app_label)
def allow_migrate_model(self, connection_alias, model):
"""
Returns if we're allowed to migrate the model.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.
"""
if not model._meta.can_migrate(connection_alias):
return False
return router.allow_migrate_model(connection_alias, model)
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
", ".join(map(repr, self._constructor_args[0])),
",".join(" %s=%r" % x for x in self._constructor_args[1].items()),
)
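# Illustrative sketch (not part of Django itself): a minimal data-only
# operation built on the base class above; the model and field names used
# here are hypothetical.
# class BackfillSlugs(Operation):
#     reversible = True
#     reduces_to_sql = False
#     def state_forwards(self, app_label, state):
#         # a pure data operation; the in-memory model state is unchanged
#         pass
#     def database_forwards(self, app_label, schema_editor, from_state, to_state):
#         Article = to_state.apps.get_model(app_label, "Article")
#         if self.allow_migrate_model(schema_editor.connection.alias, Article):
#             for article in Article.objects.all():
#                 article.slug = article.title.lower().replace(" ", "-")
#                 article.save()
#     def database_backwards(self, app_label, schema_editor, from_state, to_state):
#         # nothing to undo for a backfill
#         pass
#     def describe(self):
#         return "Backfills Article.slug from Article.title"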
|
cctaylor/googleads-python-lib | refs/heads/master | examples/dfp/v201411/inventory_service/update_ad_units.py | 4 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates ad unit sizes by adding a banner ad size.
To determine which ad units exist, run get_all_ad_units.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: InventoryService.getAdUnit
Tags: InventoryService.updateAdUnits
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
# Set the ID of the ad unit to get.
AD_UNIT_ID = 'INSERT_AD_UNIT_ID_HERE'
def main(client, ad_unit_id):
# Initialize appropriate service.
inventory_service = client.GetService('InventoryService', version='v201411')
# Create a statement to select a single ad unit by ID.
values = [{
'key': 'id',
'value': {
'xsi_type': 'TextValue',
'value': ad_unit_id
}
}]
query = 'WHERE id = :id'
statement = dfp.FilterStatement(query, values)
# Get ad units by statement.
response = inventory_service.getAdUnitsByStatement(
statement.ToStatement())
# Add the size 468x60 to the ad unit.
ad_unit_size = {
'size': {
'width': '468',
'height': '60'
},
'environmentType': 'BROWSER'
}
if 'results' in response:
updated_ad_units = []
for ad_unit in response['results']:
if 'adUnitSizes' not in ad_unit:
ad_unit['adUnitSizes'] = []
ad_unit['adUnitSizes'].append(ad_unit_size)
updated_ad_units.append(ad_unit)
# Update ad unit on the server.
ad_units = inventory_service.updateAdUnits(updated_ad_units)
# Display results.
for ad_unit in ad_units:
ad_unit_sizes = ['{%s x %s}' % (size['size']['width'],
size['size']['height'])
for size in ad_unit['adUnitSizes']]
print ('Ad unit with ID \'%s\', name \'%s\', and sizes [%s] was updated'
% (ad_unit['id'], ad_unit['name'], ','.join(ad_unit_sizes)))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, AD_UNIT_ID)
|
ltilve/chromium | refs/heads/igalia-sidebar | build/android/gyp/create_device_library_links.py | 52 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates symlinks to native libraries for an APK.
The native libraries should have previously been pushed to the device (in
options.target_dir). This script then creates links in an apk's lib/ folder to
those native libraries.
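For a library named "libfoo.so" (illustrative only), the generated script
contains, roughly:
  rm $APK_LIBRARIES_DIR/libfoo.so > /dev/null 2>&1
  ln -s $STRIPPED_LIBRARIES_DIR/libfoo.so $APK_LIBRARIES_DIR/libfoo.so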
"""
import optparse
import os
import sys
from util import build_device
from util import build_utils
BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(BUILD_ANDROID_DIR)
from pylib import constants
from pylib.utils import apk_helper
def RunShellCommand(device, cmd):
output = device.RunShellCommand(cmd)
if output:
raise Exception(
'Unexpected output running command: ' + cmd + '\n' +
'\n'.join(output))
def CreateSymlinkScript(options):
libraries = build_utils.ParseGypList(options.libraries)
link_cmd = (
'rm $APK_LIBRARIES_DIR/%(lib_basename)s > /dev/null 2>&1 \n'
'ln -s $STRIPPED_LIBRARIES_DIR/%(lib_basename)s '
'$APK_LIBRARIES_DIR/%(lib_basename)s \n'
)
script = '#!/bin/sh \n'
for lib in libraries:
script += link_cmd % { 'lib_basename': lib }
with open(options.script_host_path, 'w') as scriptfile:
scriptfile.write(script)
def TriggerSymlinkScript(options):
device = build_device.GetBuildDeviceFromPath(
options.build_device_configuration)
if not device:
return
apk_package = apk_helper.GetPackageName(options.apk)
apk_libraries_dir = '/data/data/%s/lib' % apk_package
device_dir = os.path.dirname(options.script_device_path)
mkdir_cmd = ('if [ ! -e %(dir)s ]; then mkdir -p %(dir)s; fi ' %
{ 'dir': device_dir })
RunShellCommand(device, mkdir_cmd)
device.PushChangedFiles([(options.script_host_path,
options.script_device_path)])
trigger_cmd = (
'APK_LIBRARIES_DIR=%(apk_libraries_dir)s; '
'STRIPPED_LIBRARIES_DIR=%(target_dir)s; '
'. %(script_device_path)s'
) % {
'apk_libraries_dir': apk_libraries_dir,
'target_dir': options.target_dir,
'script_device_path': options.script_device_path
}
RunShellCommand(device, trigger_cmd)
def main(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
parser.add_option('--apk', help='Path to the apk.')
parser.add_option('--script-host-path',
help='Path on the host for the symlink script.')
parser.add_option('--script-device-path',
help='Path on the device to push the created symlink script.')
parser.add_option('--libraries',
help='List of native libraries.')
parser.add_option('--target-dir',
help='Device directory that contains the target libraries for symlinks.')
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option('--build-device-configuration',
help='Path to build device configuration.')
parser.add_option('--configuration-name',
help='The build CONFIGURATION_NAME')
options, _ = parser.parse_args(args)
required_options = ['apk', 'libraries', 'script_host_path',
'script_device_path', 'target_dir', 'configuration_name']
build_utils.CheckOptions(options, parser, required=required_options)
constants.SetBuildType(options.configuration_name)
CreateSymlinkScript(options)
TriggerSymlinkScript(options)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
umuzungu/zipline | refs/heads/master | tests/pipeline/test_frameload.py | 4 | """
Tests for zipline.pipeline.loaders.frame.DataFrameLoader.
"""
from unittest import TestCase
from mock import patch
from numpy import arange, ones
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
DatetimeIndex,
Int64Index,
)
from zipline.lib.adjustment import (
ADD,
Float64Add,
Float64Multiply,
Float64Overwrite,
MULTIPLY,
OVERWRITE,
)
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders.frame import (
DataFrameLoader,
)
from zipline.utils.tradingcalendar import trading_day
class DataFrameLoaderTestCase(TestCase):
def setUp(self):
self.nsids = 5
self.ndates = 20
self.sids = Int64Index(range(self.nsids))
self.dates = DatetimeIndex(
start='2014-01-02',
freq=trading_day,
periods=self.ndates,
)
self.mask = ones((len(self.dates), len(self.sids)), dtype=bool)
def tearDown(self):
pass
def test_bad_input(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameLoader(
USEquityPricing.close,
baseline,
)
with self.assertRaises(ValueError):
# Wrong column.
loader.load_adjusted_array(
[USEquityPricing.open], self.dates, self.sids, self.mask
)
with self.assertRaises(ValueError):
# Too many columns.
loader.load_adjusted_array(
[USEquityPricing.open, USEquityPricing.close],
self.dates,
self.sids,
self.mask,
)
def test_baseline(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameLoader(USEquityPricing.close, baseline)
dates_slice = slice(None, 10, None)
sids_slice = slice(1, 3, None)
[adj_array] = loader.load_adjusted_array(
[USEquityPricing.close],
self.dates[dates_slice],
self.sids[sids_slice],
self.mask[dates_slice, sids_slice],
).values()
for idx, window in enumerate(adj_array.traverse(window_length=3)):
expected = baseline.values[dates_slice, sids_slice][idx:idx + 3]
assert_array_equal(window, expected)
def test_adjustments(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
# Use the dates from index 10 on and sids 1-3.
dates_slice = slice(10, None, None)
sids_slice = slice(1, 4, None)
# Adjustments that should actually affect the output.
relevant_adjustments = [
{
'sid': 1,
'start_date': None,
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 0.5,
'kind': MULTIPLY,
},
{
'sid': 2,
'start_date': self.dates[5],
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 1.0,
'kind': ADD,
},
{
'sid': 2,
'start_date': self.dates[15],
'end_date': self.dates[16],
'apply_date': self.dates[17],
'value': 1.0,
'kind': ADD,
},
{
'sid': 3,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': 99.0,
'kind': OVERWRITE,
},
]
# These adjustments shouldn't affect the output.
irrelevant_adjustments = [
{ # Sid Not Requested
'sid': 0,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Sid Unknown
'sid': 9999,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Not Requested
'sid': 2,
'start_date': self.dates[1],
'end_date': self.dates[2],
'apply_date': self.dates[3],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Before Known Data
'sid': 2,
'start_date': self.dates[0] - (2 * trading_day),
'end_date': self.dates[0] - trading_day,
'apply_date': self.dates[0] - trading_day,
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date After Known Data
'sid': 2,
'start_date': self.dates[-1] + trading_day,
'end_date': self.dates[-1] + (2 * trading_day),
'apply_date': self.dates[-1] + (3 * trading_day),
'value': -9999.0,
'kind': OVERWRITE,
},
]
adjustments = DataFrame(relevant_adjustments + irrelevant_adjustments)
loader = DataFrameLoader(
USEquityPricing.close,
baseline,
adjustments=adjustments,
)
expected_baseline = baseline.iloc[dates_slice, sids_slice]
formatted_adjustments = loader.format_adjustments(
self.dates[dates_slice],
self.sids[sids_slice],
)
expected_formatted_adjustments = {
6: [
Float64Multiply(
first_row=0,
last_row=5,
first_col=0,
last_col=0,
value=0.5,
),
Float64Add(
first_row=0,
last_row=5,
first_col=1,
last_col=1,
value=1.0,
),
],
7: [
Float64Add(
first_row=5,
last_row=6,
first_col=1,
last_col=1,
value=1.0,
),
],
8: [
Float64Overwrite(
first_row=6,
last_row=7,
first_col=2,
last_col=2,
value=99.0,
)
],
}
self.assertEqual(formatted_adjustments, expected_formatted_adjustments)
mask = self.mask[dates_slice, sids_slice]
with patch('zipline.pipeline.loaders.frame.AdjustedArray') as m:
loader.load_adjusted_array(
columns=[USEquityPricing.close],
dates=self.dates[dates_slice],
assets=self.sids[sids_slice],
mask=mask,
)
self.assertEqual(m.call_count, 1)
args, kwargs = m.call_args
assert_array_equal(kwargs['data'], expected_baseline.values)
assert_array_equal(kwargs['mask'], mask)
self.assertEqual(kwargs['adjustments'], expected_formatted_adjustments)
|
mlperf/training_results_v0.7 | refs/heads/master | Inspur/benchmarks/transformer/implementations/implementation_closed/fairseq/sequence_scorer.py | 6 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from fairseq import utils
class SequenceScorer(object):
"""Scores the target for a given source sentence."""
def __init__(self, models, tgt_dict):
self.models = models
self.pad = tgt_dict.pad()
def cuda(self):
for model in self.models:
model.cuda()
return self
def score_batched_itr(self, data_itr, cuda=False, timer=None):
"""Iterate over a batched dataset and yield scored translations."""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if timer is not None:
timer.start()
pos_scores, attn = self.score(s)
for i, id in enumerate(s['id'].data):
# remove padding from ref
src = utils.strip_pad(s['net_input']['src_tokens'].data[i, :], self.pad)
ref = utils.strip_pad(s['target'].data[i, :], self.pad) if s['target'] is not None else None
tgt_len = ref.numel()
pos_scores_i = pos_scores[i][:tgt_len]
score_i = pos_scores_i.sum() / tgt_len
if attn is not None:
attn_i = attn[i]
_, alignment = attn_i.max(dim=0)
else:
attn_i = alignment = None
hypos = [{
'tokens': ref,
'score': score_i,
'attention': attn_i,
'alignment': alignment,
'positional_scores': pos_scores_i,
}]
if timer is not None:
timer.stop(s['ntokens'])
# return results in the same format as SequenceGenerator
yield id, src, ref, hypos
def score(self, sample):
"""Score a batch of translations."""
net_input = sample['net_input']
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in self.models:
with torch.no_grad():
model.eval()
decoder_out = model.forward(**net_input)
attn = decoder_out[1]
probs = model.get_normalized_probs(decoder_out, log_probs=False, sample=sample).data
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None:
attn = attn.data
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs.div_(len(self.models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(self.models))
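        # pick, at each target position, the log-probability that the
        # ensemble assigned to the reference target token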
avg_probs = avg_probs.gather(
dim=2,
index=sample['target'].data.unsqueeze(-1),
)
return avg_probs.squeeze(2), avg_attn
|
ChromeDevTools/devtools-frontend | refs/heads/master | third_party/pyjson5/src/json5/fakes/host_fake.py | 14 | # Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import sys
if sys.version_info[0] < 3:
# pylint: disable=redefined-builtin
str = unicode
class FakeHost(object):
# "too many instance attributes" pylint: disable=R0902
# "redefining built-in" pylint: disable=W0622
# "unused arg" pylint: disable=W0613
python_interpreter = 'python'
def __init__(self):
self.stdin = io.StringIO()
self.stdout = io.StringIO()
self.stderr = io.StringIO()
self.platform = 'linux2'
self.sep = '/'
self.dirs = set([])
self.files = {}
self.written_files = {}
self.last_tmpdir = None
self.current_tmpno = 0
self.cwd = '/tmp'
def abspath(self, *comps):
relpath = self.join(*comps)
if relpath.startswith('/'):
return relpath
return self.join(self.cwd, relpath)
def chdir(self, *comps): # pragma: no cover
path = self.join(*comps)
if not path.startswith('/'):
path = self.join(self.cwd, path)
self.cwd = path
def dirname(self, path):
return '/'.join(path.split('/')[:-1])
def fileinput(self, files=None):
if files:
for f in files:
for l in self.read_text_file(f).splitlines():
yield l
else:
for l in self.stdin.readlines():
yield l
def getcwd(self):
return self.cwd
def join(self, *comps): # pragma: no cover
p = ''
for c in comps:
if c in ('', '.'):
continue
elif c.startswith('/'):
p = c
elif p:
p += '/' + c
else:
p = c
# Handle ./
p = p.replace('/./', '/')
# Handle ../
while '/..' in p:
comps = p.split('/')
idx = comps.index('..')
comps = comps[:idx-1] + comps[idx+1:]
p = '/'.join(comps)
return p
def maybe_mkdir(self, *comps): # pragma: no cover
path = self.abspath(self.join(*comps))
if path not in self.dirs:
self.dirs.add(path)
def mkdtemp(self, suffix='', prefix='tmp', dir=None, **_kwargs):
if dir is None:
dir = self.sep + '__im_tmp'
curno = self.current_tmpno
self.current_tmpno += 1
self.last_tmpdir = self.join(dir, '%s_%u_%s' % (prefix, curno, suffix))
self.dirs.add(self.last_tmpdir)
return self.last_tmpdir
def print_(self, msg=u'', end=u'\n', stream=None):
stream = stream or self.stdout
stream.write(str(msg) + str(end))
stream.flush()
def read_text_file(self, *comps):
return self._read(comps)
def _read(self, comps):
return self.files[self.abspath(*comps)]
def remove(self, *comps):
path = self.abspath(*comps)
self.files[path] = None
self.written_files[path] = None
def rmtree(self, *comps):
path = self.abspath(*comps)
for f in self.files:
if f.startswith(path):
self.remove(f)
self.dirs.remove(path)
def write_text_file(self, path, contents):
self._write(path, contents)
def _write(self, path, contents):
full_path = self.abspath(path)
self.maybe_mkdir(self.dirname(full_path))
self.files[full_path] = contents
self.written_files[full_path] = contents
|
jimmy201602/webterminal | refs/heads/master | webterminal/commandextract.py | 1 | import re
class CommandDeal(object):
@staticmethod
def remove_obstruct_char(cmd_str):
        '''delete some special control delimiters'''
        control_char = re.compile(r'\x07 | \x1b\[1P | \r ', re.X)
        cmd_str = control_char.sub('', cmd_str.strip())
        # delete a backspace followed by a cursor-right sequence
patch_char = re.compile('\x08\x1b\[C')
while patch_char.search(cmd_str):
cmd_str = patch_char.sub('', cmd_str.rstrip())
return cmd_str
@staticmethod
def deal_backspace(match_str, result_command, pattern_str, backspace_num):
'''
        handle backspace/delete key sequences
'''
if backspace_num > 0:
if backspace_num > len(result_command):
result_command += pattern_str
result_command = result_command[0:-backspace_num]
else:
result_command = result_command[0:-backspace_num]
result_command += pattern_str
del_len = len(match_str) - 3
if del_len > 0:
result_command = result_command[0:-del_len]
return result_command, len(match_str)
@staticmethod
def deal_replace_char(match_str, result_command, backspace_num):
'''
        handle in-place character replacement sequences in the command
'''
str_lists = re.findall(r'(?<=\x1b\[1@)\w', match_str)
tmp_str = ''.join(str_lists)
result_command_list = list(result_command)
if len(tmp_str) > 1:
result_command_list[-backspace_num:-
(backspace_num - len(tmp_str))] = tmp_str
elif len(tmp_str) > 0:
if result_command_list[-backspace_num] == ' ':
result_command_list.insert(-backspace_num, tmp_str)
else:
result_command_list[-backspace_num] = tmp_str
result_command = ''.join(result_command_list)
return result_command, len(match_str)
def deal_command(self, str_r):
"""
        reconstruct the command string, handling special keys
"""
str_r = self.remove_obstruct_char(str_r)
result_command = '' # final result
        backspace_num = 0 # number of pending backspaces (cursor offset from the end)
        reach_backspace_flag = False # whether a backspace sequence has been seen
pattern_str = ''
while str_r:
tmp = re.match(r'\s*\w+\s*', str_r)
if tmp:
str_r = str_r[len(str(tmp.group(0))):]
if reach_backspace_flag:
pattern_str += str(tmp.group(0))
continue
else:
result_command += str(tmp.group(0))
continue
tmp = re.match(r'\x1b\[K[\x08]*', str_r)
if tmp:
result_command, del_len = self.deal_backspace(
str(tmp.group(0)), result_command, pattern_str, backspace_num)
reach_backspace_flag = False
backspace_num = 0
pattern_str = ''
str_r = str_r[del_len:]
continue
tmp = re.match(r'\x08+', str_r)
if tmp:
str_r = str_r[len(str(tmp.group(0))):]
if len(str_r) != 0:
if reach_backspace_flag:
result_command = result_command[0:-
backspace_num] + pattern_str
pattern_str = ''
else:
reach_backspace_flag = True
backspace_num = len(str(tmp.group(0)))
continue
else:
break
# deal with replace command
tmp = re.match(r'(\x1b\[1@\w)+', str_r)
if tmp:
result_command, del_len = self.deal_replace_char(
str(tmp.group(0)), result_command, backspace_num)
str_r = str_r[del_len:]
backspace_num = 0
continue
if reach_backspace_flag:
pattern_str += str_r[0]
else:
result_command += str_r[0]
str_r = str_r[1:]
if backspace_num > 0:
result_command = result_command[0:-backspace_num] + pattern_str
result_command = self.remove_control_char(result_command)
return result_command
def remove_control_char(self, result_command):
"""
        strip remaining terminal control sequences
"""
control_char = re.compile(r"""
\x1b[ #%()*+\-.\/]. |
        \r | #match carriage return (CR)
        (?:\x1b\[|\x9b) [ -?]* [@-~] | #match control sequences (CSI) up to their final command byte
        (?:\x1b\]|\x9d) .*? (?:\x1b\\|[\a\x9c]) | \x07 | #match operating system commands (OSC) up to their terminator (ST or BEL)
        (?:\x1b[P^_]|[\x90\x9e\x9f]) .*? (?:\x1b\\|\x9c) | #match device control strings (DCS, PM, APC) up to their terminator (ST)
        \x1b. #match other escaped characters
        [\x80-\x9f] | (?:\x1b\]0.*) | \[.*@.*\][\$#] | (.*mysql>.*) #match remaining control chars, window titles, shell and mysql prompts
""", re.X)
result_command = control_char.sub('', result_command.strip())
try:
return result_command.decode('utf8', "ignore")
except:
return result_command
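# Illustrative usage (not part of the original module); the raw strings are
# hypothetical terminal captures, \x08 being the backspace byte:
#   CommandDeal().deal_command('ls -l\r')                # -> u'ls -l'
#   CommandDeal().deal_command('cd /tmp\x08\x08\x08etc') # -> u'cd /etc'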
|
hynnet/hiwifi-openwrt-HC5661-HC5761 | refs/heads/master | staging_dir/host/lib/python2.7/distutils/tests/setuptools_extension.py | 149 | from distutils.core import Extension as _Extension
from distutils.core import Distribution as _Distribution
def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls
_Distribution = _get_unpatched(_Distribution)
_Extension = _get_unpatched(_Extension)
try:
from Pyrex.Distutils.build_ext import build_ext
except ImportError:
have_pyrex = False
else:
have_pyrex = True
class Extension(_Extension):
"""Extension that uses '.c' files in place of '.pyx' files"""
if not have_pyrex:
# convert .pyx extensions to .c
def __init__(self,*args,**kw):
_Extension.__init__(self,*args,**kw)
sources = []
for s in self.sources:
if s.endswith('.pyx'):
sources.append(s[:-3]+'c')
else:
sources.append(s)
self.sources = sources
class Library(Extension):
"""Just like a regular Extension, but built as a library instead"""
import sys, distutils.core, distutils.extension
distutils.core.Extension = Extension
distutils.extension.Extension = Extension
if 'distutils.command.build_ext' in sys.modules:
sys.modules['distutils.command.build_ext'].Extension = Extension
|
amaniak/fabric | refs/heads/master | tests/support/mapping.py | 44 | from fabric.tasks import Task
class MappingTask(dict, Task):
def run(self):
pass
mapping_task = MappingTask()
mapping_task.name = "mapping_task"
|
Claod44/GokemonReborn | refs/heads/master | config.example.py | 1 | ### All lines that are commented out (and some that aren't) are optional ###
DB_ENGINE = 'sqlite:///db.sqlite'
#DB_ENGINE = 'mysql://user:pass@localhost/pokeminer'
#DB_ENGINE = 'postgresql://user:pass@localhost/pokeminer'
AREA_NAME = 'SLC' # the city or region you are scanning
LANGUAGE = 'EN' # ISO 639-1 codes EN, DE, FR, and ZH for Pokémon names.
MAX_CAPTCHAS = 100 # stop launching new visits if this many CAPTCHAs are pending
SCAN_DELAY = 10 # wait at least this many seconds before scanning with the same account
SPEED_LIMIT = 19 # don't travel over this many miles per hour (sorry non-Americans)
# The number of simultaneous workers will be these two numbers multiplied.
# On the initial run, workers will arrange themselves in a grid across the
# rectangle you defined with MAP_START and MAP_END.
# The rows/columns will also be used for the dot grid in the console output.
# Provide more accounts than the product of your grid to allow swapping.
GRID = (4, 4) # rows, columns
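# For example: with GRID = (4, 4) above, 4 * 4 = 16 workers run at once, so
# the ACCOUNTS list further down should contain more than 16 accounts.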
# the corner points of a rectangle for your workers to spread out over before
# any spawn points have been discovered
MAP_START = (40.7913, -111.9398)
MAP_END = (40.7143, -111.8046)
# do not visit spawn points outside of your MAP_START and MAP_END rectangle
# the boundaries will be the rectangle created by MAP_START and MAP_END, unless BOUNDARIES is set below
STAY_WITHIN_MAP = True
# ensure that you visit within this many meters of every part of your map during bootstrap
#BOOTSTRAP_RADIUS = 450
GIVE_UP_KNOWN = 75 # try to find a worker for a known spawn for this many seconds before giving up
GIVE_UP_UNKNOWN = 60 # try to find a worker for an unknown point for this many seconds before giving up
SKIP_SPAWN = 90 # don't even try to find a worker for a spawn if the spawn time was more than this many seconds ago
# the directory that the pickles folder, socket, etc. will go in
# defaults to working directory if not set
#DIRECTORY = None
# Limit the number of simultaneous logins to this many at a time.
# Lower numbers will increase the amount of time it takes for all workers to
# get started but are recommended to avoid suddenly flooding the servers with
# accounts and arousing suspicion.
SIMULTANEOUS_LOGINS = 4
# Limit the number of workers simulating the app startup process simultaneously.
SIMULTANEOUS_SIMULATION = 10
## alternatively define a Polygon to use as boundaries (requires shapely)
## if BOUNDARIES is set, STAY_WITHIN_MAP will be ignored
## more information available in the shapely manual:
## http://toblerity.org/shapely/manual.html#polygons
#from shapely.geometry import Polygon
#BOUNDARIES = Polygon(((40.799609, -111.948556), (40.792749, -111.887341), (40.779264, -111.838078), (40.761410, -111.817908), (40.728636, -111.805293), (40.688833, -111.785564), (40.689768, -111.919389), (40.750461, -111.949938)))
# If accounts use the same provider and password you can set defaults here
# and omit them from the accounts list.
#PASS = 'pik4chu'
#PROVIDER = 'ptc'
### Device information will be generated for you if you do not provide it.
### Account details are automatically retained in pickles/accounts.pickle
## valid account formats (without PASS and PROVIDER set):
# (username, password, provider, iPhone, iOS, device_id)
# (username, password, provider)
## valid account formats (with PASS and PROVIDER set):
# (username, iPhone, iOS, device_id)
# [username]
ACCOUNTS = [
('ash_ketchum', 'pik4chu', 'ptc'),
('ziemniak_kalafior', 'ogorek', 'google'),
('noideawhattoputhere', 's3cr3t', 'ptc'),
('misty', 'bulbus4ur', 'ptc')
]
# key for Bossland's hashing server, otherwise the old hashing lib will be used
#HASH_KEY = '9d87af14461b93cb3605' # this key is fake
### these next 6 options use more requests but look more like the real client
APP_SIMULATION = True # mimic the actual app's login requests
COMPLETE_TUTORIAL = True # complete the tutorial process and configure avatar for all accounts that haven't yet
INCUBATE_EGGS = False # incubate eggs if available
## encounter Pokémon to store IVs.
## valid options:
# 'all' will encounter every Pokémon that hasn't been already been encountered
# 'notifying' will encounter Pokémon that are eligible for notifications
# None will never encounter Pokémon
ENCOUNTER = None
# PokéStops
SPIN_POKESTOPS = False # spin all PokéStops that are within range
SPIN_COOLDOWN = 300 # spin only one PokéStop every n seconds (default 300)
# minimum number of each item to keep if the bag is cleaned
# remove or set to None to disable bag cleaning
# automatically disabled if SPIN_POKESTOPS is disabled
''' # triple quotes are comments, remove them to use this ITEM_LIMITS example
ITEM_LIMITS = {
1: 20, # Poké Ball
2: 50, # Great Ball
3: 100, # Ultra Ball
101: 0, # Potion
102: 0, # Super Potion
103: 0, # Hyper Potion
104: 40, # Max Potion
201: 0, # Revive
202: 40, # Max Revive
}
'''
# sent with GET_PLAYER requests, should match your region
PLAYER_LOCALE = {'country': 'US', 'language': 'en', 'timezone': 'America/Denver'}
# retry a request after failure this many times before giving up
MAX_RETRIES = 3
# number of seconds before timing out on a login request
LOGIN_TIMEOUT = 2.5
# add spawn points reported in cell_ids to the unknown spawns list
# disable if your workers already have more than enough points to visit
MORE_POINTS = True
# exclude these Pokémon from the map by default (only visible in trash layer)
TRASH_IDS = (
16, 19, 21, 29, 32, 41, 46, 48, 50, 52, 56, 74, 77, 96, 111, 133
)
# include these Pokémon on the "rare" report
RARE_IDS = (
3, 6, 9, 45, 62, 71, 80, 85, 87, 89, 91, 94, 114, 130, 131, 134
)
from datetime import datetime
REPORT_SINCE = datetime(2016, 12, 17) # base reports on data from after this date
# used for altitude queries and maps in reports
GOOGLE_MAPS_KEY = 'OYOgW1wryrp2RKJ81u7BLvHfYUA6aArIyuQCXu4' # this key is fake
#ALT_RANGE = (1250, 1450) # Fall back to altitudes in this range if Google query fails
REPORT_MAPS = True # Show maps on reports
## Automatically resolve captchas using 2Captcha key.
#CAPTCHA_KEY = '1abc234de56fab7c89012d34e56fa7b8'
## the number of CAPTCHAs an account is allowed to receive before being swapped out
#CAPTCHAS_ALLOWED = 3
MAP_WORKERS = True # allow displaying the live location of workers on the map
# unix timestamp of last spawn point migration, spawn times learned before this will be ignored
LAST_MIGRATION = 1481932800 # Dec. 17th, 2016
## Map data provider and appearance, previews available at:
## https://leaflet-extras.github.io/leaflet-providers/preview/
#MAP_PROVIDER_URL = '//{s}.tile.osm.org/{z}/{x}/{y}.png'
#MAP_PROVIDER_ATTRIBUTION = '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors'
# set of proxy addresses and ports
#PROXIES = {'socks5://127.0.0.1:1080', 'socks5://127.0.0.1:1081'}
# convert spawn_id to integer for more efficient DB storage, set to False if
# using an old database since the data types are incompatible.
#SPAWN_ID_INT = True
# Bytestring key to authenticate with manager for inter-process communication
#AUTHKEY = b'm3wtw0'
# Address to use for manager, leave unset or set to None if you're not sure.
#MANAGER_ADDRESS = r'\\.\pipe\pokeminer' # must be in this format for Windows
#MANAGER_ADDRESS = 'pokeminer.sock' # the socket name for Unix systems
#MANAGER_ADDRESS = ('127.0.0.1', 5002) # could be used for CAPTCHA solving and live worker maps on remote systems
# Store the cell IDs so that they don't have to be recalculated every visit.
# Highly recommended unless you don't have enough memory for them.
# Disabling will increase processor usage.
#CACHE_CELLS = True
### OPTIONS BELOW THIS POINT ARE ONLY NECESSARY FOR NOTIFICATIONS ###
NOTIFY = False # enable notifications
# create images with Pokémon stats for Tweets
# requires cairo and ENCOUNTER = 'notifying' or 'all'
TWEET_IMAGES = True
# As many hashtags as can fit will be included in your tweets; these will
# be combined with landmark-specific hashtags (if applicable).
HASHTAGS = {AREA_NAME, 'Pokeminer+', 'PokemonGO'}
#TZ_OFFSET = 0 # UTC offset in hours (if different from system time)
# the required number of seconds remaining to notify about a Pokémon
TIME_REQUIRED = 600 # 10 minutes
### Only set either the NOTIFY_RANKING or NOTIFY_IDS, not both!
# The (x) rarest Pokémon will be eligible for notification. Whether a
# notification is sent or not depends on its score, as explained below.
NOTIFY_RANKING = 90
# Pokémon to potentially notify about, in order of preference.
# The first in the list will have a rarity score of 1, the last will be 0.
#NOTIFY_IDS = (130, 89, 131, 3, 9, 134, 62, 94, 91, 87, 71, 45, 85, 114, 80, 6)
# Sightings of the top (x) will always be notified about, even if below TIME_REQUIRED
# (ignored if using NOTIFY_IDS instead of NOTIFY_RANKING)
ALWAYS_NOTIFY = 14
# Always notify about the following Pokémon even if their time remaining or scores are not high enough
#ALWAYS_NOTIFY_IDS = {89, 130, 144, 145, 146, 150, 151}
# Never notify about the following Pokémon, even if they would otherwise be eligible
#NEVER_NOTIFY_IDS = TRASH_IDS
# Override the rarity score for particular Pokémon
# format is: {pokemon_id: rarity_score}
#RARITY_OVERRIDE = {148: 0.6, 149: 0.9}
# Ignore IV score and only base decision on rarity score (default if IVs not known)
#IGNORE_IVS = False
# Ignore rarity score and only base decision on IV score
#IGNORE_RARITY = False
# The Pokémon score required to notify goes on a sliding scale from INITIAL_SCORE
# to MINIMUM_SCORE over the course of FULL_TIME seconds following a notification
# Pokémon scores are an average of the Pokémon's rarity score and IV score (from 0 to 1)
# If NOTIFY_RANKING is 90, the 90th most common Pokémon will have a rarity score of 0, the rarest will be 1.
# IV score is the IV sum divided by 45 (perfect IVs).
FULL_TIME = 1800 # the number of seconds after a notification when only MINIMUM_SCORE will be required
INITIAL_SCORE = 0.7 # the required score immediately after a notification
MINIMUM_SCORE = 0.4 # the required score after FULL_TIME seconds have passed
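## Illustrative sketch only (not read by the bot): one way the sliding scale
## described above can be expressed, where `elapsed` is assumed to be the number
## of seconds since the last notification was sent.
#def _required_score(elapsed):
#    progress = min(float(elapsed) / FULL_TIME, 1.0)
#    return INITIAL_SCORE - (INITIAL_SCORE - MINIMUM_SCORE) * progress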
# the number of encounter_ids to retain for duplicate checking. Should be at
# least as high as the highest number of notifications you'd send in an hour.
NOTIFICATION_CACHE = 100
### The following values are fake, replace them with your own keys to enable
### PushBullet notifications and/or tweeting, otherwise leave them out of your
### config or set them to None.
## you must provide keys for at least one service to use notifications
#PB_API_KEY = 'o.9187cb7d5b857c97bfcaa8d63eaa8494'
#PB_CHANNEL = 0 # set to the integer of your channel, or to None to push privately
#TWITTER_CONSUMER_KEY = '53d997264eb7f6452b7bf101d'
#TWITTER_CONSUMER_SECRET = '64b9ebf618829a51f8c0535b56cebc808eb3e80d3d18bf9e00'
#TWITTER_ACCESS_KEY = '1dfb143d4f29-6b007a5917df2b23d0f6db951c4227cdf768b'
#TWITTER_ACCESS_SECRET = 'e743ed1353b6e9a45589f061f7d08374db32229ec4a61'
#WEBHOOKS = {'http://127.0.0.1:4000'}
##### Referencing landmarks in your tweets/notifications
#### It is recommended to store the LANDMARKS object in a pickle to reduce startup
#### time if you are using queries. An example script for this is in:
#### scripts/pickle_landmarks.example.py
#from pickle import load
#with open('pickles/landmarks.pickle', 'rb') as f:
# LANDMARKS = load(f)
### if you do pickle it, just load the pickle and omit everything below this point
#from landmarks import Landmarks
#LANDMARKS = Landmarks(query_suffix=AREA_NAME)
# Landmarks to reference when Pokémon are nearby
# If no points are specified then it will query OpenStreetMap for the coordinates
# If 1 point is provided then it will use those coordinates but not create a shape
# If 2 points are provided it will create a rectangle with its corners at those points
# If 3 or more points are provided it will create a polygon with vertices at each point
# You can specify the string to search for on OpenStreetMap with the query parameter
# If no query or points is provided it will query with the name of the landmark (and query_suffix)
# Optionally provide a set of hashtags to be used for tweets about this landmark
# Use is_area for neighborhoods, regions, etc.
# When selecting a landmark, non-areas will be chosen first if any are close enough
# the default phrase is 'in' for areas and 'at' for non-areas, but can be overridden for either.
### replace these with well-known places in your area
## since no points or query is provided, the names provided will be queried and suffixed with AREA_NAME
#LANDMARKS.add('Rice Eccles Stadium', shortname='Rice Eccles', hashtags={'Utes'})
#LANDMARKS.add('the Salt Lake Temple', shortname='the temple', hashtags={'TempleSquare'})
## provide two corner points to create a square for this area
#LANDMARKS.add('City Creek Center', points=((40.769210, -111.893901), (40.767231, -111.888275)), hashtags={'CityCreek'})
## provide a query that is different from the landmark name so that OpenStreetMap finds the correct one
#LANDMARKS.add('the State Capitol', shortname='the Capitol', query='Utah State Capitol Building')
### area examples ###
## query using name, override the default area phrase so that it says 'at (name)' instead of 'in'
#LANDMARKS.add('the University of Utah', shortname='the U of U', hashtags={'Utes'}, phrase='at', is_area=True)
## provide corner points to create a polygon of the area since OpenStreetMap does not have a shape for it
#LANDMARKS.add('Yalecrest', points=((40.750263, -111.836502), (40.750377, -111.851108), (40.751515, -111.853833), (40.741212, -111.853909), (40.741188, -111.836519)), is_area=True)
|
alforro/TSP-Solver | refs/heads/master | artificial_intelligence/models.py | 1 | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class TSP_Solution(models.Model):
matrix_size = models.IntegerField(default=0)
solution_cost = models.FloatField(default=0.0)
coordinates = models.CharField(max_length=100000)
expanded_nodes = models.IntegerField(default=0)
execution_time = models.FloatField(default=0.0)
nodes = models.IntegerField(default=0)
approach = models.CharField(max_length=20, default='') |
nomadjourney/django-tastypie | refs/heads/master | docs/conf.py | 12 | # -*- coding: utf-8 -*-
#
# Tastypie documentation build configuration file, created by
# sphinx-quickstart on Sat May 22 21:44:34 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'toc'
# General information about the project.
project = u'Tastypie'
copyright = u'2010-2012, Daniel Lindsley, Cody Soyland, Matt Croydon, Josh Bohde & Issac Kelly'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.12'
# The full version, including alpha/beta/rc tags.
release = '0.9.12-alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tastypiedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Tastypie.tex', u'Tastypie Documentation',
u'Daniel Lindsley, Cody Soyland \\& Matt Croydon', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
misterben/whetstone | refs/heads/master | whetstone-0.0.2/whetstone.py | 1 | #!/usr/bin/env python
# Whetstone
# A tool to help you memorise scripture
#
# Whetstone - Helping you keep your Sword sharp
# Copyright (C) 2009 Ben Thorp ( [email protected] )
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# v0.0.2
# Still requires Diatheke, but will work with any installed SWORD modules
# Quiz page up and running for multiple choice quizzes
# Various bugfixes in planners - note that planners from v0.0.1 will lose the end date
# Preferences page to choose Translation and Default Planner
# v0.0.1
# Requires Diatheke and the ESV bible module to be installed
import sys, commands, re, datetime, string, ConfigParser, os, random
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
import gobject
import pango
except:
sys.exit(1)
from pysqlite2 import dbapi2 as sqlite
import xdg.BaseDirectory
diatheke_test = commands.getoutput("which diatheke")
if (diatheke_test == ""):
print "Unable to locate diatheke; you will not be able to pull scripture verses"
else:
print "Located diatheke at "+diatheke_test
# Load preferences
config_path = xdg.BaseDirectory.save_config_path("whetstone")
config = ConfigParser.RawConfigParser()
global current_translation
global current_planner
current_translation = 'ESV'
current_planner = 'None'
if (os.path.exists(os.path.join(config_path,'.whetstone'))):
config.read(os.path.join(config_path,'.whetstone'))
current_translation = config.get("Section1", "translation")
current_planner = config.get("Section1", "default_planner")
else:
# Create config file
config.add_section('Section1')
config.set('Section1','translation','ESV')
config.set('Section1','default_planner','None')
with open(os.path.join(config_path, '.whetstone'), 'wb') as configfile:
config.write(configfile)
# Static translation to start with; need to add this to the preferences.
#tr = "ESV"
def strip_text(verse):
''' Takes a text from the Sword project and strips out all the notes '''
temp = verse.split('\n')[:-1]
output = ""
for line in temp:
if (line!=""):
output += re.sub('^.*?:.*?: ','',line)
output += "\n"
return unicode(output)
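# Illustrative example (assumes diatheke's usual "Book Chapter:Verse: text" lines
# followed by a trailing module line, which is dropped):
#   strip_text("John 3:16: For God so loved the world...\n(ESV)\n")
#   would return u"For God so loved the world...\n"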
def split_verse(verse):
''' Return a tuple of book, chapter, verse when given a reference '''
temp1 = verse.split(':')
# This gives us [book chapter] and [verses]
if (len(temp1) > 1):
verses = temp1[1]
else:
verses = "ALL"
temp2 = temp1[0].split(' ')
chapter = temp2[-1]
book = " ".join(temp2[:-1])
    # return the verse portion (or "ALL"), not the original full reference string
    return (book, chapter, verses)
def join_verse(verse):
''' Return a string when given a verse tuple '''
if (verse[2] == "ALL"):
return verse[0]+" "+verse[1]
else:
return verse[0]+" "+verse[1]+":"+verse[2]
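# Illustrative examples of the two helpers above:
#   split_verse("John 3:16") -> ("John", "3", "16")
#   split_verse("Jude 1")    -> ("Jude", "1", "ALL")
#   join_verse(("John", "3", "16")) -> "John 3:16"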
def ellipsize(text, size=50):
''' Return a string of size 'size' with ellipses where truncated '''
if len(text) > size:
return text[:size-3]+"..."
else:
return text
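# Illustrative: ellipsize("For God so loved the world", 15) -> "For God so l..."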
def get_translations_from_sword():
''' Return a list of translations available from SWORD in the format [name,description] '''
raw_modules = commands.getoutput('diatheke -b system -k modulelist')
tr_list = []
# Need to divide up and only pull the bible translations and not other books and commentaries
temp = raw_modules.split(":\n")
for item in temp[1].split('\n')[:-1]:
temp2 = item.split(' : ')
tr_list.append(temp2)
return tr_list
def bylength(word1, word2):
    """
    Compare function for sorting words by length, longest first:
    returns a value > 0 if word2 is longer than word1,
    returns 0 if they are the same length,
    returns a value < 0 if word1 is longer than word2.
    """
    return len(word2) - len(word1)
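# Illustrative usage: word_list.sort(cmp=bylength) puts the longest words first,
# e.g. ['in', 'the', 'beginning'] becomes ['beginning', 'the', 'in']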
def stars(word):
''' Return *s for a given word '''
return "".join(["*" for letter in word])
class MVDB():
''' This defines the sqlite database '''
def __init__(self):
self.con = sqlite.connect('./.pymv_db')
self.cur = self.con.cursor()
flag = 0
# Create the tables if they don't already exist
try:
self.cur.execute("CREATE TABLE cats(refno INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, cat VARCHAR)")
self.cur.execute("CREATE TABLE verses(refno INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, catref INTEGER, book VARCHAR, chapter INTEGER, verse VARCHAR, verseref VARCHAR, versetext VARCHAR)")
self.cur.execute("CREATE TABLE planner(date DATE, verseref INTEGER, name VARCHAR)")
self.cur.execute("CREATE TABLE quiz(verseref INTEGER, word VARCHAR, option VARCHAR)")
self.con.commit()
flag = 1
except sqlite.OperationalError:
print "DB Already Exists"
if (flag == 1):
print "Created the DB. Adding categories and verses"
# Insert the "100 best verses to learn" into the verses table, and all the categories into the category table
verses = open('verses.csv', 'r').readlines()
for verseline in verses:
# Add the category
data = verseline[:-1].split('\t')
category = data[0]
print category
self.cur.execute("INSERT INTO cats(cat) VALUES(?)", (category,))
print "Added category "+category
# Get the reference
catref = self.cur.lastrowid
# Add each verse
for verse in data[1:]:
versetext = strip_text(commands.getoutput("diatheke -b "+current_translation+" -k "+verse))
verseref = unicode(verse)
book, chapter, verse = split_verse(verseref)
self.cur.execute("INSERT INTO verses(catref, book, chapter, verse, verseref, versetext) VALUES(?, ?, ?, ?, ?, ?)", (catref, book, chapter, verse, verseref, versetext))
print "\t Added verse "+verse
self.con.commit()
def cat_list(self, refs=True):
''' Return a list of categories from the database, either in the format {'ref':ref, 'title':title} or a straight list of strings'''
self.cur.execute("SELECT refno, cat FROM cats ORDER BY cat")
raw_cat = self.cur.fetchall()
cat_list = []
for item in raw_cat:
if (refs):
temp = {}
temp['ref'] = item[0]
temp['title'] = item[1]
cat_list.append(temp)
else:
cat_list.append(item[1])
return cat_list
def verse_list(self, catref=0):
''' Return a list of verses (reference and id) for a given category (or all if none given) '''
if (catref != 0):
self.cur.execute("SELECT verseref, refno FROM verses WHERE catref=?",(catref,))
else:
self.cur.execute("SELECT verseref, refno FROM verses")
raw_verses = self.cur.fetchall()
verse_list = []
for item in raw_verses:
verse_list.append((item[0],item[1]))
return verse_list
def get_verse(self, verseid=1):
''' Return a verse based on the DB ID, in the format [verseref, versetext] '''
self.cur.execute("SELECT verseref, versetext FROM verses WHERE refno="+str(verseid))
raw_text = self.cur.fetchall()
return [raw_text[0][0], raw_text[0][1]]
def get_random_verses(self, count=1, type="Verse", avoid = 0):
''' Get 'count' verses, and return as a list, either the text or the ref of the verse, avoiding the reference given '''
if type == 'Verse':
field = 'verseref'
else:
field = 'versetext'
verselist = []
self.cur.execute("SELECT COUNT(verseref) FROM verses")
max = self.cur.fetchall()[0][0]
for item in range(count):
id = random.randint(0, max)
flag = 0
while (flag==0):
try:
self.cur.execute("SELECT "+field+" FROM verses WHERE refno = "+str(id))
tempid = self.cur.fetchall()[0][0]
if (tempid != avoid):
verselist.append(tempid)
flag = 1
except:
id = random.randint(0, max)
return verselist
def delete_verse(self, verseid):
''' Delete a verse from the database '''
self.cur.execute("DELETE FROM verses WHERE refno=?", (verseid,))
# Not sure what to do with empty categories - maybe a tidy up function on exit?
self.con.commit()
def add_category(self, category):
''' Add a category to the database '''
self.cur.execute("INSERT INTO cats(cat) VALUES (?)", (category,))
self.con.commit()
def get_category_id(self, category):
''' Given a category title, find the reference '''
self.cur.execute("SELECT refno FROM cats WHERE cat=?", (category,))
results = self.cur.fetchall()
return results[0][0]
def add_verse(self, verseref, versetext, category):
''' Add a verse to the database, given the reference, text and category title '''
book, chapter, verse = split_verse(verseref)
catid = self.get_category_id(category)
self.cur.execute("INSERT INTO verses(catref, book, chapter, verse, verseref, versetext) VALUES (?,?,?,?,?,?)", (catid,book,chapter,verse,verseref,versetext))
self.con.commit()
def planner_list(self):
''' Return a list of the planners in the database '''
self.cur.execute("SELECT DISTINCT name FROM planner")
raw_results = self.cur.fetchall()
planners = []
for item in raw_results:
planners.append(item[0])
return planners
def verse_count(self, cat_list = []):
''' Count the number of verses in the given categories '''
if len(cat_list) == 0:
return 0
else:
verses = 0
sql = "SELECT count(refno) FROM verses WHERE catref IN ("
for cat in cat_list:
sql += str(cat)+","
# remove the trailing slash
sql = sql[:-1]+")"
self.cur.execute(sql)
return self.cur.fetchall()[0][0]
def add_verse_to_planner(self, planner, verse, date):
''' Add a verse into the planner table - all date handling is done in the client '''
self.cur.execute("INSERT INTO planner(date, verseref, name) VALUES (?,?,?)", (date, verse, planner))
self.con.commit()
def todays_verse(self, planner, date='now'):
''' Return the appropriate verse for today, given a particular planner, in the format [verseref, versetext, date] '''
self.cur.execute("SELECT a.verseref, a.versetext, b.date FROM verses AS a, planner AS b WHERE a.refno = (SELECT verseref FROM planner WHERE date <= date('"+date+"') AND name='"+planner+"' ORDER BY date DESC LIMIT 1) AND b.date = (SELECT date FROM planner WHERE date <= date('"+date+"') AND name = '"+planner+"' ORDER BY date DESC LIMIT 1)")
raw_result = self.cur.fetchall()
if len(raw_result) == 0:
return -1
else:
return [raw_result[0][0], raw_result[0][1], raw_result[0][2]]
def next_and_last(self, planner, date='now'):
''' Return the previous and next dates on the given planner, in the format [last,next] '''
self.cur.execute("SELECT date FROM planner WHERE date <= date('"+date+"') AND name = '"+planner+"' ORDER BY date DESC LIMIT 1")
raw_text = self.cur.fetchall()
if len(raw_text) == 0:
last = -1
else:
last = raw_text[0][0]
self.cur.execute("SELECT date FROM planner WHERE date > date('"+date+"') AND name = '"+planner+"' ORDER BY date ASC LIMIT 1")
raw_text = self.cur.fetchall()
if len(raw_text) == 0:
next = -1
else:
next = raw_text[0][0]
return [last, next]
def get_planner_dates(self, planner, month, year):
''' Return a list of all the dates in the planner for a given month '''
self.cur.execute("SELECT strftime('%d',date) FROM planner WHERE name='"+planner+"' and strftime('%m-%Y', date) = '"+month+"-"+year+"'")
raw_dates = self.cur.fetchall()
date_list = []
for item in raw_dates:
date_list.append(item[0])
        # Last value in any planner is the dummy end_date, so we ignore it in this list
return date_list[:-1]
def get_verselist_todate(self, planner, date='now', limit = 0):
''' Return a list of verse IDs from the planner, up to today '''
if (limit == 0):
self.cur.execute("SELECT verseref FROM planner WHERE name = '"+planner+"' AND date <= ('"+date+"') ORDER BY date DESC")
else:
self.cur.execute("SELECT verseref FROM planner WHERE name = '"+planner+"' AND date <= ('"+date+"') ORDER BY date DESC LIMIT "+str(limit))
raw_output = self.cur.fetchall()
output = []
for item in raw_output:
output.append(item[0])
return output
def get_cat_from_verse(self, verse):
''' Given a verse, get a category that contains that verse '''
# Note - currently this doesn't work properly with verses that are in multiple categories
self.cur.execute("SELECT cat FROM cats WHERE refno IN (SELECT catref FROM verses WHERE verseref = '"+verse+"')")
return self.cur.fetchall()[0][0]
class guiClient:
''' This is the main application '''
def __init__(self):
# Set the glade file
        self.gladefile = 'whetstone.glade'
self.wTree = gtk.glade.XML(self.gladefile)
# Create our event dictionary and connect it
dic = {"on_swordbutton_clicked" : self.OnSword,
"on_mainwindow_destroy" : self.OnQuit,
"on_addbutton_clicked" : self.OnAdd,
"on_deletebutton_clicked" : self.OnDelete,
"on_clearbutton_clicked" : self.OnClear,
"on_treeview1_row_activated" : self.OnLoadVerse,
"on_notebook1_switch_page" : self.OnChangePage,
"on_slider_value_changed" : self.OnSliderChange,
"on_homebutton_clicked" : self.OnSliderButton,
"on_lessbutton_clicked" : self.OnSliderButton,
"on_morebutton_clicked" : self.OnSliderButton,
"on_endbutton_clicked" : self.OnSliderButton,
"on_newplannerbutton_clicked" : self.OnPlannerWizard,
"on_treeview2_row_activated" : self.OnLoadPlanner,
"on_calendar1_day_selected" : self.OnPlannerDateChange,
"on_calendar1_month_changed" : self.OnPlannerMonthChange,
"on_about1_activate" : self.About,
"on_preferences1_activate" : self.Preferences,
"on_plannerquiz_clicked" : self.OnQuizLaunch,
"on_categoryquiz_clicked" : self.OnQuizLaunch,
"on_quizfirst_clicked" : self.OnQuizButton,
"on_quizback_clicked" : self.OnQuizButton,
"on_quiznext_clicked" : self.OnQuizButton,
"on_quizlast_clicked" : self.OnQuizButton,
"on_choicea_clicked" : self.OnQuizAnswer,
"on_choiceb_clicked" : self.OnQuizAnswer,
"on_choicec_clicked" : self.OnQuizAnswer,
"on_choiced_clicked" : self.OnQuizAnswer,
"on_quizfinish_clicked" : self.OnQuizFinish,
"on_resultsbutton_clicked" : self.OnResultsFinish,
"on_results_close" : self.OnResultsFinish,
"on_results_destroy" : self.OnResultsFinish,
}
self.wTree.signal_autoconnect(dic)
# Setup the DB
self.db = MVDB()
# *********************************************************************
# Add/Edit Verse Tab
# *********************************************************************
# Set up the treeview
self.catview = self.wTree.get_widget("treeview1")
self.catview.set_level_indentation(0)
self.treestore = gtk.TreeStore(str, str, str)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Category", renderer, markup = 0)
column.set_resizable(True)
self.catview.append_column(column)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Verse", renderer, markup = 0)
column.set_visible(False)
column.set_resizable(True)
self.catview.append_column(column)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("DB_ID", renderer, markup = 0)
column.set_visible(False)
self.catview.append_column(column)
# The dropdown
self.cat_dropdown = self.wTree.get_widget("categorydropdown")
# Store the verse database ID
self.verseid = 0
# *********************************************************************
# Learning Tab
# *********************************************************************
self.plannerview = self.wTree.get_widget("treeview2")
self.plannerstore = gtk.TreeStore(str)
self.learn_text = self.wTree.get_widget("learntext")
self.learn_ref = self.wTree.get_widget("learnref")
self.slider = self.wTree.get_widget("slider")
self.show_verse = self.wTree.get_widget("showverse")
self.show_cat = self.wTree.get_widget("showcat")
self.show_text = self.wTree.get_widget("showtext").get_buffer()
        # Make sure the expander is closed
self.wTree.get_widget("plannerexpander").set_expanded(False)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Title", renderer, markup = 0)
column.set_resizable(True)
self.plannerview.append_column(column)
        # use the default planner from the preferences file instead of a hard-coded test value
        self.default_planner = current_planner
self.planner_date = "now"
# *********************************************************************
# Quiz Tab
# *********************************************************************
# Setup the planner dropdown
self.quizplannerdropdown = self.wTree.get_widget("quizplannerdropdown")
# Setup the quiz category box
self.quizview = self.wTree.get_widget("quizcatview")
self.quizstore = gtk.TreeStore(str)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Title", renderer, markup = 0)
column.set_resizable(True)
self.quizview.append_column(column)
# *********************************************************************
# Quiz Results Window
# *********************************************************************
self.quizresultsview = self.wTree.get_widget("quizresultsview")
self.quizresultsstore = gtk.TreeStore(str, str, str)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Question", renderer, markup = 0)
column.set_resizable(True)
column.set_max_width(200)
self.quizresultsview.append_column(column)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Your Answer", renderer, markup = 1)
column.set_resizable(True)
column.set_visible(True)
self.quizresultsview.append_column(column)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Correct Answer", renderer, markup = 2)
column.set_resizable(True)
column.set_visible(True)
self.quizresultsview.append_column(column)
# Fill the category lists for both the Edit and Quiz tabs
self.OnCategoryRefresh()
# Fill the planner lists for both Learning and Quiz tabs
self.OnPlannersRefresh()
# Show the window
self.window = self.wTree.get_widget("mainwindow")
self.window.show()
def OnSword(self, widget = None):
''' Use diatheke to grab the verse text relating to the reference entered '''
# Get the relevant widgets
verse = self.wTree.get_widget("ref")
text = self.wTree.get_widget("versetext")
textbuffer = text.get_buffer()
verseref = verse.get_text()
print "Searching for "+verseref
versetext = commands.getoutput("diatheke -b "+current_translation+" -k "+verseref)
textbuffer.set_text(strip_text(versetext))
def OnCategoryRefresh(self, widget=None):
# Clear the tree
self.treestore.clear()
self.quizstore.clear()
for centre in self.db.cat_list():
iter = self.treestore.insert_before(None, None)
iter2 = self.quizstore.insert_before(None, None)
self.treestore.set_value(iter, 0, centre['title'])
self.quizstore.set_value(iter2, 0, centre['title'])
for item in self.db.verse_list(centre['ref']):
iter2 = self.treestore.insert_before(iter, None)
self.treestore.set_value(iter2, 0, item[0])
self.treestore.set_value(iter2, 1, item[1])
# Add to the dropdown box too
self.cat_dropdown.append_text(centre['title'])
self.cat_dropdown.remove_text(0)
self.catview.set_model(self.treestore)
self.catview.show()
self.quizview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
self.quizview.set_model(self.quizstore)
self.quizview.show()
def OnPlannersRefresh(self, widget=None):
# Setup the tree view
# Clear the tree
self.plannerstore.clear()
# Clear the dropdown (need to work on this)
#self.quizplannerdropdown.
for planner in self.db.planner_list():
iter = self.plannerstore.insert_before(None, None)
self.plannerstore.set_value(iter, 0, planner)
# Add to dropdown too
self.quizplannerdropdown.append_text(planner)
self.plannerview.set_model(self.plannerstore)
self.plannerview.show()
# Remove the 0 item from the dropdown
self.quizplannerdropdown.remove_text(0)
def OnAdd(self, widget=None):
''' Add or update a verse in the database '''
if (self.verseid == 0):
print "Adding verse"
# Gather the data
verse = self.wTree.get_widget("ref")
text = self.wTree.get_widget("versetext")
textbuffer = text.get_buffer()
category = self.cat_dropdown.get_active_text()
# Add to the database
# Check the category exists - if not then query creation
if not (category in self.db.cat_list(False)):
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_YES_NO, "The category '"+category+"' does not exist. Would you like to create it?")
resp = msg.run()
if resp == gtk.RESPONSE_YES:
msg.destroy()
# Add category
self.db.add_category(category)
else:
msg.destroy()
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_OK, "Cancelled adding verse")
resp = msg.run()
msg.destroy()
return
# Add the verse
self.db.add_verse(verse.get_text(),textbuffer.get_text(textbuffer.get_start_iter(), textbuffer.get_end_iter()),category)
# Refresh the category list
self.OnCategoryRefresh()
# Confirm to the user
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_OK, "Added verse")
resp = msg.run()
msg.destroy()
def OnDelete(self, widget=None):
''' Delete a verse from the database '''
print "Deleting verse"
if self.verseid == 0:
print "Verse not selected"
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, "You need to select a verse first.")
resp = msg.run()
msg.destroy()
else:
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_YES_NO, "Do you really wish to delete this memory verse?")
resp = msg.run()
if resp == gtk.RESPONSE_YES:
self.db.delete_verse(self.verseid)
print "Deleted verse "+self.verseid
self.OnCategoryRefresh()
self.OnClear()
msg.destroy()
def OnClear(self, widget=None):
''' Clear the form '''
print "Clearing..."
# Get the widgets
verse = self.wTree.get_widget("ref")
text = self.wTree.get_widget("versetext")
textbuffer = text.get_buffer()
# Clear them
textbuffer.set_text("")
verse.set_text("")
# Clear the dropdown
# This is a bit of a kludge as -1 doesn't work with ComboBoxEntry
self.cat_dropdown.insert_text(0, "")
self.cat_dropdown.set_active(0)
self.cat_dropdown.remove_text(0)
# Clear the verseid
self.verseid = 0
# Set the add button back to add (in case we've been updating)
addbutton = self.wTree.get_widget("addbutton")
addbutton.set_label("gtk-add")
def OnLoadVerse(self, widget, path, column):
''' Load a verse from the category tree into the view '''
print "Loading verse"
if (self.treestore.iter_parent(self.treestore.get_iter(path))):
# If we have a parent (ie it's a verse, not a category)
verseref = self.treestore.get_value(self.treestore.get_iter(path), 0)
self.verseid = self.treestore.get_value(self.treestore.get_iter(path), 1)
print "verse id = "+self.verseid
# Load the verse into the textbox, and hit the sword button
versebox = self.wTree.get_widget("ref")
versebox.set_text(verseref)
button = self.wTree.get_widget("swordbutton")
button.clicked()
# Put the category in the dropdown
category = self.treestore.get_value(self.treestore.iter_parent(self.treestore.get_iter(path)), 0)
# This is kludgy and needs to be better
counter = 0
for loop in range(len(self.db.cat_list())):
self.cat_dropdown.set_active(loop)
if (self.cat_dropdown.get_active_text() == category):
counter = loop
self.cat_dropdown.set_active(counter)
# Set add button to update
addbutton = self.wTree.get_widget("addbutton")
addbutton.set_label("Update")
else:
# We have no parent, thus we're a category
print "Just a category"
def OnChangePage(self, widget, page, pagenum):
''' When the user switches to another page '''
print "Page changed to page number "+str(pagenum)
if (pagenum == 1 and len(self.db.planner_list()) == 0):
print "No planners - offer creation wizard"
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, "There are no learning planners. Would you like me to create one now?")
resp = msg.run()
msg.destroy()
if resp == gtk.RESPONSE_YES:
self.OnPlannerWizard()
elif (pagenum == 1):
# Learn page, but we have planners
self.wTree.get_widget("plannerexpander").set_expanded(False)
            self.OnLoadPlanner()
def OnSliderChange(self, widget = None):
''' Adjust the verse output to match the chosen level '''
new_level = self.slider.get_value()
print "Slider changed to "+str(new_level)
### DAMN - need to fix this and the handling of the question as a whole to incorporate the ability to load verses ###
todays_verse = self.db.todays_verse(self.default_planner, self.planner_date)
question = todays_verse[1]
# Work out how many words (and which) to hide
num_words = len(self.word_list)
hide = int((num_words/self.levels)*new_level)
print "Hiding "+str(hide)+" words"
for loop in range(hide):
question = re.sub("\W"+self.word_list[loop]+"\W", " "+stars(self.word_list[loop])+" ", question)
if (new_level) > (self.levels/2):
ref = "**** ** : **"
else:
ref = todays_verse[0]
# Markup the verse and display
font = pango.FontDescription("Arial 18")
self.learn_text.modify_font(font)
font = pango.FontDescription("Arial 14")
self.learn_ref.modify_font(font)
self.learn_text.set_markup(question)
self.learn_ref.set_markup("<i>"+ref+"</i>")
# Load up the editor
self.show_verse.set_text(todays_verse[0])
self.show_cat.set_text(self.db.get_cat_from_verse(todays_verse[0]))
self.show_text.set_text(todays_verse[1])
def OnSliderButton(self, widget=None):
''' Adjust the slider with buttons instead '''
# Work out which button
change = [0, self.slider.get_value()-1, self.slider.get_value()+1, self.levels]
options = ['homebutton', 'lessbutton', 'morebutton', 'endbutton']
change_val = change[options.index(widget.get_name())]
# Adjust the slider accordingly
self.slider.set_value(change_val)
def OnPlannerWizard(self, widget=None):
''' Show the planner wizard '''
print "Planner creation"
plannertree = gtk.glade.XML(self.gladefile, "plannerwizard")
dic = {
'on_planner_cancel_clicked' : self.OnPlannerCancel,
'on_planner_ok_clicked' : self.OnPlannerOK,
}
plannertree.signal_autoconnect(dic)
self.wizard = plannertree.get_widget("plannerwizard")
self.freqdropdown = plannertree.get_widget("freqdropdown")
self.calendar = plannertree.get_widget("calendar2")
self.plannername = plannertree.get_widget("entry1")
# Setup the treeview
self.plannercatview = plannertree.get_widget("catview")
self.plannercatview.set_level_indentation(0)
self.plannercatstore = gtk.TreeStore(str, str)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Category", renderer, markup = 0)
column.set_resizable(True)
self.plannercatview.append_column(column)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("DBID", renderer, markup = 0)
column.set_resizable(True)
column.set_visible(False)
self.plannercatview.append_column(column)
# Clear the tree
self.plannercatstore.clear()
for category in self.db.cat_list():
iter = self.plannercatstore.insert_before(None, None)
self.plannercatstore.set_value(iter, 0, category['title'])
self.plannercatstore.set_value(iter, 1, category['ref'])
self.plannercatview.set_model(self.plannercatstore)
self.plannercatview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
self.plannercatview.show()
self.wizard.show()
def OnPlannerCancel(self, widget=None):
''' Cancel the planner wizard '''
self.wizard.destroy()
def OnPlannerOK(self, widget=None):
''' Confirm the planner creation '''
selection = self.plannercatview.get_selection()
# Check that they've selected some categories
if selection.count_selected_rows() == 0:
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, "You need to select some categories first.")
resp = msg.run()
msg.destroy()
self.wizard.present()
return
else:
selected_cats = self.plannercatview.get_selection().get_selected_rows()[1]
# Count the number of verses in these categories
# Convert the paths into DBIDs
selected_cat_list = []
for cat in selected_cats:
tempid = self.plannercatstore.get_value(self.plannercatstore.get_iter(cat), 1)
selected_cat_list.append(tempid)
# Check against database
versecount = self.db.verse_count(selected_cat_list)
catcount = selection.count_selected_rows()
# Get the frequency and calculate the number of days
frequency = self.freqdropdown.get_active()
# Note - Daily, Bi-Weekly, Weekly, Fortnightly, Monthly
multiplier = [1, 3, 7, 14, 30]
duration = versecount * multiplier[frequency]
# Create a more easily readable duration
if (duration < 60):
dur_text = str(duration)+" days."
elif (duration < 140):
dur_text = str(duration/7)+" weeks."
else:
dur_text = str(duration/30)+" months."
# Confirm with the user
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_YES_NO, "You have selected "+str(catcount)+" categories containing "+str(versecount)+" memory verses, taking approximately "+dur_text+"\nDo you wish to continue?")
resp = msg.run()
msg.destroy()
if resp == gtk.RESPONSE_NO:
self.wizard.present()
return
else:
print "continuing"
# Insert appropriate dates and verses into the database
# Bi-Weekly should be start_day and start_day + 3 every week (ie Monday and Thursday)
# Weekly should be same day every week
                # Fortnightly should be the same day every fortnight
# Monthly should be the same numerical day each month
# Get the start date
year, month, day = self.calendar.get_date()
month += 1
start_date = datetime.date.today()
start_date = start_date.replace(year, month, day)
day_counter = multiplier[frequency]
                # Handle daily, bi-weekly, weekly and fortnightly here
if frequency in [0, 1, 2, 3]:
increment = multiplier[frequency]
tempdate = start_date
for category in selected_cat_list:
for verse in self.db.verse_list(category):
self.db.add_verse_to_planner(self.plannername.get_text(), verse[1], tempdate.isoformat())
tempdate = start_date + datetime.timedelta(days=day_counter)
day_counter += increment
# Handle bi-weekly
if frequency == 1:
if increment == 3:
increment = 4
else:
increment = 3
# Add in a dummy end date
self.db.add_verse_to_planner(self.plannername.get_text(), "END", tempdate.isoformat())
else:
# Monthly
tempdate = start_date
tempmonth = month
tempyear = year
for category in selected_cat_list:
for verse in self.db.verse_list(category):
self.db.add_verse_to_planner(self.plannername.get_text(), verse[1], tempdate.isoformat())
tempmonth += 1
if tempmonth == 13:
tempmonth = 1
tempyear += 1
tempdate = tempdate.replace(tempyear, tempmonth, day)
# Add in a dummy end date
self.db.add_verse_to_planner(self.plannername.get_text(), "END", tempdate.isoformat())
self.OnPlannersRefresh()
self.wizard.destroy()
    def OnLoadPlanner(self, widget=None, path=None, column=None, planner=None):
''' Load a planner into the learn tab '''
if (widget != None):
# Get from the selection widget
self.default_planner = self.plannerstore.get_value(self.plannerstore.get_iter(path), 0)
#else:
#self.default_planner = planner
print "Using planner "+self.default_planner
todays_verse = self.db.todays_verse(self.default_planner, self.planner_date)
print todays_verse
if (todays_verse != -1):
# **************************************
# Calculate any 'distortions' required
# **************************************
# Work out the dates
last, next = self.db.next_and_last(self.default_planner, self.planner_date)
if (-1 not in [last,next]):
lastdate = datetime.datetime.strptime(last, '%Y-%m-%d')
nextdate = datetime.datetime.strptime(next, '%Y-%m-%d')
days_in_cycle = nextdate-lastdate
days_in_cycle = days_in_cycle.days
if (self.planner_date == 'now'):
days_complete = datetime.datetime.today()-lastdate
else:
days_complete = datetime.datetime.strptime(self.planner_date, '%Y-%m-%d')-lastdate
days_complete = days_complete.days
if (days_in_cycle < 10):
self.levels = days_in_cycle
self.days_per_level = 1
else:
self.levels = 10
self.days_per_level = days_in_cycle/10
print "Setting levels to "+str(self.levels)+" and days per level to "+str(self.days_per_level)
# Strip out punctuation from the verse and generate wordlist
temp = todays_verse[1]
question = todays_verse[1]
for punc in string.punctuation:
temp = temp.replace(punc, " ")
self.word_list = temp.split()
# Sort by length
self.word_list.sort(cmp=bylength)
# Work out how many words (and which) to hide
num_words = len(self.word_list)
print "Num words = "+str(num_words)
print "days complete = "+str(days_complete)
hide = int((num_words/self.levels)*(days_complete/self.days_per_level))
print "Hiding "+str(hide)+" words"
for loop in range(hide):
question = re.sub("\W"+self.word_list[loop]+"\W", " "+stars(self.word_list[loop])+" ", question)
if (self.levels - (self.days_per_level * days_complete)) <= (self.levels / 2) :
ref = "**** ** : **"
else:
ref = todays_verse[0]
# Markup the verse and display
font = pango.FontDescription("Arial 18")
self.learn_text.modify_font(font)
font = pango.FontDescription("Arial 14")
self.learn_ref.modify_font(font)
self.learn_text.set_markup(question)
self.learn_ref.set_markup("<i>"+ref+"</i>")
# Adjust the slider
self.slider.set_range(0, self.levels)
self.slider.set_value(days_complete / self.days_per_level)
# Load up the editor
self.show_verse.set_text(todays_verse[0])
self.show_cat.set_text(self.db.get_cat_from_verse(todays_verse[0]))
self.show_text.set_text(todays_verse[1])
# Mark the calendar
calendar = self.wTree.get_widget("calendar1")
calendar.clear_marks()
year, month, day = calendar.get_date()
date_list = self.db.get_planner_dates(self.default_planner, string.zfill(str(month+1),2), str(year))
for day in date_list:
calendar.mark_day(int(day))
def OnPlannerDateChange(self, widget=None):
''' Date is changed - reload the planner '''
year, month, date = widget.get_date()
mydate = str(year)+"-"+string.zfill(str(month+1),2)+"-"+string.zfill(str(date),2)
print "Selected "+mydate
self.planner_date = mydate
self.OnLoadPlanner()
def OnPlannerMonthChange(self, widget=None):
''' Month has changed - update the marks on the calendar '''
widget.clear_marks()
year, month, date = widget.get_date()
date_list = self.db.get_planner_dates(self.default_planner, string.zfill(str(month+1), 2), str(year))
for day in date_list:
widget.mark_day(int(day))
def Preferences(self, widget=None):
''' Display the preferences dialog, and handle output '''
global current_translation
global current_planner
prefTree = gtk.glade.XML(self.gladefile, "prefs")
dlg = prefTree.get_widget("prefs")
pref_translation = prefTree.get_widget("translation_dropdown")
pref_planner = prefTree.get_widget("planner_dropdown")
translations = get_translations_from_sword()
for translation in translations:
pref_translation.append_text(translation[0]+" : "+translation[1])
pref_translation.remove_text(0)
for loop in range(len(translations)):
if (translations[loop][0] == current_translation):
pref_translation.set_active(loop)
planner_list = self.db.planner_list()
for tempplanner in planner_list:
pref_planner.append_text(tempplanner)
pref_planner.remove_text(0)
for loop in range(len(planner_list)):
if (planner_list[loop] == current_planner):
pref_planner.set_active(loop)
result = dlg.run()
dlg.destroy()
if (result == gtk.RESPONSE_OK):
print "saving preferences"
translation = pref_translation.get_active_text()
translation = translation.split(" : ")[0]
newplanner = pref_planner.get_active_text()
config.set('Section1','translation',translation)
config.set('Section1','default_planner',newplanner)
with open(os.path.join(config_path, '.whetstone'), 'wb') as configfile:
config.write(configfile)
current_translation = translation
current_planner = newplanner
def OnQuizLaunch(self, widget=None):
''' Analyse the quiz tab fields and launch the appropriate quiz '''
quiz_type = self.wTree.get_widget("quiztypedropdown").get_active()
if (quiz_type == -1):
# Nothing selected
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, "Please select a quiz type before continuing.")
msg.run()
msg.destroy()
return
quiz_types = ['Verse', 'Ref', 'Words', 'Whole']
quiz_type = quiz_types[quiz_type]
if (widget.get_name() == "categoryquiz"):
# Get a list of verse IDs based on the selected categories
verselist = []
selected_cats = self.quizview.get_selection().get_selected_rows()[1]
if not (selected_cats):
# Need to select some categories
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, "Please select 1 or more categories before continuing.")
msg.run()
msg.destroy()
return
for cat in selected_cats:
tempcat = self.quizstore.get_value(self.quizstore.get_iter(cat), 0)
tempid = self.db.get_category_id(tempcat)
tempverselist = self.db.verse_list(catref=tempid)
for verse in tempverselist:
verselist.append(verse[1])
if (widget.get_name() == "plannerquiz"):
# Get the name of the planner and the quiz limit
planner = self.wTree.get_widget("quizplannerdropdown").get_active_text()
limit = self.wTree.get_widget("quizplannerlimit").get_active()
if (planner == -1 or limit == -1):
# Need to select something from both dropdowns
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, "Please select a planner and verses before continuing.")
msg.run()
msg.destroy()
return
limit = limit*4
# Get a list of verse IDs from the planner
verselist = self.db.get_verselist_todate(planner=planner, limit=limit)
        # Only the multiple-choice 'Verse' and 'Ref' quizzes are implemented so far;
        # passing 'Words' or 'Whole' through would crash OnMultiQuiz.
        if (quiz_type in ['Verse', 'Ref']):
            self.OnMultiQuiz(type=quiz_type, verselist=verselist)
def OnMultiQuiz(self, widget=None, type='Verse', verselist=[1]):
''' Display a multiple-choice quiz '''
# Verselist should be a list of verse DB IDs
# Generate list of dictionaries of questions, format:
# [{question="For God so Loved the world", a="Revelation 21:21", b="Matthew 4:4", c="John 3:16", d="Genesis 1:1", answer="c"}]
self.quiz = []
choices = ['a','b','c','d']
for verse in verselist:
temp = self.db.get_verse(verse)
if (type == "Verse"):
tempq = temp[1]
tempa = [temp[0]]
answer = temp[0]
elif (type == "Ref"):
tempq = temp[0]
tempa = [temp[1]]
answer = temp[1]
wrongs = self.db.get_random_verses(3, type, verse)
for item in wrongs:
tempa.append(item)
# randomise the list
tempa = random.sample(tempa, 4)
self.quiz.append({'question':tempq, 'a':tempa[0], 'b':tempa[1], 'c':tempa[2], 'd':tempa[3], 'answer':choices[tempa.index(answer)]})
# randomise the quiz questions
self.quiz = random.sample(self.quiz, len(self.quiz))
# Store the users answers
self.quizanswers = []
for item in self.quiz:
self.quizanswers.append('')
# Launch the quiz window
self.quizwindow = self.wTree.get_widget("multiquiz")
self.quizwindow.connect("delete-event", self.OnQuizQuit)
self.quizpos = 0
self.OnQuizButton()
self.quizwindow.show()
def ShowQuizPage(self, widget = None):
''' Show the relevant page of a quiz '''
self.quizquestion = self.wTree.get_widget("question")
self.quiza = self.wTree.get_widget("choicea")
self.quizb = self.wTree.get_widget("choiceb")
self.quizc = self.wTree.get_widget("choicec")
self.quizd = self.wTree.get_widget("choiced")
# Load in the question
font = pango.FontDescription("Arial 18")
self.quizquestion.modify_font(font)
self.quizquestion.set_markup(self.quiz[self.quizpos]['question'])
# Load in the answers
self.quiza.set_label(ellipsize(self.quiz[self.quizpos]['a']))
self.quizb.set_label(ellipsize(self.quiz[self.quizpos]['b']))
self.quizc.set_label(ellipsize(self.quiz[self.quizpos]['c']))
self.quizd.set_label(ellipsize(self.quiz[self.quizpos]['d']))
#self.quiza.set_line_wrap(True)
#self.quizb.set_line_wrap(True)
#self.quizc.set_line_wrap(True)
#self.quizd.set_line_wrap(True)
# Adjust the statusbar
status = "Question "+str(self.quizpos+1)+" out of "+str(len(self.quiz))
self.wTree.get_widget("quizstatus").push(-1, status)
# Activate the correct answer from previous choices
if (self.quizanswers[self.quizpos] == "a"):
self.quiza.set_active(True)
elif (self.quizanswers[self.quizpos] == "b"):
self.quizb.set_active(True)
elif (self.quizanswers[self.quizpos] == "c"):
self.quizc.set_active(True)
elif (self.quizanswers[self.quizpos] == "d"):
self.quizd.set_active(True)
else:
self.quiza.set_active(False)
self.quizb.set_active(False)
self.quizc.set_active(False)
self.quizd.set_active(False)
def OnQuizButton(self, widget=None):
''' Move to the appropriate page in the quiz '''
if (widget == None):
self.quizpos = 0
elif (widget.get_name() == 'quizfirst'):
self.quizpos = 0
elif (widget.get_name() == 'quizback'):
self.quizpos -= 1
if (self.quizpos == -1):
self.quizpos = 0
elif (widget.get_name() == 'quiznext'):
self.quizpos += 1
if (self.quizpos > len(self.quiz)-1 ):
self.quizpos = len(self.quiz)-1
else:
self.quizpos = len(self.quiz)-1
# Update the buttons
if (self.quizpos == 0):
# Disable First and Back buttons, enable Last and Next
self.wTree.get_widget("quizfirst").set_sensitive(False)
self.wTree.get_widget("quizback").set_sensitive(False)
self.wTree.get_widget("quiznext").set_sensitive(True)
self.wTree.get_widget("quizlast").set_sensitive(True)
elif (self.quizpos > 0) and (self.quizpos < len(self.quiz)-1):
# Enable everything
self.wTree.get_widget("quizfirst").set_sensitive(True)
self.wTree.get_widget("quizback").set_sensitive(True)
self.wTree.get_widget("quiznext").set_sensitive(True)
self.wTree.get_widget("quizlast").set_sensitive(True)
elif (self.quizpos == len(self.quiz)-1):
# Disable Next and Last
self.wTree.get_widget("quizfirst").set_sensitive(True)
self.wTree.get_widget("quizback").set_sensitive(True)
self.wTree.get_widget("quiznext").set_sensitive(False)
self.wTree.get_widget("quizlast").set_sensitive(False)
self.ShowQuizPage()
def OnQuizAnswer(self, widget):
''' Store the answer selected '''
self.quizanswers[self.quizpos] = widget.get_name()[-1]
print self.quizanswers
def OnQuizFinish(self, widget):
''' Output the results for the user '''
# Check that they've filled in answers for everything
if ("" in self.quizanswers):
msg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, "Please choose an answer for every question")
msg.run()
msg.destroy()
return
self.quizwindow.hide()
# Calculate the number of correct answers
correct = 0
for answer in range(len(self.quizanswers)):
if (self.quizanswers[answer] == self.quiz[answer]['answer']):
correct += 1
# Setup the quiz results dialog
result = "You got "+str(correct)+" correct out of a possible "+str(len(self.quizanswers))
self.wTree.get_widget("score").set_label(result)
# Populate the treeview
# Clear the tree
self.quizresultsstore.clear()
for question in range(len(self.quiz)):
iter = self.quizresultsstore.insert_before(None, None)
self.quizresultsstore.set_value(iter, 0, self.quiz[question]['question'])
if (self.quiz[question]['answer'] == self.quizanswers[question]):
mycolour = "green"
else:
mycolour = "red"
self.quizresultsstore.set_value(iter, 1, "<span foreground='"+mycolour+"'>"+self.quizanswers[question] + " : "+self.quiz[question][self.quizanswers[question]]+"</span>")
self.quizresultsstore.set_value(iter, 2, "<span foreground='green'>"+self.quiz[question]['answer'] + " : "+self.quiz[question][self.quiz[question]['answer']]+"</span>")
self.quizresultsview.set_model(self.quizresultsstore)
self.quizresultsview.show()
quizresultsdialog = self.wTree.get_widget('results')
quizresultsdialog.connect("delete-event", self.OnResultsFinish)
quizresultsdialog.show()
def OnQuizQuit(self, widget=None, event=None):
''' Hide the window if destroyed, rather than killing it '''
widget.hide()
return True
def OnResultsFinish(self, widget=None, event=None):
# Hide the results window
window = self.wTree.get_widget("results")
window.hide()
return True
def About(self, widget=None):
''' Display the About dialog '''
aboutTree = gtk.glade.XML(self.gladefile, "aboutdialog1")
dlg = aboutTree.get_widget("aboutdialog1")
dlg.run()
dlg.destroy()
def OnQuit(self, widget=None):
gtk.main_quit()
if __name__ == "__main__":
app = guiClient()
gtk.main()
|
dtroyer/osc-cloud | refs/heads/master | osccloud/tests/test_osccloud.py | 1 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_osccloud
----------------------------------
Tests for `osccloud` module.
"""
from osccloud.tests import base
class TestOSCCloud(base.TestCase):
def test_something(self):
pass
|
kk9599/django-cms | refs/heads/develop | cms/test_utils/project/sampleapp/urls_example.py | 58 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from ..placeholderapp import views
urlpatterns = [
url(r'^example/$', views.example_view, name="example"),
]
|
SickGear/SickGear | refs/heads/master | lib/apprise/plugins/NotifyGrowl/__init__.py | 2 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <[email protected]>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .gntp import notifier
from .gntp import errors
from ..NotifyBase import NotifyBase
from ...URLBase import PrivacyMode
from ...common import NotifyImageSize
from ...common import NotifyType
from ...utils import parse_bool
from ...AppriseLocale import gettext_lazy as _
# Priorities
class GrowlPriority(object):
LOW = -2
MODERATE = -1
NORMAL = 0
HIGH = 1
EMERGENCY = 2
GROWL_PRIORITIES = (
GrowlPriority.LOW,
GrowlPriority.MODERATE,
GrowlPriority.NORMAL,
GrowlPriority.HIGH,
GrowlPriority.EMERGENCY,
)
GROWL_NOTIFICATION_TYPE = "New Messages"
class NotifyGrowl(NotifyBase):
"""
A wrapper to Growl Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Growl'
# The services URL
service_url = 'http://growl.info/'
# The default protocol
protocol = 'growl'
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_growl'
# Allows the user to specify the NotifyImageSize object
image_size = NotifyImageSize.XY_72
# Disable throttle rate for Growl requests since they are normally
# local anyway
request_rate_per_sec = 0
# A title can not be used for Growl Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
    # Limit results to just the first 2 lines, otherwise there is just too
    # much content to display
body_max_line_count = 2
# Default Growl Port
default_port = 23053
    # Define object templates
templates = (
'{schema}://{host}',
'{schema}://{host}:{port}',
'{schema}://{password}@{host}',
'{schema}://{password}@{host}:{port}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'host': {
'name': _('Hostname'),
'type': 'string',
'required': True,
},
'port': {
'name': _('Port'),
'type': 'int',
'min': 1,
'max': 65535,
},
'password': {
'name': _('Password'),
'type': 'string',
'private': True,
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'priority': {
'name': _('Priority'),
'type': 'choice:int',
'values': GROWL_PRIORITIES,
'default': GrowlPriority.NORMAL,
},
'version': {
'name': _('Version'),
'type': 'choice:int',
'values': (1, 2),
'default': 2,
},
'image': {
'name': _('Include Image'),
'type': 'bool',
'default': True,
'map_to': 'include_image',
},
})
def __init__(self, priority=None, version=2, include_image=True, **kwargs):
"""
Initialize Growl Object
"""
super(NotifyGrowl, self).__init__(**kwargs)
if not self.port:
self.port = self.default_port
# The Priority of the message
if priority not in GROWL_PRIORITIES:
self.priority = GrowlPriority.NORMAL
else:
self.priority = priority
# Always default the sticky flag to False
self.sticky = False
# Store Version
self.version = version
payload = {
'applicationName': self.app_id,
'notifications': [GROWL_NOTIFICATION_TYPE, ],
'defaultNotifications': [GROWL_NOTIFICATION_TYPE, ],
'hostname': self.host,
'port': self.port,
}
if self.password is not None:
payload['password'] = self.password
self.logger.debug('Growl Registration Payload: %s' % str(payload))
self.growl = notifier.GrowlNotifier(**payload)
try:
self.growl.register()
self.logger.debug(
'Growl server registration completed successfully.'
)
except errors.NetworkError:
            msg = 'A network error occurred sending Growl ' \
'notification to {}.'.format(self.host)
self.logger.warning(msg)
raise TypeError(msg)
except errors.AuthError:
            msg = 'An authentication error occurred sending Growl ' \
'notification to {}.'.format(self.host)
self.logger.warning(msg)
raise TypeError(msg)
except errors.UnsupportedError:
            msg = 'An unsupported error occurred sending Growl ' \
'notification to {}.'.format(self.host)
self.logger.warning(msg)
raise TypeError(msg)
# Track whether or not we want to send an image with our notification
# or not.
self.include_image = include_image
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform Growl Notification
"""
icon = None
if self.version >= 2:
# URL Based
icon = None if not self.include_image \
else self.image_url(notify_type)
else:
# Raw
icon = None if not self.include_image \
else self.image_raw(notify_type)
payload = {
'noteType': GROWL_NOTIFICATION_TYPE,
'title': title,
'description': body,
'icon': icon is not None,
'sticky': False,
'priority': self.priority,
}
self.logger.debug('Growl Payload: %s' % str(payload))
        # Update icon of payload to be raw data; this is intentionally done
        # here after we log the debug message above (so we don't try to
        # print the binary contents of an image)
payload['icon'] = icon
# Always call throttle before any remote server i/o is made
self.throttle()
try:
response = self.growl.notify(**payload)
if not isinstance(response, bool):
self.logger.warning(
'Growl notification failed to send with response: %s' %
str(response),
)
else:
self.logger.info('Sent Growl notification.')
except errors.BaseError as e:
# Since Growl servers listen for UDP broadcasts, it's possible
# that you will never get to this part of the code since there is
# no acknowledgement as to whether it accepted what was sent to it
# or not.
# However, if the host/server is unavailable, you will get to this
# point of the code.
self.logger.warning(
                'A connection error occurred sending Growl '
'notification to %s.' % self.host)
self.logger.debug('Growl Exception: %s' % str(e))
# Return; we're done
return False
return True
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
_map = {
GrowlPriority.LOW: 'low',
GrowlPriority.MODERATE: 'moderate',
GrowlPriority.NORMAL: 'normal',
GrowlPriority.HIGH: 'high',
GrowlPriority.EMERGENCY: 'emergency',
}
# Define any arguments set
args = {
'format': self.notify_format,
'overflow': self.overflow_mode,
'image': 'yes' if self.include_image else 'no',
'priority':
_map[GrowlPriority.NORMAL] if self.priority not in _map
else _map[self.priority],
'version': self.version,
'verify': 'yes' if self.verify_certificate else 'no',
}
auth = ''
if self.user:
# The growl password is stored in the user field
auth = '{password}@'.format(
password=self.pprint(
self.user, privacy, mode=PrivacyMode.Secret, safe=''),
)
return '{schema}://{auth}{hostname}{port}/?{args}'.format(
schema=self.secure_protocol if self.secure else self.protocol,
auth=auth,
hostname=NotifyGrowl.quote(self.host, safe=''),
port='' if self.port is None or self.port == self.default_port
else ':{}'.format(self.port),
args=NotifyGrowl.urlencode(args),
)
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to substantiate this object.
"""
results = NotifyBase.parse_url(url)
if not results:
# We're done early as we couldn't load the results
return results
version = None
if 'version' in results['qsd'] and len(results['qsd']['version']):
# Allow the user to specify the version of the protocol to use.
try:
version = int(
NotifyGrowl.unquote(
results['qsd']['version']).strip().split('.')[0])
except (AttributeError, IndexError, TypeError, ValueError):
NotifyGrowl.logger.warning(
'An invalid Growl version of "%s" was specified and will '
'be ignored.' % results['qsd']['version']
)
pass
if 'priority' in results['qsd'] and len(results['qsd']['priority']):
_map = {
'l': GrowlPriority.LOW,
'm': GrowlPriority.MODERATE,
'n': GrowlPriority.NORMAL,
'h': GrowlPriority.HIGH,
'e': GrowlPriority.EMERGENCY,
}
try:
results['priority'] = \
_map[results['qsd']['priority'][0].lower()]
except KeyError:
# No priority was set
pass
# Because of the URL formatting, the password is actually where the
        # username field is. For this reason, we just perform this small hack
        # to make it (the URL) conform correctly. The following strips out the
        # existing password entry (if it exists) so that it can be swapped with
# the new one we specify.
if results.get('password', None) is None:
results['password'] = results.get('user', None)
# Include images with our message
results['include_image'] = \
parse_bool(results['qsd'].get('image', True))
# Set our version
if version:
results['version'] = version
return results
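

# Illustrative sketch only -- not part of the upstream plugin. The helper below
# shows how a growl:// URL (hypothetical host and password) is decomposed by
# NotifyGrowl.parse_url() above before the result is handed to __init__().
def _example_parse_growl_url():
    parsed = NotifyGrowl.parse_url(
        'growl://mypassword@growl.example.local:23053/?priority=high&version=2')
    # The user portion of the URL is relocated into 'password', 'priority'
    # becomes GrowlPriority.HIGH, 'version' becomes the integer 2 and
    # 'include_image' falls back to its default of True.
    return parsed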
|
kodi-czsk/plugin.video.pohadkar.cz | refs/heads/master | default.py | 1 | # -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2013 Libor Zoubek
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import os
import sys  # used below for sys.path setup and the plugin handle in sys.argv
import xbmc,xbmcaddon,xbmcgui,xbmcplugin,util,xbmcprovider,xbmcutil
from provider import ResolveException
__scriptid__ = 'plugin.video.pohadkar.cz'
__scriptname__ = 'Pohádkář.cz'
__addon__ = xbmcaddon.Addon(id=__scriptid__)
__language__ = __addon__.getLocalizedString
sys.path.append( os.path.join ( __addon__.getAddonInfo('path'), 'resources','lib') )
import pohadkar
settings = {'downloads':__addon__.getSetting('downloads'),'quality':__addon__.getSetting('quality')}
class PohadkarContentProvider(xbmcprovider.XBMCMultiResolverContentProvider):
def play(self, item):
stream = self.resolve(item['url'])
print type(stream)
if type(stream) == type([]):
            # resolved to multiple files, we'll feed the playlist and play the first one
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
for video in stream:
li = xbmcgui.ListItem(label=video['title'], path=video['url'], iconImage='DefaultVideo.png')
playlist.add(video['url'], li)
stream = stream[0]
if stream:
xbmcutil.reportUsage(self.addon_id, self.addon_id + '/play')
if 'headers' in stream.keys():
for header in stream['headers']:
stream['url'] += '|%s=%s' % (header, stream['headers'][header])
print 'Sending %s to player' % stream['url']
            li = xbmcgui.ListItem(path=stream['url'], iconImage='DefaultVideo.png')
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, li)
xbmcutil.load_subtitles(stream['subs'])
def resolve(self, url):
def select_cb(resolved):
stream_parts = []
stream_parts_dict = {}
for stream in resolved:
if stream['surl'] not in stream_parts_dict:
stream_parts_dict[stream['surl']] = []
stream_parts.append(stream['surl'])
stream_parts_dict[stream['surl']].append(stream)
if len(stream_parts) == 1:
return resolved[0]
dialog = xbmcgui.Dialog()
opts = [__language__(30050)]
            # when there are multiple streams, we let the user choose only from the best qualities
opts = opts + ['%s [%s]' % (stream_parts_dict[p][0]['title'], stream_parts_dict[p][0]['quality']) for p in stream_parts]
ret = dialog.select(xbmcutil.__lang__(30005), opts)
if ret == 0:
# requested to play all streams in given order - so return them all
return [stream_parts_dict[p][0] for p in stream_parts]
if ret >= 0:
return stream_parts_dict[stream_parts[ret]][0]
item = self.provider.video_item()
item.update({'url':url})
try:
return self.provider.resolve(item, select_cb=select_cb)
except ResolveException, e:
self._handle_exc(e)
params = util.params()
if params=={}:
xbmcutil.init_usage_reporting( __scriptid__)
PohadkarContentProvider(pohadkar.PohadkarContentProvider(tmp_dir=xbmc.translatePath(__addon__.getAddonInfo('profile'))),settings,__addon__).run(params)
|
TwinkleChawla/nova | refs/heads/master | nova/api/openstack/compute/hide_server_addresses.py | 32 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Extension for hiding server addresses in certain states."""
from oslo_config import cfg
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import vm_states
opts = [
cfg.ListOpt('osapi_hide_server_address_states',
default=[vm_states.BUILDING],
help='List of instance states that should hide network info'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
ALIAS = 'os-hide-server-addresses'
authorize = extensions.os_compute_soft_authorizer(ALIAS)
class Controller(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(Controller, self).__init__(*args, **kwargs)
hidden_states = CONF.osapi_hide_server_address_states
# NOTE(jkoelker) _ is not considered uppercase ;)
valid_vm_states = [getattr(vm_states, state)
for state in dir(vm_states)
if state.isupper()]
self.hide_address_states = [state.lower()
for state in hidden_states
if state in valid_vm_states]
def _perhaps_hide_addresses(self, instance, resp_server):
if instance.get('vm_state') in self.hide_address_states:
resp_server['addresses'] = {}
@wsgi.extends
def show(self, req, resp_obj, id):
resp = resp_obj
if not authorize(req.environ['nova.context']):
return
if 'server' in resp.obj and 'addresses' in resp.obj['server']:
instance = req.get_db_instance(id)
self._perhaps_hide_addresses(instance, resp.obj['server'])
@wsgi.extends
def detail(self, req, resp_obj):
resp = resp_obj
if not authorize(req.environ['nova.context']):
return
for server in list(resp.obj['servers']):
if 'addresses' in server:
instance = req.get_db_instance(server['id'])
self._perhaps_hide_addresses(instance, server)
class HideServerAddresses(extensions.V21APIExtensionBase):
"""Support hiding server addresses in certain states."""
name = 'HideServerAddresses'
alias = ALIAS
version = 1
def get_controller_extensions(self):
return [extensions.ControllerExtension(self, 'servers', Controller())]
def get_resources(self):
return []
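

# Illustrative sketch only -- not part of upstream nova. It mirrors the
# normalisation performed in Controller.__init__ for a hypothetical nova.conf
# entry such as:
#   osapi_hide_server_address_states = building,rescued
def _example_hidden_states():
    requested = ['building', 'rescued', 'not_a_real_state']
    valid_vm_states = [getattr(vm_states, state)
                       for state in dir(vm_states)
                       if state.isupper()]
    # Unknown names are silently dropped; known ones are kept lower-cased.
    return [state.lower() for state in requested if state in valid_vm_states]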
|
bbengfort/TextBlob | refs/heads/dev | text/tokenizers.py | 1 | '''Deprecated tokenizers module. Import ``textblob.tokenizers`` instead.
'''
from textblob.tokenizers import *
|
wa1tnr/ainsuSPI | refs/heads/master | 0-Distribution.d/circuitpython-master/tests/basics/builtin_hash_intbig.py | 23 | # test builtin hash function
print({1 << 66:1}) # hash big int
print({-(1 << 66):2}) # hash negative big int
# __hash__ returning a large number should be truncated
class F:
def __hash__(self):
return 1 << 70 | 1
print(hash(F()) != 0)
|
Thielak/program-y | refs/heads/rc | src/programy/mappings/person.py | 5 | """
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.mappings.base import DoubleStringPatternSplitCollection
class PersonCollection(DoubleStringPatternSplitCollection):
def __init__(self):
DoubleStringPatternSplitCollection.__init__(self)
def person(self, gender):
if self.has_key(gender):
return self.value(gender)
def personalise_string(self, string):
return self.replace_by_pattern(string)
|
commtrack/temp-rapidsms | refs/heads/master | apps/backends/urls.py | 3 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.conf.urls.defaults import *
urlpatterns = patterns('')
|
denismakogon/savanna-dashboard | refs/heads/master | savannadashboard/openstack/common/version.py | 1 |
# Copyright 2012 OpenStack Foundation
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities for consuming the version from pkg_resources.
"""
import pkg_resources
class VersionInfo(object):
def __init__(self, package):
"""Object that understands versioning for a package
:param package: name of the python package, such as glance, or
python-glanceclient
"""
self.package = package
self.release = None
self.version = None
self._cached_version = None
def __str__(self):
"""Make the VersionInfo object behave like a string."""
return self.version_string()
def __repr__(self):
"""Include the name."""
return "VersionInfo(%s:%s)" % (self.package, self.version_string())
def _get_version_from_pkg_resources(self):
"""Get the version of the package from the pkg_resources record
associated with the package."""
try:
requirement = pkg_resources.Requirement.parse(self.package)
provider = pkg_resources.get_provider(requirement)
return provider.version
except pkg_resources.DistributionNotFound:
# The most likely cause for this is running tests in a tree
# produced from a tarball where the package itself has not been
# installed into anything. Revert to setup-time logic.
from savannadashboard.openstack.common import setup
return setup.get_version(self.package)
def release_string(self):
"""Return the full version of the package including suffixes indicating
VCS status.
"""
if self.release is None:
self.release = self._get_version_from_pkg_resources()
return self.release
def version_string(self):
"""Return the short version minus any alpha/beta tags."""
if self.version is None:
parts = []
for part in self.release_string().split('.'):
if part[0].isdigit():
parts.append(part)
else:
break
self.version = ".".join(parts)
return self.version
# Compatibility functions
canonical_version_string = version_string
version_string_with_vcs = release_string
def cached_version_string(self, prefix=""):
"""Generate an object which will expand in a string context to
        the results of version_string(). We do this so that we don't
call into pkg_resources every time we start up a program when
passing version information into the CONF constructor, but
rather only do the calculation when and if a version is requested
"""
if not self._cached_version:
self._cached_version = "%s%s" % (prefix,
self.version_string())
return self._cached_version
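

# Illustrative usage sketch only -- not part of the upstream module. A package
# normally keeps a single shared instance, e.g.
#   version_info = VersionInfo('savannadashboard')
# The distribution name below is just an example; any installed name works.
def _example_version_strings():
    info = VersionInfo('savannadashboard')
    return {
        'release': info.release_string(),   # full version, VCS suffixes included
        'version': info.version_string(),   # short numeric X.Y.Z form
        'banner': info.cached_version_string(prefix='version '),
    }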
|
paluh/django-tz | refs/heads/master | django_tz/templatetags/django_tz_tags.py | 1 | import pytz
from django.conf import settings
from django.template import Node
from django.template import Library
from django_tz.utils import adjust_datetime_to_timezone
from django_tz import global_tz
register = Library()
@register.filter
def to_global_tz(value, from_timezone=None):
with_tzinfo = value.tzinfo is not None
from_timezone = from_timezone or value.tzinfo or pytz.timezone(settings.TIME_ZONE)
value = adjust_datetime_to_timezone(value, from_timezone, global_tz.get_timezone())
if with_tzinfo:
return value
return value.replace(tzinfo=None)
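

# Illustrative sketch only -- not part of the upstream app. In a template the
# filter is used as:
#   {% load django_tz_tags %}
#   {{ event.start|to_global_tz }}
# Calling it directly requires configured Django settings and a timezone
# activated through django_tz.global_tz; the datetime below is hypothetical.
def _example_filter_usage():
    import datetime
    naive_local = datetime.datetime(2013, 6, 1, 12, 0)
    converted = to_global_tz(naive_local)           # assumes settings.TIME_ZONE
    from_utc = to_global_tz(naive_local, pytz.utc)  # treat the input as UTC
    return converted, from_utc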
|
marclaporte/clearskies_core | refs/heads/master | tools/gyp/test/same-target-name-different-directory/src/touch.py | 679 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
f = open(sys.argv[1], 'w+')
f.write('Hello from touch.py\n')
f.close()
|