repo_name | path | copies | size | content | license | var_hash | doc_hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|---
Onirik79/aaritmud | data/proto_rooms/villaggio-zingaro/villaggio-zingaro_room_locanda.py | 1 | 1355 | # -*- coding: utf-8 -*-
#= DESCRIPTION =================================================================
"""
Injects a little girl into the inn.
"""
#= IMPORT ======================================================================
import random
from src.database import database
from src.defer import defer_random_time
from src.enums import TO
from src.log import log
from src.mob import Mob
#= CONSTANTS ===================================================================
PROTO_MOB_CODE = "villaggio-zingaro_mob_bimba-tereza"
#= FUNCTIONS ===================================================================
def on_dawn(locanda):
print "<<<<< iniezione bimba in 1,60 >>>>>>>>>"
    defer_random_time(1, 60, coming, locanda)
def coming(locanda):
"""
    Checks that there are no girls already around and then injects her.
"""
    # This is possible since this function is executed deferred
if not locanda:
return
for mob in database["mobs"].itervalues():
if mob.prototype.code == PROTO_MOB_CODE:
return
bimba = Mob(PROTO_MOB_CODE)
bimba.inject(locanda)
bimba.act("Entri trotterellante in $N", TO.ENTITY, locanda)
bimba.act("$n entra trotterellando in $N.", TO.OTHERS, locanda)
bimba.act("Ti senti calpestare dal trotterellare di $n", TO.TARGET, locanda)
#- End of Function -
| gpl-2.0 | 7,934,607,924,687,638,000 | -1,426,259,504,149,763,300 | 25.54902 | 80 | 0.517725 | false |
Donkyhotay/MoonPy | zope/security/tests/test_checker.py | 1 | 24043 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Security Checker tests
$Id: test_checker.py 67630 2006-04-27 00:54:03Z jim $
"""
from unittest import TestCase, TestSuite, main, makeSuite
from zope.interface import implements
from zope.interface.verify import verifyObject
from zope.testing.cleanup import CleanUp
from zope.proxy import getProxiedObject
from zope.security.interfaces import ISecurityPolicy, Unauthorized
from zope.security.interfaces import Forbidden, ForbiddenAttribute
from zope.security.management import setSecurityPolicy, newInteraction
from zope.security.management import endInteraction, getInteraction
from zope.security.proxy import removeSecurityProxy, getChecker, Proxy
from zope.security.checker import defineChecker, undefineChecker, ProxyFactory
from zope.security.checker import canWrite, canAccess
from zope.security.checker import Checker, NamesChecker, CheckerPublic
from zope.security.checker import BasicTypes, _checkers, NoProxy, _clear
import types, pickle
class SecurityPolicy(object):
implements(ISecurityPolicy)
def checkPermission(self, permission, object):
'See ISecurityPolicy'
return permission == 'test_allowed'
class RecordedSecurityPolicy(object):
implements(ISecurityPolicy)
def __init__(self):
self._checked = []
self.permissions = {}
def checkPermission(self, permission, object):
'See ISecurityPolicy'
self._checked.append(permission)
return self.permissions.get(permission, True)
def checkChecked(self, checked):
res = self._checked == checked
self._checked = []
return res
class TransparentProxy(object):
def __init__(self, ob):
self._ob = ob
def __getattribute__(self, name):
ob = object.__getattribute__(self, '_ob')
return getattr(ob, name)
class OldInst:
__metaclass__ = types.ClassType
a = 1
def b(self):
pass
c = 2
def gete(self):
return 3
e = property(gete)
def __getitem__(self, x):
return 5, x
def __setitem__(self, x, v):
pass
class NewInst(object, OldInst):
# This is not needed, but left in to show the change of metaclass
# __metaclass__ = type
def gete(self):
return 3
def sete(self, v):
pass
e = property(gete, sete)
class Test(TestCase, CleanUp):
def setUp(self):
CleanUp.setUp(self)
self.__oldpolicy = setSecurityPolicy(SecurityPolicy)
newInteraction()
def tearDown(self):
endInteraction()
setSecurityPolicy(self.__oldpolicy)
CleanUp.tearDown(self)
def test_typesAcceptedByDefineChecker(self):
class ClassicClass:
__metaclass__ = types.ClassType
class NewStyleClass:
__metaclass__ = type
import zope.security
not_a_type = object()
defineChecker(ClassicClass, NamesChecker())
defineChecker(NewStyleClass, NamesChecker())
defineChecker(zope.security, NamesChecker())
self.assertRaises(TypeError,
defineChecker, not_a_type, NamesChecker())
# check_getattr cases:
#
# - no attribute there
# - method
# - allow and disallow by permission
def test_check_getattr(self):
oldinst = OldInst()
oldinst.d = OldInst()
newinst = NewInst()
newinst.d = NewInst()
for inst in oldinst, newinst:
checker = NamesChecker(['a', 'b', 'c', '__getitem__'], 'perm')
self.assertRaises(Unauthorized, checker.check_getattr, inst, 'a')
self.assertRaises(Unauthorized, checker.check_getattr, inst, 'b')
self.assertRaises(Unauthorized, checker.check_getattr, inst, 'c')
self.assertRaises(Unauthorized, checker.check, inst, '__getitem__')
self.assertRaises(Forbidden, checker.check, inst, '__setitem__')
self.assertRaises(Forbidden, checker.check_getattr, inst, 'd')
self.assertRaises(Forbidden, checker.check_getattr, inst, 'e')
self.assertRaises(Forbidden, checker.check_getattr, inst, 'f')
checker = NamesChecker(['a', 'b', 'c', '__getitem__'],
'test_allowed')
checker.check_getattr(inst, 'a')
checker.check_getattr(inst, 'b')
checker.check_getattr(inst, 'c')
checker.check(inst, '__getitem__')
self.assertRaises(Forbidden, checker.check, inst, '__setitem__')
self.assertRaises(Forbidden, checker.check_getattr, inst, 'd')
self.assertRaises(Forbidden, checker.check_getattr, inst, 'e')
self.assertRaises(Forbidden, checker.check_getattr, inst, 'f')
checker = NamesChecker(['a', 'b', 'c', '__getitem__'],
CheckerPublic)
checker.check_getattr(inst, 'a')
checker.check_getattr(inst, 'b')
checker.check_getattr(inst, 'c')
checker.check(inst, '__getitem__')
self.assertRaises(Forbidden, checker.check, inst, '__setitem__')
self.assertRaises(Forbidden, checker.check_getattr, inst, 'd')
self.assertRaises(Forbidden, checker.check_getattr, inst, 'e')
self.assertRaises(Forbidden, checker.check_getattr, inst, 'f')
def test_check_setattr(self):
oldinst = OldInst()
oldinst.d = OldInst()
newinst = NewInst()
newinst.d = NewInst()
for inst in oldinst, newinst:
checker = Checker({}, {'a': 'perm', 'z': 'perm'})
self.assertRaises(Unauthorized, checker.check_setattr, inst, 'a')
self.assertRaises(Unauthorized, checker.check_setattr, inst, 'z')
self.assertRaises(Forbidden, checker.check_setattr, inst, 'c')
self.assertRaises(Forbidden, checker.check_setattr, inst, 'd')
self.assertRaises(Forbidden, checker.check_setattr, inst, 'e')
self.assertRaises(Forbidden, checker.check_setattr, inst, 'f')
checker = Checker({}, {'a': 'test_allowed', 'z': 'test_allowed'})
checker.check_setattr(inst, 'a')
checker.check_setattr(inst, 'z')
self.assertRaises(Forbidden, checker.check_setattr, inst, 'd')
self.assertRaises(Forbidden, checker.check_setattr, inst, 'e')
self.assertRaises(Forbidden, checker.check_setattr, inst, 'f')
checker = Checker({}, {'a': CheckerPublic, 'z': CheckerPublic})
checker.check_setattr(inst, 'a')
checker.check_setattr(inst, 'z')
self.assertRaises(Forbidden, checker.check_setattr, inst, 'd')
self.assertRaises(Forbidden, checker.check_setattr, inst, 'e')
self.assertRaises(Forbidden, checker.check_setattr, inst, 'f')
def test_proxy(self):
checker = NamesChecker(())
from zope.security.checker import BasicTypes_examples
rocks = tuple(BasicTypes_examples.values())
for rock in rocks:
proxy = checker.proxy(rock)
self.failUnless(proxy is rock, (rock, type(proxy)))
for class_ in OldInst, NewInst:
inst = class_()
for ob in inst, class_:
proxy = checker.proxy(ob)
self.failUnless(removeSecurityProxy(proxy) is ob)
checker = getChecker(proxy)
if ob is inst:
self.assertEqual(checker.permission_id('__str__'),
None)
else:
self.assertEqual(checker.permission_id('__str__'),
CheckerPublic)
#No longer doing anything special for transparent proxies.
#A proxy needs to provide its own security checker.
#
#special = NamesChecker(['a', 'b'], 'test_allowed')
#defineChecker(class_, special)
#
#for ob in inst, TransparentProxy(inst):
# proxy = checker.proxy(ob)
# self.failUnless(removeSecurityProxy(proxy) is ob)
#
# checker = getChecker(proxy)
# self.failUnless(checker is special,
# checker.get_permissions)
#
# proxy2 = checker.proxy(proxy)
# self.failUnless(proxy2 is proxy, [proxy, proxy2])
def testLayeredProxies(self):
"""Tests that a Proxy will not be re-proxied."""
class Base:
__Security_checker__ = NamesChecker(['__Security_checker__'])
base = Base()
checker = Checker({})
# base is not proxied, so we expect a proxy
proxy1 = checker.proxy(base)
self.assert_(type(proxy1) is Proxy)
self.assert_(getProxiedObject(proxy1) is base)
# proxy is a proxy, so we don't expect to get another
proxy2 = checker.proxy(proxy1)
self.assert_(proxy2 is proxy1)
self.assert_(getProxiedObject(proxy2) is base)
def testMultiChecker(self):
from zope.interface import Interface
class I1(Interface):
def f1(): ''
def f2(): ''
class I2(I1):
def f3(): ''
def f4(): ''
class I3(Interface):
def g(): ''
from zope.exceptions import DuplicationError
from zope.security.checker import MultiChecker
self.assertRaises(DuplicationError,
MultiChecker,
[(I1, 'p1'), (I2, 'p2')])
self.assertRaises(DuplicationError,
MultiChecker,
[(I1, 'p1'), {'f2': 'p2'}])
MultiChecker([(I1, 'p1'), (I2, 'p1')])
checker = MultiChecker([
(I2, 'p1'),
{'a': 'p3'},
(I3, 'p2'),
(('x','y','z'), 'p4'),
])
self.assertEqual(checker.permission_id('f1'), 'p1')
self.assertEqual(checker.permission_id('f2'), 'p1')
self.assertEqual(checker.permission_id('f3'), 'p1')
self.assertEqual(checker.permission_id('f4'), 'p1')
self.assertEqual(checker.permission_id('g'), 'p2')
self.assertEqual(checker.permission_id('a'), 'p3')
self.assertEqual(checker.permission_id('x'), 'p4')
self.assertEqual(checker.permission_id('y'), 'p4')
self.assertEqual(checker.permission_id('z'), 'p4')
self.assertEqual(checker.permission_id('zzz'), None)
def testAlwaysAvailable(self):
from zope.security.checker import NamesChecker
checker = NamesChecker(())
class C(object): pass
self.assertEqual(checker.check(C, '__hash__'), None)
self.assertEqual(checker.check(C, '__nonzero__'), None)
self.assertEqual(checker.check(C, '__class__'), None)
self.assertEqual(checker.check(C, '__implements__'), None)
self.assertEqual(checker.check(C, '__lt__'), None)
self.assertEqual(checker.check(C, '__le__'), None)
self.assertEqual(checker.check(C, '__gt__'), None)
self.assertEqual(checker.check(C, '__ge__'), None)
self.assertEqual(checker.check(C, '__eq__'), None)
self.assertEqual(checker.check(C, '__ne__'), None)
def test_setattr(self):
checker = NamesChecker(['a', 'b', 'c', '__getitem__'],
'test_allowed')
for inst in NewInst(), OldInst():
self.assertRaises(Forbidden, checker.check_setattr, inst, 'a')
self.assertRaises(Forbidden, checker.check_setattr, inst, 'z')
# TODO: write a test to see that
# Checker.check/check_setattr handle permission
# values that evaluate to False
def test_ProxyFactory(self):
class SomeClass(object):
pass
import zope.security
checker = NamesChecker()
specific_checker = NamesChecker()
checker_as_magic_attr = NamesChecker()
obj = SomeClass()
proxy = ProxyFactory(obj)
self.assert_(type(proxy) is Proxy)
from zope.security.checker import _defaultChecker
self.assert_(getChecker(proxy) is _defaultChecker)
defineChecker(SomeClass, checker)
proxy = ProxyFactory(obj)
self.assert_(type(proxy) is Proxy)
self.assert_(getChecker(proxy) is checker)
obj.__Security_checker__ = checker_as_magic_attr
proxy = ProxyFactory(obj)
self.assert_(type(proxy) is Proxy)
self.assert_(getChecker(proxy) is checker_as_magic_attr)
proxy = ProxyFactory(obj, specific_checker)
self.assert_(type(proxy) is Proxy)
self.assert_(getChecker(proxy) is specific_checker)
def test_define_and_undefineChecker(self):
class SomeClass(object):
pass
obj = SomeClass()
checker = NamesChecker()
from zope.security.checker import _defaultChecker, selectChecker
self.assert_(selectChecker(obj) is _defaultChecker)
defineChecker(SomeClass, checker)
self.assert_(selectChecker(obj) is checker)
undefineChecker(SomeClass)
self.assert_(selectChecker(obj) is _defaultChecker)
def test_ProxyFactory_using_proxy(self):
class SomeClass(object):
pass
obj = SomeClass()
checker = NamesChecker()
proxy1 = ProxyFactory(obj)
proxy2 = ProxyFactory(proxy1)
self.assert_(proxy1 is proxy2)
# Trying to change the checker on a proxy.
self.assertRaises(TypeError, ProxyFactory, proxy1, checker)
# Setting exactly the same checker as the proxy already has.
proxy1 = ProxyFactory(obj, checker)
proxy2 = ProxyFactory(proxy1, checker)
self.assert_(proxy1 is proxy2)
def test_canWrite_canAccess(self):
# the canWrite and canAccess functions are conveniences. Often code
# wants to check if a certain option is open to a user before
# presenting it. If the code relies on a certain permission, the
# Zope 3 goal of keeping knowledge of security assertions out of the
# code and only in the zcml assertions is broken. Instead, ask if the
# current user canAccess or canWrite some pertinent aspect of the
# object. canAccess is used for both read access on an attribute
# and call access to methods.
# For example, consider this humble pair of class and object.
class SomeClass(object):
pass
obj = SomeClass()
# We will establish a checker for the class. This is the standard
# name-based checker, and works by specifying two dicts, one for read
# and one for write. Each item in the dictionary should be an
# attribute name and the permission required to read or write it.
# For these tests, the SecurityPolicy defined at the top of this file
# is in place. It is a stub. Normally, the security policy would
# have knowledge of interactions and participants, and would determine
        # on the basis of the participants and the object if a certain permission
# were authorized. This stub simply says that the 'test_allowed'
# permission is authorized and nothing else is, for any object you pass
# it.
# Therefore, according to the checker created here, the current
# 'interaction' (as stubbed out in the security policy) will be allowed
# to access and write foo, and access bar. The interaction is
# unauthorized for accessing baz and writing bar. Any other access or
# write is not merely unauthorized but forbidden--including write access
# for baz.
checker = Checker(
{'foo':'test_allowed', # these are the read settings
'bar':'test_allowed',
'baz':'you_will_not_have_this_permission'},
{'foo':'test_allowed', # these are the write settings
'bar':'you_will_not_have_this_permission',
'bing':'you_will_not_have_this_permission'})
defineChecker(SomeClass, checker)
# so, our hapless interaction may write and access foo...
self.assert_(canWrite(obj, 'foo'))
self.assert_(canAccess(obj, 'foo'))
# ...may access, but not write, bar...
self.assert_(not canWrite(obj, 'bar'))
self.assert_(canAccess(obj, 'bar'))
        # ...and may not access baz.
self.assert_(not canAccess(obj, 'baz'))
# there are no security assertions for writing or reading shazam, so
# checking these actually raises Forbidden. The rationale behind
# exposing the Forbidden exception is primarily that it is usually
# indicative of programming or configuration errors.
self.assertRaises(Forbidden, canAccess, obj, 'shazam')
self.assertRaises(Forbidden, canWrite, obj, 'shazam')
# However, we special-case canWrite when an attribute has a Read
# setting but no Write setting. Consider the 'baz' attribute from the
# checker above: it is readonly. All users are forbidden to write
# it. This is a very reasonable configuration. Therefore, canWrite
# will hide the Forbidden exception if and only if there is a
# setting for accessing the attribute.
self.assert_(not canWrite(obj, 'baz'))
        # The reverse is not true at the moment: an unusual case like the
# write-only 'bing' attribute will return a boolean for canWrite,
# but canRead will simply raise a Forbidden exception, without checking
# write settings.
self.assert_(not canWrite(obj, 'bing'))
self.assertRaises(Forbidden, canAccess, obj, 'bing')
class TestCheckerPublic(TestCase):
def test_that_pickling_CheckerPublic_retains_identity(self):
self.assert_(pickle.loads(pickle.dumps(CheckerPublic))
is
CheckerPublic)
def test_that_CheckerPublic_identity_works_even_when_proxied(self):
self.assert_(ProxyFactory(CheckerPublic) is CheckerPublic)
class TestMixinDecoratedChecker(TestCase):
def decoratedSetUp(self):
self.policy = RecordedSecurityPolicy
self._oldpolicy = setSecurityPolicy(self.policy)
newInteraction()
self.interaction = getInteraction()
self.obj = object()
def decoratedTearDown(self):
endInteraction()
setSecurityPolicy(self._oldpolicy)
def check_checking_impl(self, checker):
o = self.obj
checker.check_getattr(o, 'both_get_set')
self.assert_(self.interaction.checkChecked(['dc_get_permission']))
checker.check_getattr(o, 'c_only')
self.assert_(self.interaction.checkChecked(['get_permission']))
checker.check_getattr(o, 'd_only')
self.assert_(self.interaction.checkChecked(['dc_get_permission']))
self.assertRaises(ForbiddenAttribute,
checker.check_getattr, o,
'completely_different_attr')
self.assert_(self.interaction.checkChecked([]))
checker.check(o, '__str__')
self.assert_(self.interaction.checkChecked(['get_permission']))
checker.check_setattr(o, 'both_get_set')
self.assert_(self.interaction.checkChecked(['dc_set_permission']))
self.assertRaises(ForbiddenAttribute,
checker.check_setattr, o, 'c_only')
self.assert_(self.interaction.checkChecked([]))
self.assertRaises(ForbiddenAttribute,
checker.check_setattr, o, 'd_only')
self.assert_(self.interaction.checkChecked([]))
originalChecker = NamesChecker(['both_get_set', 'c_only', '__str__'],
'get_permission')
decorationSetMap = {'both_get_set': 'dc_set_permission'}
decorationGetMap = {'both_get_set': 'dc_get_permission',
'd_only': 'dc_get_permission'}
overridingChecker = Checker(decorationGetMap, decorationSetMap)
class TestCombinedChecker(TestMixinDecoratedChecker, TestCase):
def setUp(self):
TestCase.setUp(self)
self.decoratedSetUp()
def tearDown(self):
self.decoratedTearDown()
TestCase.tearDown(self)
def test_checking(self):
from zope.security.checker import CombinedChecker
cc = CombinedChecker(self.overridingChecker, self.originalChecker)
self.check_checking_impl(cc)
# When a permission is not authorized by the security policy,
# the policy is queried twice per check_getattr -- once for each
# checker.
self.interaction.permissions['dc_get_permission'] = False
cc.check_getattr(self.obj, 'both_get_set')
self.assert_(
self.interaction.checkChecked(['dc_get_permission',
'get_permission'])
)
# This should raise Unauthorized instead of ForbiddenAttribute, since
# access can be granted if you e.g. login with different credentials.
self.assertRaises(Unauthorized, cc.check_getattr, self.obj, 'd_only')
self.assertRaises(Unauthorized, cc.check, self.obj, 'd_only')
def test_interface(self):
from zope.security.checker import CombinedChecker
from zope.security.interfaces import IChecker
dc = CombinedChecker(self.overridingChecker, self.originalChecker)
verifyObject(IChecker, dc)
class TestBasicTypes(TestCase):
def test(self):
class MyType(object): pass
class MyType2(object): pass
# When an item is added to the basic types, it should also be added to
# the list of checkers.
BasicTypes[MyType] = NoProxy
self.assert_(MyType in _checkers)
# If we clear the checkers, the type should still be there
_clear()
self.assert_(MyType in BasicTypes)
self.assert_(MyType in _checkers)
# Now delete the type from the dictionary, will also delete it from
# the checkers
del BasicTypes[MyType]
self.assert_(MyType not in BasicTypes)
self.assert_(MyType not in _checkers)
# The quick way of adding new types is using update
BasicTypes.update({MyType: NoProxy, MyType2: NoProxy})
self.assert_(MyType in BasicTypes)
self.assert_(MyType2 in BasicTypes)
self.assert_(MyType in _checkers)
self.assert_(MyType2 in _checkers)
# Let's remove the two new types
del BasicTypes[MyType]
del BasicTypes[MyType2]
# Of course, BasicTypes is a full dictionary. This dictionary is by
# default filled with several entries:
keys = BasicTypes.keys()
keys.sort()
self.assert_(bool in keys)
self.assert_(int in keys)
self.assert_(float in keys)
self.assert_(str in keys)
self.assert_(unicode in keys)
self.assert_(object in keys)
# ...
# Finally, the ``clear()`` method has been deactivated to avoid
# unwanted deletions.
self.assertRaises(NotImplementedError, BasicTypes.clear)
def test_suite():
return TestSuite((
makeSuite(Test),
makeSuite(TestCheckerPublic),
makeSuite(TestCombinedChecker),
makeSuite(TestBasicTypes),
))
if __name__=='__main__':
main(defaultTest='test_suite')
| gpl-3.0 | 4,629,844,210,237,309,000 | 3,104,177,408,852,707,300 | 37.224165 | 80 | 0.614441 | false |
listamilton/supermilton.repository | plugin.video.supermiltonflix/lib/keyring/backends/file.py | 5 | 9728 | from __future__ import with_statement
import abc
import base64
import getpass
import json
import os
import sys
from ..backend import KeyringBackend
from ..errors import PasswordDeleteError
from ..py27compat import configparser
from ..util import platform_, properties
from ..util.escape import escape as escape_for_ini
class FileBacked(object):
@abc.abstractproperty
def filename(self):
"""
The filename used to store the passwords.
"""
@properties.NonDataProperty
def file_path(self):
"""
The path to the file where passwords are stored. This property
may be overridden by the subclass or at the instance level.
"""
return os.path.join(platform_.data_root(), self.filename)
class BaseKeyring(FileBacked, KeyringBackend):
"""
BaseKeyring is a file-based implementation of keyring.
This keyring stores the password directly in the file and provides methods
which may be overridden by subclasses to support
encryption and decryption. The encrypted payload is stored in base64
format.
"""
@abc.abstractmethod
def encrypt(self, password):
"""
Given a password (byte string), return an encrypted byte string.
"""
@abc.abstractmethod
def decrypt(self, password_encrypted):
"""
Given a password encrypted by a previous call to `encrypt`, return
the original byte string.
"""
def get_password(self, service, username):
"""
Read the password from the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# load the passwords from the file
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path)
# fetch the password
try:
password_base64 = config.get(service, username).encode()
# decode with base64
password_encrypted = base64.decodestring(password_base64)
# decrypted the password
password = self.decrypt(password_encrypted).decode('utf-8')
except (configparser.NoOptionError, configparser.NoSectionError):
password = None
return password
def set_password(self, service, username, password):
"""Write the password in the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# encrypt the password
password_encrypted = self.encrypt(password.encode('utf-8'))
# encode with base64
password_base64 = base64.encodestring(password_encrypted).decode()
# ensure the file exists
self._ensure_file_path()
# load the keyring from the disk
config = configparser.RawConfigParser()
config.read(self.file_path)
# update the keyring with the password
if not config.has_section(service):
config.add_section(service)
config.set(service, username, password_base64)
# save the keyring back to the file
with open(self.file_path, 'w') as config_file:
config.write(config_file)
def _ensure_file_path(self):
"""
Ensure the storage path exists.
If it doesn't, create it with "go-rwx" permissions.
"""
storage_root = os.path.dirname(self.file_path)
if storage_root and not os.path.isdir(storage_root):
os.makedirs(storage_root)
if not os.path.isfile(self.file_path):
# create the file without group/world permissions
with open(self.file_path, 'w'):
pass
user_read_write = 0o600
os.chmod(self.file_path, user_read_write)
def delete_password(self, service, username):
"""Delete the password for the username of the service.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path)
try:
if not config.remove_option(service, username):
raise PasswordDeleteError("Password not found")
except configparser.NoSectionError:
raise PasswordDeleteError("Password not found")
# update the file
with open(self.file_path, 'w') as config_file:
config.write(config_file)
class PlaintextKeyring(BaseKeyring):
"""Simple File Keyring with no encryption"""
priority = .5
"Applicable for all platforms, but not recommended"
filename = 'keyring_pass.cfg'
def encrypt(self, password):
"""Directly return the password itself.
"""
return password
def decrypt(self, password_encrypted):
"""Directly return encrypted password.
"""
return password_encrypted
class Encrypted(object):
"""
PyCrypto-backed Encryption support
"""
block_size = 32
def _create_cipher(self, password, salt, IV):
"""
Create the cipher object to encrypt or decrypt a payload.
"""
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES
pw = PBKDF2(password, salt, dkLen=self.block_size)
return AES.new(pw[:self.block_size], AES.MODE_CFB, IV)
def _get_new_password(self):
while True:
password = getpass.getpass(
"Please set a password for your new keyring: ")
confirm = getpass.getpass('Please confirm the password: ')
if password != confirm:
sys.stderr.write("Error: Your passwords didn't match\n")
continue
if '' == password.strip():
# forbid the blank password
sys.stderr.write("Error: blank passwords aren't allowed.\n")
continue
return password
class EncryptedKeyring(Encrypted, BaseKeyring):
"""PyCrypto File Keyring"""
filename = 'crypted_pass.cfg'
pw_prefix = 'pw:'.encode()
@properties.ClassProperty
@classmethod
def priority(self):
"Applicable for all platforms, but not recommended."
try:
__import__('Crypto.Cipher.AES')
__import__('Crypto.Protocol.KDF')
__import__('Crypto.Random')
except ImportError:
raise RuntimeError("PyCrypto required")
if not json:
raise RuntimeError("JSON implementation such as simplejson "
"required.")
return .6
@properties.NonDataProperty
def keyring_key(self):
# _unlock or _init_file will set the key or raise an exception
if self._check_file():
self._unlock()
else:
self._init_file()
return self.keyring_key
def _init_file(self):
"""
Initialize a new password file and set the reference password.
"""
self.keyring_key = self._get_new_password()
# set a reference password, used to check that the password provided
# matches for subsequent checks.
self.set_password('keyring-setting', 'password reference',
'password reference value')
def _check_file(self):
"""
Check if the file exists and has the expected password reference.
"""
if not os.path.exists(self.file_path):
return False
self._migrate()
config = configparser.RawConfigParser()
config.read(self.file_path)
try:
config.get(
escape_for_ini('keyring-setting'),
escape_for_ini('password reference'),
)
except (configparser.NoSectionError, configparser.NoOptionError):
return False
return True
def _unlock(self):
"""
Unlock this keyring by getting the password for the keyring from the
user.
"""
self.keyring_key = getpass.getpass(
'Please enter password for encrypted keyring: ')
try:
ref_pw = self.get_password('keyring-setting', 'password reference')
assert ref_pw == 'password reference value'
except AssertionError:
self._lock()
raise ValueError("Incorrect Password")
def _lock(self):
"""
Remove the keyring key from this instance.
"""
del self.keyring_key
def encrypt(self, password):
from Crypto.Random import get_random_bytes
salt = get_random_bytes(self.block_size)
from Crypto.Cipher import AES
IV = get_random_bytes(AES.block_size)
cipher = self._create_cipher(self.keyring_key, salt, IV)
password_encrypted = cipher.encrypt(self.pw_prefix + password)
# Serialize the salt, IV, and encrypted password in a secure format
data = dict(
salt=salt, IV=IV, password_encrypted=password_encrypted,
)
for key in data:
data[key] = base64.encodestring(data[key]).decode()
return json.dumps(data).encode()
def decrypt(self, password_encrypted):
# unpack the encrypted payload
data = json.loads(password_encrypted.decode())
for key in data:
data[key] = base64.decodestring(data[key].encode())
cipher = self._create_cipher(self.keyring_key, data['salt'],
data['IV'])
plaintext = cipher.decrypt(data['password_encrypted'])
assert plaintext.startswith(self.pw_prefix)
return plaintext[3:]
def _migrate(self, keyring_password=None):
"""
Convert older keyrings to the current format.
"""
| gpl-2.0 | -9,044,997,032,602,074,000 | -487,446,087,407,247,700 | 32.088435 | 79 | 0.609169 | false |
jay3sh/vispy | examples/basics/scene/isocurve_updates.py | 17 | 4424 | # -*- coding: utf-8 -*-
# vispy: gallery 30
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Show use of SceneCanvas to display and update Image and Isocurve visuals using
ViewBox visual.
"""
import sys
import numpy as np
from itertools import cycle
from vispy import app, scene
from vispy.scene import STTransform
from vispy.util.filter import gaussian_filter
from vispy.color import get_colormaps, get_color_names
canvas = scene.SceneCanvas(keys='interactive',
title='Show update capabilities of Isocurve Visual',
show=True)
canvas.show()
# Setting up four viewboxes
vb1 = scene.widgets.ViewBox(border_color='yellow', parent=canvas.scene)
vb2 = scene.widgets.ViewBox(border_color='blue', parent=canvas.scene)
vb3 = scene.widgets.ViewBox(border_color='red', parent=canvas.scene)
vb4 = scene.widgets.ViewBox(border_color='purple', parent=canvas.scene)
vb = (vb1, vb2, vb3, vb4)
# add grid as central widget, add viewboxes into grid
grid = canvas.central_widget.add_grid()
grid.padding = 0
grid.add_widget(vb1, 0, 0)
grid.add_widget(vb2, 0, 1)
grid.add_widget(vb3, 1, 0)
grid.add_widget(vb4, 1, 1)
# panzoom cameras for every viewbox
for box in vb:
box.camera = 'panzoom'
box.camera.aspect = 1.0
# Create random image
img_data1 = np.empty((100, 100, 3), dtype=np.ubyte)
noise = np.random.normal(size=(100, 100), loc=50, scale=150)
noise = gaussian_filter(noise, (4, 4, 0))
img_data1[:] = noise[..., np.newaxis]
# create 2d array with some function
x, y = np.mgrid[0:2*np.pi:101j, 0:2*np.pi:101j]
myfunc = np.cos(2*x[:-1, :-1]) + np.sin(2*y[:-1, :-1])
# add image to viewbox1
image1 = scene.visuals.Image(noise, parent=vb1.scene, cmap='cubehelix')
# move image behind curves
image1.transform = STTransform(translate=(0, 0, 0.5))
vb1.camera.set_range()
# add image to viewbox2
image2 = scene.visuals.Image(myfunc, parent=vb2.scene, cmap='cubehelix')
# move image behind curves
image2.transform = STTransform(translate=(0, 0, 0.5))
vb2.camera.set_range()
# create some level for the isocurves
levels1 = np.linspace(noise.min(), noise.max(), num=52, endpoint=True)[1:-1]
levels2 = np.linspace(myfunc.min(), myfunc.max(), num=52, endpoint=True)[1:-1]
# create curve 1a (image overlay, black) and 1b (plain, cubehelix colored)
# to viewboxes 1 and 3
curve1a = scene.visuals.Isocurve(
noise, levels=levels1[::4], color_lev='k', parent=vb1.scene)
curve1b = scene.visuals.Isocurve(
noise, levels=levels1, color_lev='cubehelix', parent=vb3.scene)
# create curve 2a (2darray overlay, black) and 2b (plain, cubehelix colored)
# to viewboxes 2 and 4
curve2a = scene.visuals.Isocurve(
myfunc, levels=levels2[::4], color_lev='k', parent=vb2.scene)
curve2b = scene.visuals.Isocurve(
myfunc, levels=levels2, color_lev='cubehelix', parent=vb4.scene)
# set viewport
vb3.camera.set_range((0, 100), (0, 100))
vb4.camera.set_range((0, 100), (0, 100))
# setup update parameters
up = 1
index = 1
clip = np.linspace(myfunc.min(), myfunc.max(), num=51)
cmap = cycle(get_colormaps())
color = cycle(get_color_names())
def update(ev):
global myfunc, index, up, levels2, noise, cmap, color
if index > 0 and index < 25:
# update left panes rolling upwards
noise = np.roll(noise, 1, axis=0)
image1.set_data(noise)
curve1a.set_data(noise)
curve1b.set_data(noise)
# update colors/colormap
if (index % 5) == 0:
curve1b.color = next(color)
cm = next(cmap)
image2.cmap = cm
curve2b.color = cm
# change isocurves by clipping data/or changing limits
# update curve1b levels (clip)
curve1b.levels = levels1[index:-index]
# update curve2b data with clipped data
im2 = np.clip(myfunc, clip[index], clip[-index])
curve2b.set_data(im2)
index += up
else:
# change index direction
up = -up
index += up
canvas.update()
# setup timer
timer = app.Timer()
timer.connect(update)
# slow this down a bit to better see what happens
timer.start(0)
if __name__ == '__main__' and sys.flags.interactive == 0:
app.run()
| bsd-3-clause | 884,717,293,895,306,200 | -2,170,969,078,357,745,400 | 30.827338 | 79 | 0.649412 | false |
hackendless/heekscnc | nc/hpgl3d.py | 34 | 2237 | # hpgl3d.py
#
# Copyright (c) 2009, Dan Heeks
# This program is released under the BSD license. See the file COPYING for details.
#
import nc
import hpgl2d
import math
class Creator(hpgl2d.Creator):
def __init__(self):
hpgl2d.Creator.__init__(self)
self.z = int(0)
self.metric() # set self.units_to_mc_units
self.doing_rapid = True
def program_begin(self, id, name=''):
self.write(';;^IN;!MC0;\n')
self.write('V50.0;^PR;Z0,0,10500;^PA;\n')
self.write('!RC15;\n')
self.write('!MC1;\n')
def program_end(self):
self.write('!VZ50.0;!ZM0;\n')
self.write('!MC0;^IN;\n')
def get_machine_xyz(self, x=None, y=None, z=None):
machine_x = self.x
machine_y = self.y
machine_z = self.z
if x != None:
machine_x = self.closest_int(x * self.units_to_mc_units)
if y != None:
machine_y = self.closest_int(y * self.units_to_mc_units)
if z != None:
machine_z = self.closest_int(z * self.units_to_mc_units)
return machine_x, machine_y, machine_z
def rapid(self, x=None, y=None, z=None, a=None, b=None, c=None):
# do a rapid move.
# for now, do all rapid moves at V50 ( 50 mm/s )
mx, my, mz = self.get_machine_xyz(x, y, z)
if mx != self.x or my != self.y or mz != self.z:
if self.doing_rapid == False: self.write('V50.0;')
self.write(('Z%i' % mx) + (',%i' % my) + (',%i;\n' % mz))
self.x = mx
self.y = my
self.z = mz
self.doing_rapid = True
def feed(self, x=None, y=None, z=None, a=None, b=None, c=None):
# do a feed move.
# for now, do all feed moves at V10 ( 10 mm/s )
mx, my, mz = self.get_machine_xyz(x, y, z)
if mx != self.x or my != self.y or mz != self.z:
if self.doing_rapid == True: self.write('V10.0;')
self.write(('Z%i' % mx) + (',%i' % my) + (',%i;\n' % mz))
self.x = mx
self.y = my
self.z = mz
self.doing_rapid = False
nc.creator = Creator()
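
# --- Illustrative usage sketch ---------------------------------------------
# Normally HeeksCNC opens an output file for the creator before the
# post-processor callbacks run; redirecting write() to stdout below is an
# assumption made only so the emitted HPGL commands can be seen directly.
if __name__ == '__main__':
    import sys
    demo = Creator()
    demo.write = sys.stdout.write       # demo-only output redirection
    demo.program_begin(1, "demo program")
    demo.rapid(x=10.0, y=10.0, z=5.0)   # rapid moves are emitted at V50 (50 mm/s)
    demo.feed(x=20.0, y=10.0, z=-1.0)   # feed moves are emitted at V10 (10 mm/s)
    demo.program_end()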
| bsd-3-clause | -1,460,822,799,517,220,600 | 2,008,926,294,962,855,700 | 32.953125 | 83 | 0.494859 | false |
chauhanhardik/populo | common/lib/xmodule/xmodule/assetstore/__init__.py | 124 | 11966 | """
Classes representing asset metadata.
"""
from datetime import datetime
import dateutil.parser
import pytz
import json
from contracts import contract, new_contract
from opaque_keys.edx.keys import CourseKey, AssetKey
from lxml import etree
new_contract('AssetKey', AssetKey)
new_contract('CourseKey', CourseKey)
new_contract('datetime', datetime)
new_contract('basestring', basestring)
new_contract('long', long)
new_contract('AssetElement', lambda x: isinstance(x, etree._Element) and x.tag == "asset") # pylint: disable=protected-access
new_contract('AssetsElement', lambda x: isinstance(x, etree._Element) and x.tag == "assets") # pylint: disable=protected-access
class AssetMetadata(object):
"""
Stores the metadata associated with a particular course asset. The asset metadata gets stored
in the modulestore.
"""
TOP_LEVEL_ATTRS = ['pathname', 'internal_name', 'locked', 'contenttype', 'thumbnail', 'fields']
EDIT_INFO_ATTRS = ['curr_version', 'prev_version', 'edited_by', 'edited_by_email', 'edited_on']
CREATE_INFO_ATTRS = ['created_by', 'created_by_email', 'created_on']
ATTRS_ALLOWED_TO_UPDATE = TOP_LEVEL_ATTRS + EDIT_INFO_ATTRS
ASSET_TYPE_ATTR = 'type'
ASSET_BASENAME_ATTR = 'filename'
XML_ONLY_ATTRS = [ASSET_TYPE_ATTR, ASSET_BASENAME_ATTR]
XML_ATTRS = XML_ONLY_ATTRS + ATTRS_ALLOWED_TO_UPDATE + CREATE_INFO_ATTRS
# Type for assets uploaded by a course author in Studio.
GENERAL_ASSET_TYPE = 'asset'
# Asset section XML tag for asset metadata as XML.
ALL_ASSETS_XML_TAG = 'assets'
# Individual asset XML tag for asset metadata as XML.
ASSET_XML_TAG = 'asset'
# Top-level directory name in exported course XML which holds asset metadata.
EXPORTED_ASSET_DIR = 'assets'
# Filename of all asset metadata exported as XML.
EXPORTED_ASSET_FILENAME = 'assets.xml'
@contract(asset_id='AssetKey',
pathname='basestring|None', internal_name='basestring|None',
locked='bool|None', contenttype='basestring|None',
thumbnail='basestring|None', fields='dict|None',
curr_version='basestring|None', prev_version='basestring|None',
created_by='int|long|None', created_by_email='basestring|None', created_on='datetime|None',
edited_by='int|long|None', edited_by_email='basestring|None', edited_on='datetime|None')
def __init__(self, asset_id,
pathname=None, internal_name=None,
locked=None, contenttype=None,
thumbnail=None, fields=None,
curr_version=None, prev_version=None,
created_by=None, created_by_email=None, created_on=None,
edited_by=None, edited_by_email=None, edited_on=None,
field_decorator=None,):
"""
Construct a AssetMetadata object.
Arguments:
asset_id (AssetKey): Key identifying this particular asset.
pathname (str): Original path to file at asset upload time.
internal_name (str): Name, url, or handle for the storage system to access the file.
locked (bool): If True, only course participants can access the asset.
contenttype (str): MIME type of the asset.
thumbnail (str): the internal_name for the thumbnail if one exists
fields (dict): fields to save w/ the metadata
curr_version (str): Current version of the asset.
prev_version (str): Previous version of the asset.
created_by (int): User ID of initial user to upload this asset.
created_by_email (str): Email address of initial user to upload this asset.
            created_on (datetime): Datetime of initial upload of this asset.
edited_by (int): User ID of last user to upload this asset.
edited_by_email (str): Email address of last user to upload this asset.
edited_on (datetime): Datetime of last upload of this asset.
field_decorator (function): used by strip_key to convert OpaqueKeys to the app's understanding.
Not saved.
"""
self.asset_id = asset_id if field_decorator is None else field_decorator(asset_id)
self.pathname = pathname # Path w/o filename.
self.internal_name = internal_name
self.locked = locked
self.contenttype = contenttype
self.thumbnail = thumbnail
self.curr_version = curr_version
self.prev_version = prev_version
now = datetime.now(pytz.utc)
self.edited_by = edited_by
self.edited_by_email = edited_by_email
self.edited_on = edited_on or now
# created_by, created_by_email, and created_on should only be set here.
self.created_by = created_by
self.created_by_email = created_by_email
self.created_on = created_on or now
self.fields = fields or {}
def __repr__(self):
return """AssetMetadata{!r}""".format((
self.asset_id,
self.pathname, self.internal_name,
self.locked, self.contenttype, self.fields,
self.curr_version, self.prev_version,
self.created_by, self.created_by_email, self.created_on,
self.edited_by, self.edited_by_email, self.edited_on,
))
def update(self, attr_dict):
"""
Set the attributes on the metadata. Any which are not in ATTRS_ALLOWED_TO_UPDATE get put into
fields.
Arguments:
attr_dict: Prop, val dictionary of all attributes to set.
"""
for attr, val in attr_dict.iteritems():
if attr in self.ATTRS_ALLOWED_TO_UPDATE:
setattr(self, attr, val)
else:
self.fields[attr] = val
def to_storable(self):
"""
Converts metadata properties into a MongoDB-storable dict.
"""
return {
'filename': self.asset_id.path,
'asset_type': self.asset_id.asset_type,
'pathname': self.pathname,
'internal_name': self.internal_name,
'locked': self.locked,
'contenttype': self.contenttype,
'thumbnail': self.thumbnail,
'fields': self.fields,
'edit_info': {
'curr_version': self.curr_version,
'prev_version': self.prev_version,
'created_by': self.created_by,
'created_by_email': self.created_by_email,
'created_on': self.created_on,
'edited_by': self.edited_by,
'edited_by_email': self.edited_by_email,
'edited_on': self.edited_on
}
}
@contract(asset_doc='dict|None')
def from_storable(self, asset_doc):
"""
Fill in all metadata fields from a MongoDB document.
The asset_id prop is initialized upon construction only.
"""
if asset_doc is None:
return
self.pathname = asset_doc['pathname']
self.internal_name = asset_doc['internal_name']
self.locked = asset_doc['locked']
self.contenttype = asset_doc['contenttype']
self.thumbnail = asset_doc['thumbnail']
self.fields = asset_doc['fields']
self.curr_version = asset_doc['edit_info']['curr_version']
self.prev_version = asset_doc['edit_info']['prev_version']
self.created_by = asset_doc['edit_info']['created_by']
self.created_by_email = asset_doc['edit_info']['created_by_email']
self.created_on = asset_doc['edit_info']['created_on']
self.edited_by = asset_doc['edit_info']['edited_by']
self.edited_by_email = asset_doc['edit_info']['edited_by_email']
self.edited_on = asset_doc['edit_info']['edited_on']
@contract(node='AssetElement')
def from_xml(self, node):
"""
Walk the etree XML node and fill in the asset metadata.
The node should be a top-level "asset" element.
"""
for child in node:
qname = etree.QName(child)
tag = qname.localname
if tag in self.XML_ATTRS:
value = child.text
if tag in self.XML_ONLY_ATTRS:
# An AssetLocator is constructed separately from these parts.
continue
elif tag == 'locked':
# Boolean.
value = True if value == "true" else False
elif value == 'None':
# None.
value = None
elif tag in ('created_on', 'edited_on'):
# ISO datetime.
value = dateutil.parser.parse(value)
elif tag in ('created_by', 'edited_by'):
# Integer representing user id.
value = int(value)
elif tag == 'fields':
# Dictionary.
value = json.loads(value)
setattr(self, tag, value)
@contract(node='AssetElement')
def to_xml(self, node):
"""
Add the asset data as XML to the passed-in node.
The node should already be created as a top-level "asset" element.
"""
for attr in self.XML_ATTRS:
child = etree.SubElement(node, attr)
# Get the value.
if attr == self.ASSET_TYPE_ATTR:
value = self.asset_id.asset_type
elif attr == self.ASSET_BASENAME_ATTR:
value = self.asset_id.path
else:
value = getattr(self, attr)
# Format the value.
if isinstance(value, bool):
value = "true" if value else "false"
elif isinstance(value, datetime):
value = value.isoformat()
elif isinstance(value, dict):
value = json.dumps(value)
else:
value = unicode(value)
child.text = value
@staticmethod
@contract(node='AssetsElement', assets=list)
def add_all_assets_as_xml(node, assets):
"""
Take a list of AssetMetadata objects. Add them all to the node.
The node should already be created as a top-level "assets" element.
"""
for asset in assets:
asset_node = etree.SubElement(node, "asset")
asset.to_xml(asset_node)
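

# --- Illustrative usage sketch ---------------------------------------------
# Round-trip a single asset's metadata through the XML helpers above. The
# caller must supply an existing opaque-keys AssetKey; the pathname and
# contenttype values below are arbitrary demonstration data.
def _demo_asset_metadata_xml_roundtrip(asset_key):
    """Serialize a minimal AssetMetadata to an <asset> node and read it back."""
    original = AssetMetadata(asset_key, pathname='images',
                             contenttype='image/png', locked=False)
    node = etree.Element(AssetMetadata.ASSET_XML_TAG)
    original.to_xml(node)
    restored = AssetMetadata(asset_key)
    restored.from_xml(node)
    return restored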
class CourseAssetsFromStorage(object):
"""
Wrapper class for asset metadata lists returned from modulestore storage.
"""
@contract(course_id='CourseKey', asset_md=dict)
def __init__(self, course_id, doc_id, asset_md):
"""
Params:
course_id: Course ID for which the asset metadata is stored.
doc_id: ObjectId of MongoDB document
asset_md: Dict with asset types as keys and lists of storable asset metadata as values.
"""
self.course_id = course_id
self._doc_id = doc_id
self.asset_md = asset_md
@property
def doc_id(self):
"""
Returns the ID associated with the MongoDB document which stores these course assets.
"""
return self._doc_id
def setdefault(self, item, default=None):
"""
Provides dict-equivalent setdefault functionality.
"""
return self.asset_md.setdefault(item, default)
def __getitem__(self, item):
return self.asset_md[item]
def __delitem__(self, item):
del self.asset_md[item]
def __len__(self):
return len(self.asset_md)
def __setitem__(self, key, value):
self.asset_md[key] = value
def get(self, item, default=None):
"""
Provides dict-equivalent get functionality.
"""
return self.asset_md.get(item, default)
def iteritems(self):
"""
Iterates over the items of the asset dict.
"""
return self.asset_md.iteritems()
| agpl-3.0 | -4,977,655,993,738,249,000 | 8,728,642,589,141,720,000 | 38.622517 | 128 | 0.587665 | false |
michaelpri10/WelcomeBot | google_search.py | 2 | 1336 | import urllib2
import json
def google_search(search_term):
"""
    Searches for a `search_term`, which should be a string or a value convertible to a string.
Parameters:
- str `search_term`: a string to search for
Returns a tuple (on success):
- first value is a list of search results for the `search_term` returned by Google API
- second value is a Google Search UI URL, where more results can be obtained
Returns False (on failure).
--
Authors:
- michaelpri10
- Jacob-Gray
- Kubo2
"""
# The request also includes the userip parameter which provides the end
# user's IP address. Doing so will help distinguish this legitimate
# server-side traffic from traffic which doesn't come from an end-user.
search_term = search_term.encode('ascii', errors='replace')
url = "https://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=%s&userip=USERS-IP-ADDRESS" % search_term
request = urllib2.Request(url, None)
response = urllib2.urlopen(request)
# Process the JSON string.
results = json.load(response)
# now have some fun with the results...
if len(results["responseData"]["results"]) > 0:
return results["responseData"]["results"], results["responseData"]["cursor"]["moreResultsUrl"]
return False
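

if __name__ == "__main__":
    # Minimal demonstration. It assumes network access and that the legacy
    # Google AJAX Search API still answers; the result field names used below
    # ("titleNoFormatting", "unescapedUrl") follow that API's documented schema
    # and should be treated as assumptions.
    found = google_search("python unit testing")
    if found:
        results, more_url = found
        for result in results:
            print result["titleNoFormatting"], "-", result["unescapedUrl"]
        print "More results at:", more_url
    else:
        print "No results returned."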
| mit | 1,422,961,080,709,298,400 | -8,989,898,913,372,973,000 | 31.585366 | 113 | 0.671407 | false |
snowmantw/AutobahnTestSuite | autobahntestsuite/autobahntestsuite/case/case7_3_2.py | 14 | 1535 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case7_3_2(Case):
DESCRIPTION = """Send a close frame with payload length 1"""
EXPECTATION = """Clean close with protocol error or drop TCP."""
def init(self):
self.suppressClose = True
def onConnectionLost(self, failedByMe):
Case.onConnectionLost(self, failedByMe)
if self.behaviorClose == Case.WRONG_CODE:
self.behavior = Case.FAILED
self.passed = False
self.result = self.resultClose
def onOpen(self):
self.expected[Case.OK] = []
self.expectedClose = {"closedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_PROTOCOL_ERROR],"requireClean":False}
self.p.sendCloseFrame(reasonUtf8 = "a")
self.p.killAfter(1)
| apache-2.0 | 7,904,830,362,449,026,000 | -2,058,642,065,055,524,400 | 34.547619 | 121 | 0.601954 | false |
colemanja91/PyEloqua-Examples | venv/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/requirements.py | 454 | 4355 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import string
import re
from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pkg_resources.extern.pyparsing import Literal as L # noqa
from pkg_resources.extern.six.moves.urllib import parse as urlparse
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
class InvalidRequirement(ValueError):
"""
An invalid requirement was found, users should refer to PEP 508.
"""
ALPHANUM = Word(string.ascii_letters + string.digits)
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()
PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER
URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)
NAMED_REQUIREMENT = \
NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
class Requirement(object):
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
string.
"""
# TODO: Can we test whether something is contained within a requirement?
# If so how do we do that? Do we need to test against the _name_ of
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
def __init__(self, requirement_string):
try:
req = REQUIREMENT.parseString(requirement_string)
except ParseException as e:
raise InvalidRequirement(
"Invalid requirement, parse error at \"{0!r}\"".format(
requirement_string[e.loc:e.loc + 8]))
self.name = req.name
if req.url:
parsed_url = urlparse.urlparse(req.url)
if not (parsed_url.scheme and parsed_url.netloc) or (
not parsed_url.scheme and not parsed_url.netloc):
raise InvalidRequirement("Invalid URL given")
self.url = req.url
else:
self.url = None
self.extras = set(req.extras.asList() if req.extras else [])
self.specifier = SpecifierSet(req.specifier)
self.marker = req.marker if req.marker else None
def __str__(self):
parts = [self.name]
if self.extras:
parts.append("[{0}]".format(",".join(sorted(self.extras))))
if self.specifier:
parts.append(str(self.specifier))
if self.url:
parts.append("@ {0}".format(self.url))
if self.marker:
parts.append("; {0}".format(self.marker))
return "".join(parts)
def __repr__(self):
return "<Requirement({0!r})>".format(str(self))
| gpl-2.0 | 1,079,475,958,577,169,800 | -1,439,867,516,446,866,400 | 33.291339 | 98 | 0.665901 | false |
dpo/nlpy | nlpy/model/adolcmodel.py | 3 | 9361 | from nlpy.model import NLPModel
from nlpy.krylov import SimpleLinearOperator
import adolc
import numpy as np
has_colpack = False
class AdolcModel(NLPModel):
"""
A class to represent optimization problems in which derivatives
are computed via algorithmic differentiation through ADOL-C.
See the documentation of `NLPModel` for further information.
"""
# Count the number of instances of this class to generate
# non-conflicting tape ids. Must be a mutable type.
__NUM_INSTANCES = [-1]
def __init__(self, n=0, m=0, name='Adolc-Generic', **kwargs):
NLPModel.__init__(self, n, m, name, **kwargs)
self.__class__.__NUM_INSTANCES[0] += 1
# Trace objective and constraint functions.
self._obj_trace_id = None
self._trace_obj(self.x0)
self._con_trace_id = None
if self.m > 0: self._trace_con(self.x0)
self.first_sparse_hess_eval = True
self.first_sparse_jac_eval = True
def _get_trace_id(self):
"Return an available trace id."
return 100*self.__NUM_INSTANCES[0]
def get_obj_trace_id(self):
"Return the trace id for the objective function."
return self._obj_trace_id
def get_con_trace_id(self):
"Return the trace id for the constraints."
return self._con_trace_id
def _trace_obj(self, x):
if self._obj_trace_id is None:
self._obj_trace_id = self._get_trace_id()
adolc.trace_on(self._obj_trace_id)
x = adolc.adouble(x)
adolc.independent(x)
y = self.obj(x)
adolc.dependent(y)
adolc.trace_off()
def _trace_con(self, x):
if self._con_trace_id is None and self.m > 0:
self._con_trace_id = self._get_trace_id() + 1
adolc.trace_on(self._con_trace_id)
x = adolc.adouble(x)
adolc.independent(x)
y = self.cons(x)
adolc.dependent(y)
adolc.trace_off()
def _adolc_obj(self, x):
"Evaluate the objective function from the ADOL-C tape."
return adolc.function(self._obj_trace_id, x)
def grad(self, x, **kwargs):
"Evaluate the objective gradient at x."
return self._adolc_grad(x, **kwargs)
def _adolc_grad(self, x, **kwargs):
"Evaluate the objective gradient from the ADOL-C tape."
return adolc.gradient(self._obj_trace_id, x)
def hess(self, x, z, **kwargs):
"Return the Hessian of the objective at x."
if has_colpack:
return self.sparse_hess(x, z, **kwargs)
return self.dense_hess(x, z, **kwargs)
def dense_hess(self, x, z, **kwargs):
"Return the Hessian of the objective at x in dense format."
return adolc.hessian(self._obj_trace_id, x)
def hprod(self, x, z, v, **kwargs):
"Return the Hessian-vector product at x with v."
return adolc.hess_vec(self._obj_trace_id, x, v)
def sparse_hess(self, x, z, **kwargs):
"Return the Hessian of the objective at x in sparse format."
options = np.zeros(2,dtype=int)
if self.first_sparse_hess_eval:
nnz, rind, cind, values = \
adolc.colpack.sparse_hess_no_repeat(self._obj_trace_id,
x,
options=options)
self.nnzH = nnz
self.hess_rind = rind
self.hess_cind = cind
self.hess_values = values
self.first_sparse_hess_eval = False
return rind, cind, values
else:
return adolc.colpack.sparse_hess_repeat(self._obj_trace_id,
x,
self.hess_rind,
self.hess_cind,
self.hess_values)
def _adolc_cons(self, x, **kwargs):
"Evaluate the constraints from the ADOL-C tape."
return adolc.function(self._con_trace_id, x)
def jac(self, x, **kwargs):
"Return constraints Jacobian at x."
if has_colpack:
return self.sparse_jac(x, **kwargs)
return self.dense_jac(x, **kwargs)
def dense_jac(self, x, **kwargs):
"Return constraints Jacobian at x in dense format."
return self._adolc_jac(x, **kwargs)
def _adolc_jac(self, x, **kwargs):
"Evaluate the constraints Jacobian from the ADOL-C tape."
return adolc.jacobian(self._con_trace_id, x)
def sparse_jac(self, x, **kwargs):
"Return constraints Jacobian at x in sparse format."
options = np.zeros(4,dtype=int)
if self.first_sparse_jac_eval:
nnz, rind, cind, values = \
adolc.colpack.sparse_jac_no_repeat(self._con_trace_id,
x,
options=options)
self.nnzJ = nnz
self.jac_rind = rind
self.jac_cind = cind
self.jac_values = values
self.first_sparse_jac_eval = False
return rind, cind, values
else:
            return adolc.colpack.sparse_jac_repeat(self._con_trace_id,
x,
self.jac_rind,
self.jac_cind,
self.jac_values)
def jac_vec(self, x, v, **kwargs):
"Return the product of v with the Jacobian at x."
return adolc.jac_vec(self._con_trace_id, x, v)
def vec_jac(self, x, v, **kwargs):
"Return the product of v with the transpose Jacobian at x."
return adolc.vec_jac(self._con_trace_id, x, v)
def get_jac_linop(self, x, **kwargs):
"Return the Jacobian at x as a linear operator."
J = SimpleLinearOperator(self.n, self.m,
lambda v: self.jac_vec(x,v),
matvec_transp=lambda v: self.vec_jac(x,v),
symmetric=False)
return J
if __name__ == '__main__':
from nlpy.optimize.solvers.lbfgs import LBFGSFramework
from nlpy.optimize.solvers.ldfp import LDFPTrunkFramework
from nlpy.optimize.solvers.trunk import TrunkFramework
from nlpy.optimize.tr.trustregion import TrustRegionFramework as TR
from nlpy.optimize.tr.trustregion import TrustRegionCG as TRSolver
import nlpy.tools.logs
import logging, sys
# Define a few problems.
class AdolcRosenbrock(AdolcModel):
def obj(self, x, **kwargs):
return np.sum( 100*(x[1:] - x[:-1]**2)**2 + (1 - x[:-1])**2 )
class AdolcHS7(AdolcModel):
def obj(self, x, **kwargs):
return np.log(1 + x[0]**2) - x[1]
def cons(self, x, **kwargs):
return (1 + x[0]**2)**2 + x[1]**2 - 4
nvar = 5
rosenbrock = AdolcRosenbrock(n=nvar, name='Rosenbrock', x0=-np.ones(nvar))
hs7 = AdolcHS7(n=2, m=1, name='HS7', x0=2*np.ones(2))
nlp = hs7
g = nlp.grad(nlp.x0)
H = nlp.hess(nlp.x0, nlp.x0)
#H_sparse = nlp.sparse_hess(nlp.x0, nlp.x0)
print 'number of variables: ', nlp.n
print 'initial guess: ', nlp.x0
print 'f(x0) = ', nlp.obj(nlp.x0)
print 'g(x0) = ', g
print 'H(x0) = ', H
#print 'H_sparse(x0) = ', H_sparse
if nlp.m > 0 :
print 'number of constraints: ', nlp.m
c = nlp.cons(nlp.x0)
J = nlp.jac(nlp.x0)
v = np.array([-1.,-1.])
w = np.array([2])
print 'c(x0) = ', c
print 'J(x0) = ', J
        print 'J(x0) * [-1,-1] = ', nlp.jac_vec(nlp.x0, v)
        print 'J(x0).T * [2] = ', nlp.vec_jac(nlp.x0, w)
# # Solve with linesearch-based L-BFGS method.
# lbfgs = LBFGSFramework(nlp, npairs=5, scaling=True, silent=False)
# lbfgs.solve()
# # Create root logger.
# log = logging.getLogger('adolcmodel')
# log.setLevel(logging.INFO)
# fmt = logging.Formatter('%(name)-15s %(levelname)-8s %(message)s')
# hndlr = logging.StreamHandler(sys.stdout)
# hndlr.setFormatter(fmt)
# log.addHandler(hndlr)
# # Configure the subproblem solver logger
# nlpy.tools.logs.config_logger('adolcmodel.ldfp',
# filemode='w',
# stream=sys.stdout)
# tr = TR(Delta=1.0, eta1=0.05, eta2=0.9, gamma1=0.25, gamma2=2.5)
# # Solve with trust-region-based L-DFP method.
# # ldfp = LDFPTrunkFramework(nlp, tr, TRSolver,
# # ny=True, monotone=False,
# # logger_name='adolcmodel.ldfp')
# # ldfp.TR.Delta = 0.1 * np.linalg.norm(g) # Reset initial trust-region radius
# # ldfp.Solve()
# # Solve with trust-region-based method.
# trnk = TrunkFramework(nlp, tr, TRSolver,
# ny=True, monotone=False,
# logger_name='adolcmodel.ldfp')
# trnk.TR.Delta = 0.1 * np.linalg.norm(g) # Reset initial trust-region radius
# trnk.Solve()
| gpl-3.0 | 1,746,035,733,642,238,500 | -7,736,597,658,226,962,000 | 32.195035 | 90 | 0.536054 | false |
pshen/ansible | test/units/mock/yaml_helper.py | 209 | 5267 | import io
import yaml
from ansible.module_utils.six import PY3
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.dumper import AnsibleDumper
class YamlTestUtils(object):
"""Mixin class to combine with a unittest.TestCase subclass."""
def _loader(self, stream):
"""Vault related tests will want to override this.
        Vault cases should set up an AnsibleLoader that has the vault password."""
return AnsibleLoader(stream)
def _dump_stream(self, obj, stream, dumper=None):
"""Dump to a py2-unicode or py3-string stream."""
if PY3:
return yaml.dump(obj, stream, Dumper=dumper)
else:
return yaml.dump(obj, stream, Dumper=dumper, encoding=None)
def _dump_string(self, obj, dumper=None):
"""Dump to a py2-unicode or py3-string"""
if PY3:
return yaml.dump(obj, Dumper=dumper)
else:
return yaml.dump(obj, Dumper=dumper, encoding=None)
def _dump_load_cycle(self, obj):
        # Each pass through a dump or load revs the 'generation'
# obj to yaml string
string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper)
# wrap a stream/file like StringIO around that yaml
stream_from_object_dump = io.StringIO(string_from_object_dump)
loader = self._loader(stream_from_object_dump)
# load the yaml stream to create a new instance of the object (gen 2)
obj_2 = loader.get_data()
# dump the gen 2 objects directory to strings
string_from_object_dump_2 = self._dump_string(obj_2,
dumper=AnsibleDumper)
# The gen 1 and gen 2 yaml strings
self.assertEquals(string_from_object_dump, string_from_object_dump_2)
# the gen 1 (orig) and gen 2 py object
self.assertEquals(obj, obj_2)
# again! gen 3... load strings into py objects
stream_3 = io.StringIO(string_from_object_dump_2)
loader_3 = self._loader(stream_3)
obj_3 = loader_3.get_data()
string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper)
self.assertEquals(obj, obj_3)
# should be transitive, but...
self.assertEquals(obj_2, obj_3)
self.assertEquals(string_from_object_dump, string_from_object_dump_3)
def _old_dump_load_cycle(self, obj):
'''Dump the passed in object to yaml, load it back up, dump again, compare.'''
stream = io.StringIO()
yaml_string = self._dump_string(obj, dumper=AnsibleDumper)
self._dump_stream(obj, stream, dumper=AnsibleDumper)
yaml_string_from_stream = stream.getvalue()
# reset stream
stream.seek(0)
loader = self._loader(stream)
# loader = AnsibleLoader(stream, vault_password=self.vault_password)
obj_from_stream = loader.get_data()
stream_from_string = io.StringIO(yaml_string)
loader2 = self._loader(stream_from_string)
# loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password)
obj_from_string = loader2.get_data()
stream_obj_from_stream = io.StringIO()
stream_obj_from_string = io.StringIO()
if PY3:
yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper)
yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper)
else:
yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None)
yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None)
yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue()
yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue()
stream_obj_from_stream.seek(0)
stream_obj_from_string.seek(0)
if PY3:
yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper)
yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper)
else:
yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None)
yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None)
assert yaml_string == yaml_string_obj_from_stream
assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream ==
yaml_string_stream_obj_from_string)
assert obj == obj_from_stream
assert obj == obj_from_string
assert obj == yaml_string_obj_from_stream
assert obj == yaml_string_obj_from_string
assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
return {'obj': obj,
'yaml_string': yaml_string,
'yaml_string_from_stream': yaml_string_from_stream,
'obj_from_stream': obj_from_stream,
'obj_from_string': obj_from_string,
'yaml_string_obj_from_string': yaml_string_obj_from_string}
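# Illustrative only: a throwaway test case exercising the round-trip helpers.
# The class name and sample data below are assumptions, not part of the
# Ansible test suite; the __main__ guard keeps test discovery from picking it up.
if __name__ == '__main__':
    import unittest

    class _RoundTripDemo(YamlTestUtils, unittest.TestCase):
        def test_dict_round_trip(self):
            # Round-trip a plain mapping through AnsibleDumper/AnsibleLoader.
            self._dump_load_cycle({u'name': u'value', u'numbers': [1, 2, 3]})

    unittest.main()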
| gpl-3.0 | -4,435,004,946,449,504,000 | 4,802,518,374,375,713,000 | 42.528926 | 130 | 0.639453 | false |
c4mb0t/django-setman | testproject/core/migrations/0001_initial.py | 2 | 4372 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserProfile'
db.create_table('core_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='profile', unique=True, to=orm['auth.User'])),
('role', self.gf('django.db.models.fields.CharField')(default='writer', max_length=32)),
))
db.send_create_signal('core', ['UserProfile'])
def backwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table('core_userprofile')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'writer'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['core']
| bsd-3-clause | -4,791,850,482,479,226,000 | -484,877,339,361,180,900 | 60.577465 | 182 | 0.558554 | false |
andela-ifageyinbo/django | tests/m2m_intermediary/tests.py | 381 | 1334 | from __future__ import unicode_literals
from datetime import datetime
from django.test import TestCase
from django.utils import six
from .models import Article, Reporter, Writer
class M2MIntermediaryTests(TestCase):
    def test_intermediary(self):
r1 = Reporter.objects.create(first_name="John", last_name="Smith")
r2 = Reporter.objects.create(first_name="Jane", last_name="Doe")
a = Article.objects.create(
headline="This is a test", pub_date=datetime(2005, 7, 27)
)
w1 = Writer.objects.create(reporter=r1, article=a, position="Main writer")
w2 = Writer.objects.create(reporter=r2, article=a, position="Contributor")
self.assertQuerysetEqual(
a.writer_set.select_related().order_by("-position"), [
("John Smith", "Main writer"),
("Jane Doe", "Contributor"),
],
lambda w: (six.text_type(w.reporter), w.position)
)
self.assertEqual(w1.reporter, r1)
self.assertEqual(w2.reporter, r2)
self.assertEqual(w1.article, a)
self.assertEqual(w2.article, a)
self.assertQuerysetEqual(
r1.writer_set.all(), [
("John Smith", "Main writer")
],
lambda w: (six.text_type(w.reporter), w.position)
)
| bsd-3-clause | 7,173,531,507,055,078,000 | -6,275,664,575,981,144,000 | 31.536585 | 82 | 0.602699 | false |
sbyoun/i-mapreduce | src/contrib/hod/hodlib/RingMaster/idleJobTracker.py | 182 | 9106 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import os, re, time
from hodlib.Common.threads import loop, func
from hodlib.Common.threads import simpleCommand
from hodlib.Common.util import get_exception_string, hadoopVersion
class HadoopJobStatus:
"""This class represents the status of a single Hadoop job"""
def __init__(self, jobId, status):
self.__jobId = jobId
self.__status = status
def getJobId(self):
return self.__jobId
def getStatus(self):
return self.__status
class HadoopClientException(Exception):
"""This class represents an exception that is raised when we fail in
running the job client."""
def __init__(self, errorCode):
self.errorCode = errorCode
class JobTrackerMonitor:
"""This class monitors the JobTracker of an allocated cluster
periodically to detect whether it is idle. If it is found
to be idle for more than a configured limit, it calls back
registered handlers who can act upon the idle cluster."""
def __init__(self, log, idleJTHandler, interval, limit,
hadoopDir, javaHome, servInfoProvider):
self.__log = log
self.__idlenessLimit = limit
self.__idleJobTrackerHandler = idleJTHandler
self.__hadoopDir = hadoopDir
hadoopPath = os.path.join(self.__hadoopDir, "bin", "hadoop")
#hadoop directory can be from pkgs or a temp location like tarball. Verify once.
if not os.path.exists(hadoopPath):
raise Exception('Invalid Hadoop path specified: %s' % hadoopPath)
self.__javaHome = javaHome
# Note that when this object is created, we don't yet know the JT URL.
# The service info provider will be polled until we get the URL.
self.__serviceInfoProvider = servInfoProvider
self.__jobCountRegExp = re.compile("([0-9]+) jobs currently running.*")
self.__jobStatusRegExp = re.compile("(\S+)\s+(\d)\s+\d+\s+\S+$")
self.__firstIdleTime = 0
self.__hadoop15Version = { 'major' : '0', 'minor' : '15' }
#Assumption: we are not going to support versions older than 0.15 for Idle Job tracker.
if not self.__isCompatibleHadoopVersion(self.__hadoop15Version):
raise Exception('Incompatible Hadoop Version: Cannot check status')
self.__stopFlag = False
self.__jtURLFinderThread = func(name='JTURLFinderThread', functionRef=self.getJobTrackerURL)
self.__jtMonitorThread = loop(name='JTMonitorThread', functionRef=self.monitorJobTracker,
sleep=interval)
self.__jobTrackerURL = None
def start(self):
"""This method starts a thread that will determine the JobTracker URL"""
self.__jtURLFinderThread.start()
def stop(self):
self.__log.debug('Joining the monitoring thread.')
self.__stopFlag = True
if self.__jtMonitorThread.isAlive():
self.__jtMonitorThread.join()
self.__log.debug('Joined the monitoring thread.')
def getJobTrackerURL(self):
"""This method periodically checks the service info provider for the JT URL"""
self.__jobTrackerURL = self.__serviceInfoProvider.getServiceAddr('mapred')
while not self.__stopFlag and not self.__isValidJobTrackerURL():
time.sleep(10)
if not self.__stopFlag:
self.__jobTrackerURL = self.__serviceInfoProvider.getServiceAddr('mapred')
else:
break
if self.__isValidJobTrackerURL():
self.__log.debug('Got URL %s. Starting monitoring' % self.__jobTrackerURL)
self.__jtMonitorThread.start()
def monitorJobTracker(self):
"""This method is periodically called to monitor the JobTracker of the cluster."""
try:
if self.__isIdle():
if self.__idleJobTrackerHandler:
self.__log.info('Detected cluster as idle. Calling registered callback handler.')
self.__idleJobTrackerHandler.handleIdleJobTracker()
except:
self.__log.debug('Exception while monitoring job tracker. %s' % get_exception_string())
def getJobsStatus(self):
"""This method should return the status of all jobs that are run on the HOD allocated
hadoop cluster"""
jobStatusList = []
try:
hadoop16Version = { 'major' : '0', 'minor' : '16' }
if self.__isCompatibleHadoopVersion(hadoop16Version):
jtStatusCommand = self.__initStatusCommand(option='-list all')
jtStatusCommand.start()
jtStatusCommand.wait()
jtStatusCommand.join()
if jtStatusCommand.exit_code() == 0:
for line in jtStatusCommand.output():
jobStatus = self.__extractJobStatus(line)
if jobStatus is not None:
jobStatusList.append(jobStatus)
except:
self.__log.debug('Exception while getting job statuses. %s' % get_exception_string())
return jobStatusList
def __isValidJobTrackerURL(self):
"""This method checks that the passed in URL is not one of the special case strings
returned by the getServiceAddr API"""
return ((self.__jobTrackerURL != None) and (self.__jobTrackerURL != 'not found') \
and (not self.__jobTrackerURL.startswith('Error')))
def __extractJobStatus(self, line):
"""This method parses an output line from the job status command and creates
the JobStatus object if there is a match"""
jobStatus = None
line = line.strip()
jsMatch = self.__jobStatusRegExp.match(line)
if jsMatch:
jobStatus = HadoopJobStatus(jsMatch.group(1), int(jsMatch.group(2)))
return jobStatus
def __isIdle(self):
"""This method checks if the JobTracker is idle beyond a certain limit."""
jobCount = 0
err = False
try:
jobCount = self.__getJobCount()
except HadoopClientException, hce:
self.__log.debug('HadoopClientException handled in getting job count. \
Error code: %s' % hce.errorCode)
err = True
if (jobCount==0) or err:
if self.__firstIdleTime == 0:
#detecting idleness for the first time
self.__firstIdleTime = time.time()
else:
if ((time.time()-self.__firstIdleTime) >= self.__idlenessLimit):
self.__log.info('Idleness limit crossed for cluster')
return True
else:
# reset idleness time
self.__firstIdleTime = 0
return False
def __getJobCount(self):
"""This method executes the hadoop job -list command and parses the output to detect
the number of running jobs."""
# We assume here that the poll interval is small enough to detect running jobs.
# If jobs start and stop within the poll interval, the cluster would be incorrectly
# treated as idle. Hadoop 2266 will provide a better mechanism than this.
jobs = -1
jtStatusCommand = self.__initStatusCommand()
jtStatusCommand.start()
jtStatusCommand.wait()
jtStatusCommand.join()
if jtStatusCommand.exit_code() == 0:
for line in jtStatusCommand.output():
match = self.__jobCountRegExp.match(line)
if match:
jobs = int(match.group(1))
elif jtStatusCommand.exit_code() == 1:
# for now, exit code 1 comes for any exception raised by JobClient. If hadoop gets
# to differentiate and give more granular exit codes, we can check for those errors
# corresponding to network errors etc.
raise HadoopClientException(jtStatusCommand.exit_code())
return jobs
def __isCompatibleHadoopVersion(self, expectedVersion):
"""This method determines whether the version of hadoop being used is one that
is higher than the expectedVersion.
This can be used for checking if a particular feature is available or not"""
ver = hadoopVersion(self.__hadoopDir, self.__javaHome, self.__log)
ret = False
if (ver['major']!=None) and (int(ver['major']) >= int(expectedVersion['major'])) \
and (ver['minor']!=None) and (int(ver['minor']) >= int(expectedVersion['minor'])):
ret = True
return ret
def __initStatusCommand(self, option="-list"):
"""This method initializes the command to run to check the JT status"""
cmd = None
hadoopPath = os.path.join(self.__hadoopDir, 'bin', 'hadoop')
cmdStr = "%s job -jt %s" % (hadoopPath, self.__jobTrackerURL)
cmdStr = "%s %s" % (cmdStr, option)
self.__log.debug('cmd str %s' % cmdStr)
env = os.environ
env['JAVA_HOME'] = self.__javaHome
cmd = simpleCommand('HadoopStatus', cmdStr, env)
return cmd
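# Illustrative wiring sketch (not part of HOD): how the monitor is typically
# constructed and driven. The logger, callback object and service-info
# provider below are hypothetical stand-ins for the objects HOD supplies.
#
# class IdleHandler:
#     def handleIdleJobTracker(self):
#         log.info('cluster idle, deallocating')
#
# monitor = JobTrackerMonitor(log, IdleHandler(), interval=60, limit=3600,
#                             hadoopDir='/opt/hadoop', javaHome='/usr/java',
#                             servInfoProvider=ringMaster)
# monitor.start()   # spawns the thread that discovers the JobTracker URL
# ...
# monitor.stop()    # joins the monitoring thread on shutdown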
| apache-2.0 | 6,800,298,680,811,124,000 | -1,328,892,016,144,282,000 | 40.770642 | 96 | 0.677795 | false |
MichaelVL/osm-analytic-tracker | HumanTime.py | 1 | 2895 | import datetime, pytz
import re
import tzlocal
def date2human(when, slack_secs=180):
""" Convert timestamp to human-friendly times, like '3 minutes ago' """
if not when:
return None
now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
diff = now-when
secs = diff.seconds
days = diff.days
if days > 0:
return str(when)
    if secs >= 3600:
        if secs < 7200:
            return '1 hour ago'
        else:
            return str(secs/3600) + ' hours ago'
    else:
        if secs < slack_secs:
            return 'a moment ago'
        else:
            return str(secs/60) + ' minutes ago'
def human2date(when, past=True):
"""UTC timestamps from human 'encoding' like '2 hours ago'. Human
timestamps are relative to local time zone."""
# This is not millisecond precise...
local_tz = tzlocal.get_localzone()
now = datetime.datetime.now().replace(tzinfo=local_tz)
utcnow = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
if when == 'now':
return utcnow
if when == 'today':
want = now.replace(hour=0, minute=0, second=0, microsecond=0)
if not past:
want += datetime.timedelta(days=1)
newtime = utcnow-(now-want)
return newtime
if when == 'yesterday':
want = now.replace(hour=0, minute=0, second=0, microsecond=0)
newtime = utcnow-(now-want)
return newtime-datetime.timedelta(days=1)
in_past = in_future = False
if when.endswith(' ago'):
in_past = True
if when.startswith('in ') or when.startswith('after '):
in_future = True
if in_past and in_future:
raise TypeError('Time cannot be in the past and in the future')
r = re.compile('(\d+) days?( ago)?')
m = r.match(when)
if m:
td = datetime.timedelta(days=float(m.group(1)))
return utcnow-td
r = re.compile('(\d+) hours?( ago)?')
m = r.match(when)
if m:
td = datetime.timedelta(hours=float(m.group(1)))
return utcnow-td
r = re.compile('(\d+) minutes?( ago)?')
m = r.match(when)
if m:
td = datetime.timedelta(minutes=float(m.group(1)))
return utcnow-td
formats = ['%H:%M']
for fmt in formats:
try:
td = datetime.datetime.strptime(when, fmt).replace(tzinfo=local_tz)
new = now
if '%H' in fmt:
new = new.replace(hour=td.hour)
if '%M' in fmt:
new = new.replace(minute=td.minute)
if '%S' in fmt:
new = new.replace(second=td.second)
else:
new = new.replace(second=0)
if '%d' in fmt:
new = new.replace(day=td.day)
return new
except ValueError:
pass
return datetime.datetime.strptime(when, '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=pytz.utc)
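# Small self-check (illustrative): the phrases below are examples of what
# human2date accepts; the expected bucket for date2human is noted inline.
# Runs only when the module is executed directly.
if __name__ == '__main__':
    for phrase in ('now', 'today', 'yesterday', '2 hours ago',
                   '45 minutes ago', '14:30'):
        print('%-15s -> %s' % (phrase, human2date(phrase)))
    just_now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    print(date2human(just_now))  # 'a moment ago'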
| gpl-2.0 | 2,509,113,189,657,437,000 | -9,133,068,294,088,644,000 | 30.813187 | 90 | 0.562003 | false |
apark263/tensorflow | tensorflow/python/kernel_tests/distributions/multinomial_test.py | 22 | 14730 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import multinomial
from tensorflow.python.platform import test
class MultinomialTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
@test_util.run_v1_only("b/120545219")
def testSimpleShapes(self):
with self.cached_session():
p = [.1, .3, .6]
dist = multinomial.Multinomial(total_count=1., probs=p)
self.assertEqual(3, dist.event_shape_tensor().eval())
self.assertAllEqual([], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
@test_util.run_v1_only("b/120545219")
def testComplexShapes(self):
with self.cached_session():
p = 0.5 * np.ones([3, 2, 2], dtype=np.float32)
n = [[3., 2], [4, 5], [6, 7]]
dist = multinomial.Multinomial(total_count=n, probs=p)
self.assertEqual(2, dist.event_shape_tensor().eval())
self.assertAllEqual([3, 2], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
@test_util.run_v1_only("b/120545219")
def testN(self):
p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]]
n = [[3.], [4]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=n, probs=p)
self.assertEqual((2, 1), dist.total_count.get_shape())
self.assertAllClose(n, dist.total_count.eval())
@test_util.run_v1_only("b/120545219")
def testP(self):
p = [[0.1, 0.2, 0.7]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=3., probs=p)
self.assertEqual((1, 3), dist.probs.get_shape())
self.assertEqual((1, 3), dist.logits.get_shape())
self.assertAllClose(p, dist.probs.eval())
@test_util.run_v1_only("b/120545219")
def testLogits(self):
p = np.array([[0.1, 0.2, 0.7]], dtype=np.float32)
logits = np.log(p) - 50.
with self.cached_session():
multinom = multinomial.Multinomial(total_count=3., logits=logits)
self.assertEqual((1, 3), multinom.probs.get_shape())
self.assertEqual((1, 3), multinom.logits.get_shape())
self.assertAllClose(p, multinom.probs.eval())
self.assertAllClose(logits, multinom.logits.eval())
@test_util.run_v1_only("b/120545219")
def testPmfUnderflow(self):
logits = np.array([[-200, 0]], dtype=np.float32)
with self.cached_session():
dist = multinomial.Multinomial(total_count=1., logits=logits)
lp = dist.log_prob([1., 0.]).eval()[0]
self.assertAllClose(-200, lp, atol=0, rtol=1e-6)
@test_util.run_v1_only("b/120545219")
def testPmfandCountsAgree(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=n, probs=p, validate_args=True)
dist.prob([2., 3, 0]).eval()
dist.prob([3., 0, 2]).eval()
with self.assertRaisesOpError("must be non-negative"):
dist.prob([-1., 4, 2]).eval()
with self.assertRaisesOpError("counts must sum to `self.total_count`"):
dist.prob([3., 3, 0]).eval()
@test_util.run_v1_only("b/120545219")
def testPmfNonIntegerCounts(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
# No errors with integer n.
multinom = multinomial.Multinomial(
total_count=n, probs=p, validate_args=True)
multinom.prob([2., 1, 2]).eval()
multinom.prob([3., 0, 2]).eval()
# Counts don't sum to n.
with self.assertRaisesOpError("counts must sum to `self.total_count`"):
multinom.prob([2., 3, 2]).eval()
# Counts are non-integers.
x = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(
"cannot contain fractional components."):
multinom.prob(x).eval(feed_dict={x: [1.0, 2.5, 1.5]})
multinom = multinomial.Multinomial(
total_count=n, probs=p, validate_args=False)
multinom.prob([1., 2., 2.]).eval()
# Non-integer arguments work.
multinom.prob([1.0, 2.5, 1.5]).eval()
def testPmfBothZeroBatches(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = [0.5, 0.5]
counts = [1., 0]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(0.5, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = [0.1, 0.9]
counts = [3., 2]
dist = multinomial.Multinomial(total_count=5., probs=p)
pmf = dist.prob(counts)
# 5 choose 3 = 5 choose 2 = 10. 10 * (.9)^2 * (.1)^3 = 81/10000.
self.assertAllClose(81. / 10000, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
p = [[0.1, 0.9]]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], self.evaluate(pmf))
self.assertEqual((2), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
p = [0.1, 0.9]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], self.evaluate(pmf))
self.assertEqual((2), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [[1., 0]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
self.assertEqual((2), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [1., 0]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
self.assertEqual(pmf.get_shape(), (2))
def testPmfShapeCountsStretchedN(self):
with self.cached_session():
# [2, 2, 2]
p = [[[0.1, 0.9], [0.1, 0.9]], [[0.7, 0.3], [0.7, 0.3]]]
# [2, 2]
n = [[3., 3], [3, 3]]
# [2]
counts = [2., 1]
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
self.evaluate(pmf)
self.assertEqual(pmf.get_shape(), (2, 2))
def testPmfShapeCountsPStretchedN(self):
with self.cached_session():
p = [0.1, 0.9]
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
self.evaluate(pmf)
self.assertEqual((4, 3), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testMultinomialMean(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = multinomial.Multinomial(total_count=n, probs=p)
expected_means = 5 * np.array(p, dtype=np.float32)
self.assertEqual((3,), dist.mean().get_shape())
self.assertAllClose(expected_means, dist.mean().eval())
@test_util.run_v1_only("b/120545219")
def testMultinomialCovariance(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = multinomial.Multinomial(total_count=n, probs=p)
expected_covariances = [[9. / 20, -1 / 10, -7 / 20],
[-1 / 10, 4 / 5, -7 / 10],
[-7 / 20, -7 / 10, 21 / 20]]
self.assertEqual((3, 3), dist.covariance().get_shape())
self.assertAllClose(expected_covariances, dist.covariance().eval())
@test_util.run_v1_only("b/120545219")
def testMultinomialCovarianceBatch(self):
with self.cached_session():
# Shape [2]
n = [5.] * 2
# Shape [4, 1, 2]
p = [[[0.1, 0.9]], [[0.1, 0.9]]] * 2
dist = multinomial.Multinomial(total_count=n, probs=p)
# Shape [2, 2]
inner_var = [[9. / 20, -9 / 20], [-9 / 20, 9 / 20]]
# Shape [4, 2, 2, 2]
expected_covariances = [[inner_var, inner_var]] * 4
self.assertEqual((4, 2, 2, 2), dist.covariance().get_shape())
self.assertAllClose(expected_covariances, dist.covariance().eval())
def testCovarianceMultidimensional(self):
# Shape [3, 5, 4]
p = np.random.dirichlet([.25, .25, .25, .25], [3, 5]).astype(np.float32)
# Shape [6, 3, 3]
p2 = np.random.dirichlet([.3, .3, .4], [6, 3]).astype(np.float32)
ns = np.random.randint(low=1, high=11, size=[3, 5]).astype(np.float32)
ns2 = np.random.randint(low=1, high=11, size=[6, 1]).astype(np.float32)
with self.cached_session():
dist = multinomial.Multinomial(ns, p)
dist2 = multinomial.Multinomial(ns2, p2)
covariance = dist.covariance()
covariance2 = dist2.covariance()
self.assertEqual((3, 5, 4, 4), covariance.get_shape())
self.assertEqual((6, 3, 3, 3), covariance2.get_shape())
@test_util.run_v1_only("b/120545219")
def testCovarianceFromSampling(self):
# We will test mean, cov, var, stddev on a DirichletMultinomial constructed
# via broadcast between alpha, n.
theta = np.array([[1., 2, 3],
[2.5, 4, 0.01]], dtype=np.float32)
theta /= np.sum(theta, 1)[..., array_ops.newaxis]
n = np.array([[10., 9.], [8., 7.], [6., 5.]], dtype=np.float32)
with self.cached_session() as sess:
# batch_shape=[3, 2], event_shape=[3]
dist = multinomial.Multinomial(n, theta)
x = dist.sample(int(1000e3), seed=1)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean[array_ops.newaxis, ...]
sample_cov = math_ops.reduce_mean(math_ops.matmul(
x_centered[..., array_ops.newaxis],
x_centered[..., array_ops.newaxis, :]), 0)
sample_var = array_ops.matrix_diag_part(sample_cov)
sample_stddev = math_ops.sqrt(sample_var)
[
sample_mean_,
sample_cov_,
sample_var_,
sample_stddev_,
analytic_mean,
analytic_cov,
analytic_var,
analytic_stddev,
] = sess.run([
sample_mean,
sample_cov,
sample_var,
sample_stddev,
dist.mean(),
dist.covariance(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(sample_mean_, analytic_mean, atol=0.01, rtol=0.01)
self.assertAllClose(sample_cov_, analytic_cov, atol=0.01, rtol=0.01)
self.assertAllClose(sample_var_, analytic_var, atol=0.01, rtol=0.01)
self.assertAllClose(sample_stddev_, analytic_stddev, atol=0.01, rtol=0.01)
@test_util.run_v1_only("b/120545219")
def testSampleUnbiasedNonScalarBatch(self):
with self.cached_session() as sess:
dist = multinomial.Multinomial(
total_count=[7., 6., 5.],
logits=math_ops.log(2. * self._rng.rand(4, 3, 2).astype(np.float32)))
n = int(3e4)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
# Cyclically rotate event dims left.
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.10)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
@test_util.run_v1_only("b/120545219")
def testSampleUnbiasedScalarBatch(self):
with self.cached_session() as sess:
dist = multinomial.Multinomial(
total_count=5.,
logits=math_ops.log(2. * self._rng.rand(4).astype(np.float32)))
n = int(5e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean # Already transposed to [n, 2].
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.10)
self.assertAllEqual([4, 4], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
def testNotReparameterized(self):
total_count = constant_op.constant(5.0)
p = constant_op.constant([0.2, 0.6])
with backprop.GradientTape() as tape:
tape.watch(total_count)
tape.watch(p)
dist = multinomial.Multinomial(
total_count=total_count,
probs=p)
samples = dist.sample(100)
grad_total_count, grad_p = tape.gradient(samples, [total_count, p])
self.assertIsNone(grad_total_count)
self.assertIsNone(grad_p)
if __name__ == "__main__":
test.main()
| apache-2.0 | -8,480,317,240,997,217,000 | 8,059,364,734,513,101,000 | 37.661417 | 80 | 0.610998 | false |
wikimedia/operations-debs-python-diamond | src/collectors/mongodb/mongodb.py | 5 | 12712 | # coding=utf-8
"""
Collects all number values from the db.serverStatus() command; other
values are ignored.
#### Dependencies
* pymongo
#### Example Configuration
MongoDBCollector.conf
```
enabled = True
hosts = localhost:27017, alias1@localhost:27018, etc
```
"""
import diamond.collector
from diamond.collector import str_to_bool
import re
import zlib
try:
import pymongo
pymongo # workaround for pyflakes issue #13
except ImportError:
pymongo = None
try:
from pymongo import ReadPreference
ReadPreference # workaround for pyflakes issue #13
except ImportError:
ReadPreference = None
class MongoDBCollector(diamond.collector.Collector):
MAX_CRC32 = 4294967295
def __init__(self, *args, **kwargs):
self.__totals = {}
super(MongoDBCollector, self).__init__(*args, **kwargs)
def get_default_config_help(self):
config_help = super(MongoDBCollector, self).get_default_config_help()
config_help.update({
'hosts': 'Array of hostname(:port) elements to get metrics from'
'Set an alias by prefixing host:port with alias@',
'host': 'A single hostname(:port) to get metrics from'
' (can be used instead of hosts and overrides it)',
'user': 'Username for authenticated login (optional)',
'passwd': 'Password for authenticated login (optional)',
'databases': 'A regex of which databases to gather metrics for.'
' Defaults to all databases.',
'ignore_collections': 'A regex of which collections to ignore.'
' MapReduce temporary collections (tmp.mr.*)'
' are ignored by default.',
'collection_sample_rate': 'Only send stats for a consistent subset '
'of collections. This is applied after collections are ignored via'
            ' ignore_collections. Sampling uses crc32 so it is consistent across'
' replicas. Value between 0 and 1. Default is 1',
'network_timeout': 'Timeout for mongodb connection (in seconds).'
' There is no timeout by default.',
'simple': 'Only collect the same metrics as mongostat.',
'translate_collections': 'Translate dot (.) to underscores (_)'
' in collection names.',
'ssl': 'True to enable SSL connections to the MongoDB server.'
' Default is False'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MongoDBCollector, self).get_default_config()
config.update({
'path': 'mongo',
'hosts': ['localhost'],
'user': None,
'passwd': None,
'databases': '.*',
'ignore_collections': '^tmp\.mr\.',
'network_timeout': None,
'simple': 'False',
'translate_collections': 'False',
'collection_sample_rate': 1,
'ssl': False
})
return config
def collect(self):
"""Collect number values from db.serverStatus()"""
if pymongo is None:
self.log.error('Unable to import pymongo')
return
hosts = self.config.get('hosts')
# Convert a string config value to be an array
if isinstance(hosts, basestring):
hosts = [hosts]
# we need this for backwards compatibility
if 'host' in self.config:
hosts = [self.config['host']]
# convert network_timeout to integer
if self.config['network_timeout']:
self.config['network_timeout'] = int(
self.config['network_timeout'])
# convert collection_sample_rate to float
if self.config['collection_sample_rate']:
self.config['collection_sample_rate'] = float(
self.config['collection_sample_rate'])
# use auth if given
if 'user' in self.config:
user = self.config['user']
else:
user = None
if 'passwd' in self.config:
passwd = self.config['passwd']
else:
passwd = None
for host in hosts:
matches = re.search('((.+)\@)?(.+)?', host)
alias = matches.group(2)
host = matches.group(3)
if alias is None:
if len(hosts) == 1:
# one host only, no need to have a prefix
base_prefix = []
else:
base_prefix = [re.sub('[:\.]', '_', host)]
else:
base_prefix = [alias]
try:
# Ensure that the SSL option is a boolean.
if type(self.config['ssl']) is str:
self.config['ssl'] = str_to_bool(self.config['ssl'])
if ReadPreference is None:
conn = pymongo.Connection(
host,
network_timeout=self.config['network_timeout'],
ssl=self.config['ssl'],
slave_okay=True
)
else:
conn = pymongo.Connection(
host,
network_timeout=self.config['network_timeout'],
ssl=self.config['ssl'],
read_preference=ReadPreference.SECONDARY,
)
except Exception, e:
self.log.error('Couldnt connect to mongodb: %s', e)
continue
# try auth
if user:
try:
conn.admin.authenticate(user, passwd)
except Exception, e:
                    self.log.error('User auth given, but could not authenticate'
+ ' with host: %s, err: %s' % (host, e))
return{}
data = conn.db.command('serverStatus')
self._publish_transformed(data, base_prefix)
if str_to_bool(self.config['simple']):
data = self._extract_simple_data(data)
self._publish_dict_with_prefix(data, base_prefix)
db_name_filter = re.compile(self.config['databases'])
ignored_collections = re.compile(self.config['ignore_collections'])
sample_threshold = self.MAX_CRC32 * self.config[
'collection_sample_rate']
for db_name in conn.database_names():
if not db_name_filter.search(db_name):
continue
db_stats = conn[db_name].command('dbStats')
db_prefix = base_prefix + ['databases', db_name]
self._publish_dict_with_prefix(db_stats, db_prefix)
for collection_name in conn[db_name].collection_names():
if ignored_collections.search(collection_name):
continue
if (self.config['collection_sample_rate'] < 1 and (
zlib.crc32(collection_name) & 0xffffffff
) > sample_threshold):
continue
collection_stats = conn[db_name].command('collstats',
collection_name)
if str_to_bool(self.config['translate_collections']):
collection_name = collection_name.replace('.', '_')
collection_prefix = db_prefix + [collection_name]
self._publish_dict_with_prefix(collection_stats,
collection_prefix)
def _publish_transformed(self, data, base_prefix):
""" Publish values of type: counter or percent """
self._publish_dict_with_prefix(data.get('opcounters', {}),
base_prefix + ['opcounters_per_sec'],
self.publish_counter)
self._publish_dict_with_prefix(data.get('opcountersRepl', {}),
base_prefix + ['opcountersRepl_per_sec'],
self.publish_counter)
self._publish_metrics(base_prefix + ['backgroundFlushing_per_sec'],
'flushes',
data.get('backgroundFlushing', {}),
self.publish_counter)
self._publish_dict_with_prefix(data.get('network', {}),
base_prefix + ['network_per_sec'],
self.publish_counter)
self._publish_metrics(base_prefix + ['extra_info_per_sec'],
'page_faults',
data.get('extra_info', {}),
self.publish_counter)
def get_dotted_value(data, key_name):
key_name = key_name.split('.')
for i in key_name:
data = data.get(i, {})
if not data:
return 0
return data
def compute_interval(data, total_name):
current_total = get_dotted_value(data, total_name)
total_key = '.'.join(base_prefix + [total_name])
last_total = self.__totals.get(total_key, current_total)
interval = current_total - last_total
self.__totals[total_key] = current_total
return interval
def publish_percent(value_name, total_name, data):
value = float(get_dotted_value(data, value_name) * 100)
interval = compute_interval(data, total_name)
key = '.'.join(base_prefix + ['percent', value_name])
self.publish_counter(key, value, time_delta=bool(interval),
interval=interval)
publish_percent('globalLock.lockTime', 'globalLock.totalTime', data)
publish_percent('indexCounters.btree.misses',
'indexCounters.btree.accesses', data)
locks = data.get('locks')
if locks:
if '.' in locks:
locks['_global_'] = locks['.']
del (locks['.'])
key_prefix = '.'.join(base_prefix + ['percent'])
db_name_filter = re.compile(self.config['databases'])
interval = compute_interval(data, 'uptimeMillis')
for db_name in locks:
if not db_name_filter.search(db_name):
continue
r = get_dotted_value(
locks,
'%s.timeLockedMicros.r' % db_name)
R = get_dotted_value(
locks,
'.%s.timeLockedMicros.R' % db_name)
value = float(r + R) / 10
if value:
self.publish_counter(
key_prefix + '.locks.%s.read' % db_name,
value, time_delta=bool(interval),
interval=interval)
w = get_dotted_value(
locks,
'%s.timeLockedMicros.w' % db_name)
W = get_dotted_value(
locks,
'%s.timeLockedMicros.W' % db_name)
value = float(w + W) / 10
if value:
self.publish_counter(
key_prefix + '.locks.%s.write' % db_name,
value, time_delta=bool(interval), interval=interval)
def _publish_dict_with_prefix(self, dict, prefix, publishfn=None):
for key in dict:
self._publish_metrics(prefix, key, dict, publishfn)
def _publish_metrics(self, prev_keys, key, data, publishfn=None):
"""Recursively publish keys"""
if not key in data:
return
value = data[key]
keys = prev_keys + [key]
if not publishfn:
publishfn = self.publish
if isinstance(value, dict):
for new_key in value:
self._publish_metrics(keys, new_key, value)
elif isinstance(value, int) or isinstance(value, float):
publishfn('.'.join(keys), value)
elif isinstance(value, long):
publishfn('.'.join(keys), float(value))
def _extract_simple_data(self, data):
return {
'connections': data.get('connections'),
'globalLock': data.get('globalLock'),
'indexCounters': data.get('indexCounters')
}
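# Note on collection_sample_rate: sampling is deterministic per collection
# name (crc32), so every replica reports the same subset. A collection is
# skipped when (zlib.crc32(name) & 0xffffffff) > MAX_CRC32 * rate; the values
# below are purely illustrative:
#
#   rate = 0.25
#   threshold = 4294967295 * 0.25                        # ~1073741823
#   keep = (zlib.crc32('users') & 0xffffffff) <= threshold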
| mit | 8,178,841,530,689,513,000 | 2,317,644,819,491,845,000 | 39.100946 | 80 | 0.503933 | false |
DataViva/dataviva-site | dataviva/apps/build_graph/views.py | 1 | 3965 | # -*- coding: utf-8 -*-
import re
from flask import Blueprint, render_template, g, jsonify, request
from dataviva.apps.general.views import get_locale
from dataviva.apps.embed.models import Build, App
from dataviva.api.rais.services import Industry as CnaeService
from dataviva.api.secex.services import Product as SecexService
from dataviva.api.hedu.services import University as HeduService
from dataviva.api.sc.services import Basic_course as ScService
from dataviva.translations.dictionary import dictionary
from sqlalchemy import not_
mod = Blueprint('build_graph', __name__,
template_folder='templates',
url_prefix='/<lang_code>/build_graph')
@mod.before_request
def before_request():
g.page_type = mod.name
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
def parse_filter_id(filter_id):
if filter_id != 'all':
return '<%>'
else:
return filter_id
@mod.route('/')
@mod.route('/<dataset>/<filter0>/<filter1>/<filter2>')
def index(dataset=None, filter0=None, filter1=None, filter2=None):
view = request.args.get('view')
graph = request.args.get('graph')
compare = request.args.get('compare')
metadata = None
build_query = Build.query.join(App).filter(
Build.dataset == dataset,
Build.filter1.like(parse_filter_id(filter1)),
Build.filter2.like(parse_filter_id(filter2)),
Build.slug2_en == view,
App.type == graph)
if graph:
build = build_query.first_or_404()
build.set_bra(filter0)
if filter1 != 'all':
build.set_filter1(filter1)
if filter2 != 'all':
build.set_filter2(filter2)
service_id = filter1 if filter1 != u'all' else None
year = ' - ' if dataset else ''
if dataset == 'rais':
year += str(CnaeService(service_id).get_year())
elif dataset == 'secex':
year += str(SecexService(service_id).year())
elif dataset == 'hedu':
year += str(HeduService(service_id).year())
elif dataset == 'sc':
year += str(ScService(service_id).course_year())
title = re.sub(r'\s\(.*\)', r'', build.title())
metadata = {
'view': title,
'graph': dictionary()[graph],
'dataset': dictionary()[dataset] + year,
}
return render_template(
'build_graph/index.html', dataset=dataset, filter0=filter0, filter1=filter1, filter2=filter2,
graph=graph, view=view, compare=compare, metadata=metadata)
def parse_filter(filter):
if filter != 'all':
return '<%s>' % filter
else:
return filter
@mod.route('/views/<dataset>/<bra>/<filter1>/<filter2>')
def views(dataset, bra, filter1, filter2):
    '''/views/secex/all/hs/wld'''
build_query = Build.query.filter(
Build.dataset == dataset,
Build.filter1 == parse_filter(filter1),
Build.filter2 == parse_filter(filter2))
if bra != 'all':
        build_query = build_query.filter(not_(Build.bra.like('all')))
views = {}
for build in build_query.all():
if bra == 'all' and build.app.type == 'compare':
break
if bra:
build.set_bra(bra)
if filter1 != 'all':
build.set_filter1(request.args.get('filter1'))
if filter2 != 'all':
build.set_filter2(request.args.get('filter2'))
title = re.sub(r'\s\(.*\)', r'', build.title())
id = build.slug2_en
if id not in views:
views[id] = {
'id': id,
'name': title,
'graphs': {},
}
views[id]['graphs'][build.app.type] = {
'url': build.url(),
'name': build.app.name()
}
return jsonify(views=views)
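# Illustrative request shapes for the routes above (the dataset, filters and
# graph/view names are examples; real values come from the Build/App tables):
#
#   /en/build_graph/                               -> empty graph builder page
#   /en/build_graph/secex/sp/all/all?graph=tree_map&view=product
#                                                  -> builder page with metadata
#   /en/build_graph/views/secex/sp/all/all         -> JSON of available views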
| mit | 2,656,323,573,984,715,300 | -2,021,363,926,349,102,800 | 26.727273 | 101 | 0.588398 | false |
polyaxon/polyaxon-api | polyaxon_lib/polyaxonfile/local_runner.py | 1 | 4049 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import atexit
import json
import os
import signal
import subprocess
import sys
import time
from six.moves import xrange
from multiprocessing import Process
from tensorflow.python.platform import tf_logging as logging
from polyaxon_schemas.polyaxonfile.polyaxonfile import PolyaxonFile
from polyaxon_schemas.polyaxonfile.specification import Specification
from polyaxon_schemas.utils import TaskType
from polyaxon_lib.polyaxonfile.manager import (
prepare_all_experiment_jobs,
start_experiment_run,
)
jobs = []
processes = []
current_run = {'finished': False, TaskType.MASTER: None}
def cleanup():
for job in jobs:
job.terminate()
job.join()
# Register cleanup function to the exit of this module
atexit.register(cleanup)
def signal_handler(*args):
for p in processes:
p.terminate()
current_run['finished'] = True
def check_master_process():
print([p.is_alive() for p in jobs])
if not current_run['master'].is_alive():
signal_handler()
cleanup()
def get_pybin():
try:
pybin = os.path.join(os.environ['VIRTUAL_ENV'], 'bin/python')
except: # pylint: disable=bare-except
pybin = sys.executable
return pybin
def run_cmd(pybin, cmd, cwd):
env_cmd = '{} {}'.format(pybin, cmd)
signal.signal(signal.SIGINT, signal_handler)
logging.info(env_cmd)
p = subprocess.Popen(env_cmd, cwd=cwd, shell=True)
processes.append(p)
_, error = p.communicate()
if error:
logging.error('{} - ERROR: '.format(error))
def create_process(env):
cmd = ("""-c \"from polyaxon_lib.polyaxonfile.local_runner import start_experiment_run;
start_experiment_run(
'{polyaxonfile}', '{experiment_id}', '{task_type}', {task_id}, '{schedule}')\"""".format(
**env))
p = Process(target=run_cmd, args=(get_pybin(), cmd, os.getcwd(),))
p.daemon = True
p.start()
jobs.append(p)
if env['task_type'] == TaskType.MASTER:
current_run[TaskType.MASTER] = p
def run_experiment(spec_config, xp):
spec = Specification.read(spec_config)
logging.info("running Experiment n: {}".format(xp))
cluster, is_distributed = spec.cluster_def
if not is_distributed:
start_experiment_run(spec, xp, TaskType.MASTER, 0, 'continuous_train_and_eval')
current_run['finished'] = True
else:
env = {
'polyaxonfile': json.dumps(spec.parsed_data),
'task_type': TaskType.MASTER,
'experiment_id': xp,
'task_id': 0,
'schedule': 'train_and_evaluate'
}
create_process(env)
for i in xrange(cluster.get(TaskType.WORKER, 0)):
env['task_id'] = i
env['task_type'] = TaskType.WORKER
env['schedule'] = 'train'
create_process(env)
for i in xrange(cluster.get(TaskType.PS, 0)):
env['task_id'] = i
env['task_type'] = TaskType.PS
env['schedule'] = 'run_std_server'
create_process(env)
for job in jobs:
job.join()
def run(polyaxonfile):
plx_file = PolyaxonFile.read(polyaxonfile)
for xp in range(plx_file.matrix_space):
run_experiment(plx_file.experiment_specs[xp], xp)
while not current_run['finished']:
check_master_process()
time.sleep(10)
current_run['finished'] = False
current_run['master'] = None
def run_all(polyaxonfile):
plx_file = PolyaxonFile.read(polyaxonfile)
for xp in range(plx_file.matrix_space):
xp_jobs = prepare_all_experiment_jobs(plx_file.experiment_specs[xp], xp)
for i, xp_job in enumerate(xp_jobs):
if i == 0:
schedule = 'train_and_evaluate'
else:
schedule = 'train'
p = Process(target=getattr(xp_job, schedule))
p.start()
jobs.append(p)
for job in jobs:
job.join()
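# Illustrative entry point (assumption: the path to a polyaxonfile is passed
# as the first command line argument, e.g. `python local_runner.py plx.yml`).
if __name__ == '__main__':
    run(sys.argv[1])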
| mit | 1,775,139,914,783,036,700 | -3,924,439,349,919,227,400 | 25.81457 | 93 | 0.61472 | false |
actuaryzhang/spark | examples/src/main/python/ml/dct_example.py | 123 | 1509 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import DCT
from pyspark.ml.linalg import Vectors
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("DCTExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame([
(Vectors.dense([0.0, 1.0, -2.0, 3.0]),),
(Vectors.dense([-1.0, 2.0, 4.0, -7.0]),),
(Vectors.dense([14.0, -2.0, -5.0, 1.0]),)], ["features"])
dct = DCT(inverse=False, inputCol="features", outputCol="featuresDCT")
dctDf = dct.transform(df)
dctDf.select("featuresDCT").show(truncate=False)
# $example off$
spark.stop()
| apache-2.0 | -6,823,620,537,601,908,000 | 6,690,293,816,046,402,000 | 32.533333 | 74 | 0.689861 | false |
FreeScienceCommunity/rt-thread | tools/package.py | 45 | 2209 | #
# File : package.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-04-10 Bernard First version
#
# this script is used to build group with package.json instead of SConscript
import os
from building import *
def ExtendPackageVar(package, var):
v = []
if not package.has_key(var):
return v
for item in package[var]:
v = v + [item]
return v
def BuildPackage(package):
import json
f = file(package)
package_json = f.read()
# get package.json path
cwd = os.path.dirname(package)
package = json.loads(package_json)
# check package name
if not package.has_key('name'):
return []
# get depends
depend = ExtendPackageVar(package, 'depends')
src = []
if package.has_key('source_files'):
for src_file in package['source_files']:
src_file = os.path.join(cwd, src_file)
src += Glob(src_file)
CPPPATH = []
if package.has_key('CPPPATH'):
for path in package['CPPPATH']:
if path.startswith('/') and os.path.isdir(path):
CPPPATH = CPPPATH + [path]
else:
CPPPATH = CPPPATH + [os.path.join(cwd, path)]
CPPDEFINES = ExtendPackageVar(package, 'CPPDEFINES')
objs = DefineGroup(package['name'], src, depend = depend, CPPPATH = CPPPATH, CPPDEFINES = CPPDEFINES)
return objs
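# Example of the package.json layout BuildPackage() consumes (illustrative;
# the package name, macro and paths below are assumptions):
#
# {
#     "name": "drivers_spi",
#     "depends": ["RT_USING_SPI"],
#     "CPPDEFINES": ["RT_USING_SPI"],
#     "CPPPATH": ["inc"],
#     "source_files": ["src/*.c"]
# }
#
# objs = BuildPackage('packages/drivers_spi/package.json')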
| gpl-2.0 | -2,102,784,872,106,429,700 | -8,907,135,672,616,193,000 | 28.851351 | 105 | 0.651879 | false |
tomhughes/mapnik | scons/scons-time.py | 3 | 47967 | #!/usr/bin/env python
#
# scons-time - run SCons timings and collect statistics
#
# A script for running a configuration through SCons with a standard
# set of invocations to collect timing and memory statistics and to
# capture the results in a consistent set of output files for display
# and analysis.
#
#
# Copyright (c) 2001 - 2021 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "bin/scons-time.py 215860fd4f6bea67896c145660a035fad20cc41c 2021-01-19 19:32:22 bdbaddog"
import getopt
import glob
import os
import re
import shutil
import sys
import tempfile
import time
def HACK_for_exec(cmd, *args):
"""
For some reason, Python won't allow an exec() within a function
that also declares an internal function (including lambda functions).
This function is a hack that calls exec() in a function with no
internal functions.
"""
if not args:
exec(cmd)
elif len(args) == 1:
exec(cmd, args[0])
else:
exec(cmd, args[0], args[1])
class Plotter:
def increment_size(self, largest):
"""
Return the size of each horizontal increment line for a specified
maximum value. This returns a value that will provide somewhere
between 5 and 9 horizontal lines on the graph, on some set of
boundaries that are multiples of 10/100/1000/etc.
"""
i = largest // 5
if not i:
return largest
multiplier = 1
while i >= 10:
i = i // 10
multiplier = multiplier * 10
return i * multiplier
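    # For example, increment_size(1234) returns 200: 1234 // 5 is 246, which is
    # reduced to 2 with a multiplier of 100, giving roughly six grid lines.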
def max_graph_value(self, largest):
# Round up to next integer.
largest = int(largest) + 1
increment = self.increment_size(largest)
return ((largest + increment - 1) // increment) * increment
class Line:
def __init__(self, points, type, title, label, comment, fmt="%s %s"):
self.points = points
self.type = type
self.title = title
self.label = label
self.comment = comment
self.fmt = fmt
def print_label(self, inx, x, y):
if self.label:
print('set label %s "%s" at %0.1f,%0.1f right' % (inx, self.label, x, y))
def plot_string(self):
if self.title:
title_string = 'title "%s"' % self.title
else:
title_string = 'notitle'
return "'-' %s with lines lt %s" % (title_string, self.type)
def print_points(self, fmt=None):
if fmt is None:
fmt = self.fmt
if self.comment:
print('# %s' % self.comment)
for x, y in self.points:
# If y is None, it usually represents some kind of break
# in the line's index number. We might want to represent
# this some way rather than just drawing the line straight
# between the two points on either side.
if y is not None:
print(fmt % (x, y))
print('e')
def get_x_values(self):
return [p[0] for p in self.points]
def get_y_values(self):
return [p[1] for p in self.points]
class Gnuplotter(Plotter):
def __init__(self, title, key_location):
self.lines = []
self.title = title
self.key_location = key_location
def line(self, points, type, title=None, label=None, comment=None, fmt='%s %s'):
if points:
line = Line(points, type, title, label, comment, fmt)
self.lines.append(line)
def plot_string(self, line):
return line.plot_string()
def vertical_bar(self, x, type, label, comment):
if self.get_min_x() <= x <= self.get_max_x():
points = [(x, 0), (x, self.max_graph_value(self.get_max_y()))]
self.line(points, type, label, comment)
def get_all_x_values(self):
result = []
for line in self.lines:
result.extend(line.get_x_values())
return [r for r in result if r is not None]
def get_all_y_values(self):
result = []
for line in self.lines:
result.extend(line.get_y_values())
return [r for r in result if r is not None]
def get_min_x(self):
try:
return self.min_x
except AttributeError:
try:
self.min_x = min(self.get_all_x_values())
except ValueError:
self.min_x = 0
return self.min_x
def get_max_x(self):
try:
return self.max_x
except AttributeError:
try:
self.max_x = max(self.get_all_x_values())
except ValueError:
self.max_x = 0
return self.max_x
def get_min_y(self):
try:
return self.min_y
except AttributeError:
try:
self.min_y = min(self.get_all_y_values())
except ValueError:
self.min_y = 0
return self.min_y
def get_max_y(self):
try:
return self.max_y
except AttributeError:
try:
self.max_y = max(self.get_all_y_values())
except ValueError:
self.max_y = 0
return self.max_y
def draw(self):
if not self.lines:
return
if self.title:
print('set title "%s"' % self.title)
print('set key %s' % self.key_location)
min_y = self.get_min_y()
max_y = self.max_graph_value(self.get_max_y())
incr = (max_y - min_y) / 10.0
start = min_y + (max_y / 2.0) + (2.0 * incr)
position = [start - (i * incr) for i in range(5)]
inx = 1
for line in self.lines:
line.print_label(inx, line.points[0][0] - 1,
position[(inx - 1) % len(position)])
inx += 1
plot_strings = [self.plot_string(l) for l in self.lines]
print('plot ' + ', \\\n '.join(plot_strings))
for line in self.lines:
line.print_points()
def untar(fname):
import tarfile
tar = tarfile.open(name=fname, mode='r')
for tarinfo in tar:
tar.extract(tarinfo)
tar.close()
def unzip(fname):
import zipfile
zf = zipfile.ZipFile(fname, 'r')
for name in zf.namelist():
dir = os.path.dirname(name)
try:
os.makedirs(dir)
except OSError:
pass
with open(name, 'wb') as f:
f.write(zf.read(name))
def read_tree(dir):
for dirpath, dirnames, filenames in os.walk(dir):
for fn in filenames:
fn = os.path.join(dirpath, fn)
if os.path.isfile(fn):
with open(fn, 'rb') as f:
f.read()
def redirect_to_file(command, log):
return '%s > %s 2>&1' % (command, log)
def tee_to_file(command, log):
return '%s 2>&1 | tee %s' % (command, log)
class SConsTimer:
"""
Usage: scons-time SUBCOMMAND [ARGUMENTS]
Type "scons-time help SUBCOMMAND" for help on a specific subcommand.
Available subcommands:
func Extract test-run data for a function
help Provides help
mem Extract --debug=memory data from test runs
obj Extract --debug=count data from test runs
time Extract --debug=time data from test runs
run Runs a test configuration
"""
name = 'scons-time'
name_spaces = ' ' * len(name)
def makedict(**kw):
return kw
default_settings = makedict(
chdir=None,
config_file=None,
initial_commands=[],
key_location='bottom left',
orig_cwd=os.getcwd(),
outdir=None,
prefix='',
python='"%s"' % sys.executable,
redirect=redirect_to_file,
scons=None,
scons_flags='--debug=count --debug=memory --debug=time --debug=memoizer',
scons_lib_dir=None,
scons_wrapper=None,
startup_targets='--help',
subdir=None,
subversion_url=None,
svn='svn',
svn_co_flag='-q',
tar='tar',
targets='',
targets0=None,
targets1=None,
targets2=None,
title=None,
unzip='unzip',
verbose=False,
vertical_bars=[],
unpack_map={
'.tar.gz': (untar, '%(tar)s xzf %%s'),
'.tgz': (untar, '%(tar)s xzf %%s'),
'.tar': (untar, '%(tar)s xf %%s'),
'.zip': (unzip, '%(unzip)s %%s'),
},
)
run_titles = [
'Startup',
'Full build',
'Up-to-date build',
]
run_commands = [
'%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof0)s %(targets0)s',
'%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof1)s %(targets1)s',
'%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof2)s %(targets2)s',
]
stages = [
'pre-read',
'post-read',
'pre-build',
'post-build',
]
stage_strings = {
'pre-read': 'Memory before reading SConscript files:',
'post-read': 'Memory after reading SConscript files:',
'pre-build': 'Memory before building targets:',
'post-build': 'Memory after building targets:',
}
memory_string_all = 'Memory '
default_stage = stages[-1]
time_strings = {
'total': 'Total build time',
'SConscripts': 'Total SConscript file execution time',
'SCons': 'Total SCons execution time',
'commands': 'Total command execution time',
}
time_string_all = 'Total .* time'
#
def __init__(self):
self.__dict__.update(self.default_settings)
# Functions for displaying and executing commands.
def subst(self, x, dictionary):
try:
return x % dictionary
except TypeError:
# x isn't a string (it's probably a Python function),
# so just return it.
return x
def subst_variables(self, command, dictionary):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
try:
command + ''
except TypeError:
action = command[0]
string = command[1]
args = command[2:]
else:
action = command
string = action
args = (())
action = self.subst(action, dictionary)
string = self.subst(string, dictionary)
return (action, string, args)
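    # For example, subst_variables('%(python)s -V', {'python': '/usr/bin/python'})
    # returns ('/usr/bin/python -V', '/usr/bin/python -V', ()).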
def _do_not_display(self, msg, *args):
pass
def display(self, msg, *args):
"""
Displays the specified message.
Each message is prepended with a standard prefix of our name
plus the time.
"""
if callable(msg):
msg = msg(*args)
else:
msg = msg % args
if msg is None:
return
fmt = '%s[%s]: %s\n'
sys.stdout.write(fmt % (self.name, time.strftime('%H:%M:%S'), msg))
def _do_not_execute(self, action, *args):
pass
def execute(self, action, *args):
"""
Executes the specified action.
The action is called if it's a callable Python function, and
otherwise passed to os.system().
"""
if callable(action):
action(*args)
else:
os.system(action % args)
def run_command_list(self, commands, dict):
"""
Executes a list of commands, substituting values from the
specified dictionary.
"""
commands = [self.subst_variables(c, dict) for c in commands]
for action, string, args in commands:
self.display(string, *args)
sys.stdout.flush()
status = self.execute(action, *args)
if status:
sys.exit(status)
def log_display(self, command, log):
command = self.subst(command, self.__dict__)
if log:
command = self.redirect(command, log)
return command
def log_execute(self, command, log):
command = self.subst(command, self.__dict__)
p = os.popen(command)
output = p.read()
p.close()
        # TODO: convert to subprocess, os.popen is obsolete. This didn't work:
# process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
# output = process.stdout.read()
# process.stdout.close()
# process.wait()
if self.verbose:
sys.stdout.write(output)
# TODO: Figure out
# Not sure we need to write binary here
with open(log, 'w') as f:
f.write(str(output))
def archive_splitext(self, path):
"""
Splits an archive name into a filename base and extension.
This is like os.path.splitext() (which it calls) except that it
        also looks for '.tar.gz' and treats it as an atomic extension.
"""
if path.endswith('.tar.gz'):
return path[:-7], path[-7:]
else:
return os.path.splitext(path)
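    # For example, archive_splitext('scons-src-0.96.tar.gz') returns
    # ('scons-src-0.96', '.tar.gz') rather than ('scons-src-0.96.tar', '.gz').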
def args_to_files(self, args, tail=None):
"""
Takes a list of arguments, expands any glob patterns, and
returns the last "tail" files from the list.
"""
files = []
for a in args:
files.extend(sorted(glob.glob(a)))
if tail:
files = files[-tail:]
return files
def ascii_table(self, files, columns,
line_function, file_function=lambda x: x,
*args, **kw):
header_fmt = ' '.join(['%12s'] * len(columns))
line_fmt = header_fmt + ' %s'
print(header_fmt % columns)
for file in files:
t = line_function(file, *args, **kw)
if t is None:
t = []
diff = len(columns) - len(t)
if diff > 0:
t += [''] * diff
t.append(file_function(file))
print(line_fmt % tuple(t))
def collect_results(self, files, function, *args, **kw):
results = {}
for file in files:
base = os.path.splitext(file)[0]
run, index = base.split('-')[-2:]
run = int(run)
index = int(index)
value = function(file, *args, **kw)
try:
r = results[index]
except KeyError:
r = []
results[index] = r
r.append((run, value))
return results
def doc_to_help(self, obj):
"""
Translates an object's __doc__ string into help text.
This strips a consistent number of spaces from each line in the
help text, essentially "outdenting" the text to the left-most
column.
"""
doc = obj.__doc__
if doc is None:
return ''
return self.outdent(doc)
def find_next_run_number(self, dir, prefix):
"""
Returns the next run number in a directory for the specified prefix.
        Examines the contents of the specified directory for files with the
specified prefix, extracts the run numbers from each file name,
and returns the next run number after the largest it finds.
"""
x = re.compile(re.escape(prefix) + '-([0-9]+).*')
matches = [x.match(e) for e in os.listdir(dir)]
matches = [_f for _f in matches if _f]
if not matches:
return 0
run_numbers = [int(m.group(1)) for m in matches]
return int(max(run_numbers)) + 1
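    # For example, with foo-000.log and foo-001.prof already present in dir,
    # find_next_run_number(dir, 'foo') returns 2.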
def gnuplot_results(self, results, fmt='%s %.3f'):
"""
Prints out a set of results in Gnuplot format.
"""
gp = Gnuplotter(self.title, self.key_location)
for i in sorted(results.keys()):
try:
t = self.run_titles[i]
except IndexError:
t = '??? %s ???' % i
results[i].sort()
gp.line(results[i], i + 1, t, None, t, fmt=fmt)
for bar_tuple in self.vertical_bars:
try:
x, type, label, comment = bar_tuple
except ValueError:
x, type, label = bar_tuple
comment = label
gp.vertical_bar(x, type, label, comment)
gp.draw()
def logfile_name(self, invocation):
"""
        Returns the absolute path of a log file for the specified
invocation number.
"""
name = self.prefix_run + '-%d.log' % invocation
return os.path.join(self.outdir, name)
def outdent(self, s):
"""
Strip as many spaces from each line as are found at the beginning
of the first line in the list.
"""
lines = s.split('\n')
if lines[0] == '':
lines = lines[1:]
spaces = re.match(' *', lines[0]).group(0)
def strip_initial_spaces(line, s=spaces):
if line.startswith(spaces):
line = line[len(spaces):]
return line
return '\n'.join([strip_initial_spaces(l) for l in lines]) + '\n'
def profile_name(self, invocation):
"""
Returns the absolute path of a profile file for the specified
invocation number.
"""
name = self.prefix_run + '-%d.prof' % invocation
return os.path.join(self.outdir, name)
def set_env(self, key, value):
os.environ[key] = value
#
def get_debug_times(self, file, time_string=None):
"""
Fetch times from the --debug=time strings in the specified file.
"""
if time_string is None:
search_string = self.time_string_all
else:
search_string = time_string
with open(file) as f:
contents = f.read()
if not contents:
sys.stderr.write('file %s has no contents!\n' % repr(file))
return None
result = re.findall(r'%s: ([\d.]*)' % search_string, contents)[-4:]
result = [float(r) for r in result]
if time_string is not None:
try:
result = result[0]
except IndexError:
sys.stderr.write('file %s has no results!\n' % repr(file))
return None
return result
def get_function_profile(self, file, function):
"""
Returns the file, line number, function name, and cumulative time.
"""
try:
import pstats
except ImportError as e:
sys.stderr.write('%s: func: %s\n' % (self.name, e))
sys.stderr.write('%s This version of Python is missing the profiler.\n' % self.name_spaces)
sys.stderr.write('%s Cannot use the "func" subcommand.\n' % self.name_spaces)
sys.exit(1)
statistics = pstats.Stats(file).stats
matches = [e for e in statistics.items() if e[0][2] == function]
r = matches[0]
return r[0][0], r[0][1], r[0][2], r[1][3]
def get_function_time(self, file, function):
"""
Returns just the cumulative time for the specified function.
"""
return self.get_function_profile(file, function)[3]
def get_memory(self, file, memory_string=None):
"""
        Returns the amount of memory used as a list of integers, one per
        stage by default, or a single integer if memory_string is given.
"""
if memory_string is None:
search_string = self.memory_string_all
else:
search_string = memory_string
with open(file) as f:
lines = f.readlines()
lines = [l for l in lines if l.startswith(search_string)][-4:]
result = [int(l.split()[-1]) for l in lines[-4:]]
if len(result) == 1:
result = result[0]
return result
def get_object_counts(self, file, object_name, index=None):
"""
Returns the counts of the specified object_name.
"""
object_string = ' ' + object_name + '\n'
with open(file) as f:
lines = f.readlines()
line = [l for l in lines if l.endswith(object_string)][0]
result = [int(field) for field in line.split()[:4]]
if index is not None:
result = result[index]
return result
command_alias = {}
def execute_subcommand(self, argv):
"""
Executes the do_*() function for the specified subcommand (argv[0]).
"""
if not argv:
return
cmdName = self.command_alias.get(argv[0], argv[0])
try:
func = getattr(self, 'do_' + cmdName)
except AttributeError:
return self.default(argv)
try:
return func(argv)
except TypeError as e:
sys.stderr.write("%s %s: %s\n" % (self.name, cmdName, e))
import traceback
traceback.print_exc(file=sys.stderr)
sys.stderr.write("Try '%s help %s'\n" % (self.name, cmdName))
def default(self, argv):
"""
The default behavior for an unknown subcommand. Prints an
error message and exits.
"""
sys.stderr.write('%s: Unknown subcommand "%s".\n' % (self.name, argv[0]))
sys.stderr.write('Type "%s help" for usage.\n' % self.name)
sys.exit(1)
#
def do_help(self, argv):
"""
"""
if argv[1:]:
for arg in argv[1:]:
try:
func = getattr(self, 'do_' + arg)
except AttributeError:
sys.stderr.write('%s: No help for "%s"\n' % (self.name, arg))
else:
try:
help = getattr(self, 'help_' + arg)
except AttributeError:
sys.stdout.write(self.doc_to_help(func))
sys.stdout.flush()
else:
help()
else:
doc = self.doc_to_help(self.__class__)
if doc:
sys.stdout.write(doc)
sys.stdout.flush()
return None
#
def help_func(self):
help = """\
Usage: scons-time func [OPTIONS] FILE [...]
-C DIR, --chdir=DIR Change to DIR before looking for files
-f FILE, --file=FILE Read configuration from specified FILE
--fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
--func=NAME, --function=NAME Report time for function NAME
-h, --help Print this help and exit
-p STRING, --prefix=STRING Use STRING as log file/profile prefix
-t NUMBER, --tail=NUMBER Only report the last NUMBER files
--title=TITLE Specify the output plot TITLE
"""
sys.stdout.write(self.outdent(help))
sys.stdout.flush()
def do_func(self, argv):
"""
"""
format = 'ascii'
function_name = '_main'
tail = None
short_opts = '?C:f:hp:t:'
long_opts = [
'chdir=',
'file=',
'fmt=',
'format=',
'func=',
'function=',
'help',
'prefix=',
'tail=',
'title=',
]
opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
for o, a in opts:
if o in ('-C', '--chdir'):
self.chdir = a
elif o in ('-f', '--file'):
self.config_file = a
elif o in ('--fmt', '--format'):
format = a
elif o in ('--func', '--function'):
function_name = a
elif o in ('-?', '-h', '--help'):
self.do_help(['help', 'func'])
sys.exit(0)
elif o in ('--max',):
max_time = int(a)
elif o in ('-p', '--prefix'):
self.prefix = a
elif o in ('-t', '--tail'):
tail = int(a)
elif o in ('--title',):
self.title = a
if self.config_file:
with open(self.config_file, 'r') as f:
config = f.read()
exec(config, self.__dict__)
if self.chdir:
os.chdir(self.chdir)
if not args:
pattern = '%s*.prof' % self.prefix
args = self.args_to_files([pattern], tail)
if not args:
if self.chdir:
directory = self.chdir
else:
directory = os.getcwd()
sys.stderr.write('%s: func: No arguments specified.\n' % self.name)
sys.stderr.write('%s No %s*.prof files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
sys.stderr.write('%s Type "%s help func" for help.\n' % (self.name_spaces, self.name))
sys.exit(1)
else:
args = self.args_to_files(args, tail)
cwd_ = os.getcwd() + os.sep
if format == 'ascii':
for file in args:
try:
f, line, func, time = \
self.get_function_profile(file, function_name)
except ValueError as e:
sys.stderr.write("%s: func: %s: %s\n" %
(self.name, file, e))
else:
if f.startswith(cwd_):
f = f[len(cwd_):]
print("%.3f %s:%d(%s)" % (time, f, line, func))
elif format == 'gnuplot':
results = self.collect_results(args, self.get_function_time,
function_name)
self.gnuplot_results(results)
else:
sys.stderr.write('%s: func: Unknown format "%s".\n' % (self.name, format))
sys.exit(1)
#
def help_mem(self):
help = """\
Usage: scons-time mem [OPTIONS] FILE [...]
-C DIR, --chdir=DIR Change to DIR before looking for files
-f FILE, --file=FILE Read configuration from specified FILE
--fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
-h, --help Print this help and exit
-p STRING, --prefix=STRING Use STRING as log file/profile prefix
--stage=STAGE Plot memory at the specified stage:
pre-read, post-read, pre-build,
post-build (default: post-build)
-t NUMBER, --tail=NUMBER Only report the last NUMBER files
--title=TITLE Specify the output plot TITLE
"""
sys.stdout.write(self.outdent(help))
sys.stdout.flush()
def do_mem(self, argv):
format = 'ascii'
def _logfile_path(x):
return x
logfile_path = _logfile_path
stage = self.default_stage
tail = None
short_opts = '?C:f:hp:t:'
long_opts = [
'chdir=',
'file=',
'fmt=',
'format=',
'help',
'prefix=',
'stage=',
'tail=',
'title=',
]
opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
for o, a in opts:
if o in ('-C', '--chdir'):
self.chdir = a
elif o in ('-f', '--file'):
self.config_file = a
elif o in ('--fmt', '--format'):
format = a
elif o in ('-?', '-h', '--help'):
self.do_help(['help', 'mem'])
sys.exit(0)
elif o in ('-p', '--prefix'):
self.prefix = a
elif o in ('--stage',):
if a not in self.stages:
sys.stderr.write('%s: mem: Unrecognized stage "%s".\n' % (self.name, a))
sys.exit(1)
stage = a
elif o in ('-t', '--tail'):
tail = int(a)
elif o in ('--title',):
self.title = a
if self.config_file:
with open(self.config_file, 'r') as f:
config = f.read()
HACK_for_exec(config, self.__dict__)
if self.chdir:
os.chdir(self.chdir)
def _logfile_path_join(x):
return os.path.join(self.chdir, x)
logfile_path = _logfile_path_join
if not args:
pattern = '%s*.log' % self.prefix
args = self.args_to_files([pattern], tail)
if not args:
if self.chdir:
directory = self.chdir
else:
directory = os.getcwd()
sys.stderr.write('%s: mem: No arguments specified.\n' % self.name)
sys.stderr.write('%s No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
sys.stderr.write('%s Type "%s help mem" for help.\n' % (self.name_spaces, self.name))
sys.exit(1)
else:
args = self.args_to_files(args, tail)
# cwd_ = os.getcwd() + os.sep
if format == 'ascii':
self.ascii_table(args, tuple(self.stages), self.get_memory, logfile_path)
elif format == 'gnuplot':
results = self.collect_results(args, self.get_memory,
self.stage_strings[stage])
self.gnuplot_results(results)
else:
sys.stderr.write('%s: mem: Unknown format "%s".\n' % (self.name, format))
sys.exit(1)
return 0
#
def help_obj(self):
help = """\
Usage: scons-time obj [OPTIONS] OBJECT FILE [...]
-C DIR, --chdir=DIR Change to DIR before looking for files
-f FILE, --file=FILE Read configuration from specified FILE
--fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
-h, --help Print this help and exit
-p STRING, --prefix=STRING Use STRING as log file/profile prefix
--stage=STAGE Plot memory at the specified stage:
pre-read, post-read, pre-build,
post-build (default: post-build)
-t NUMBER, --tail=NUMBER Only report the last NUMBER files
--title=TITLE Specify the output plot TITLE
"""
sys.stdout.write(self.outdent(help))
sys.stdout.flush()
def do_obj(self, argv):
format = 'ascii'
def _logfile_path(x):
return x
logfile_path = _logfile_path
stage = self.default_stage
tail = None
short_opts = '?C:f:hp:t:'
long_opts = [
'chdir=',
'file=',
'fmt=',
'format=',
'help',
'prefix=',
'stage=',
'tail=',
'title=',
]
opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
for o, a in opts:
if o in ('-C', '--chdir'):
self.chdir = a
elif o in ('-f', '--file'):
self.config_file = a
elif o in ('--fmt', '--format'):
format = a
elif o in ('-?', '-h', '--help'):
self.do_help(['help', 'obj'])
sys.exit(0)
elif o in ('-p', '--prefix'):
self.prefix = a
elif o in ('--stage',):
if a not in self.stages:
sys.stderr.write('%s: obj: Unrecognized stage "%s".\n' % (self.name, a))
sys.stderr.write('%s Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
sys.exit(1)
stage = a
elif o in ('-t', '--tail'):
tail = int(a)
elif o in ('--title',):
self.title = a
if not args:
sys.stderr.write('%s: obj: Must specify an object name.\n' % self.name)
sys.stderr.write('%s Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
sys.exit(1)
object_name = args.pop(0)
if self.config_file:
with open(self.config_file, 'r') as f:
config = f.read()
HACK_for_exec(config, self.__dict__)
if self.chdir:
os.chdir(self.chdir)
def _logfile_path_join(x):
return os.path.join(self.chdir, x)
logfile_path = _logfile_path_join
if not args:
pattern = '%s*.log' % self.prefix
args = self.args_to_files([pattern], tail)
if not args:
if self.chdir:
directory = self.chdir
else:
directory = os.getcwd()
sys.stderr.write('%s: obj: No arguments specified.\n' % self.name)
sys.stderr.write('%s No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
sys.stderr.write('%s Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
sys.exit(1)
else:
args = self.args_to_files(args, tail)
cwd_ = os.getcwd() + os.sep
if format == 'ascii':
self.ascii_table(args, tuple(self.stages), self.get_object_counts, logfile_path, object_name)
elif format == 'gnuplot':
stage_index = 0
for s in self.stages:
if stage == s:
break
stage_index = stage_index + 1
results = self.collect_results(args, self.get_object_counts,
object_name, stage_index)
self.gnuplot_results(results)
else:
sys.stderr.write('%s: obj: Unknown format "%s".\n' % (self.name, format))
sys.exit(1)
return 0
#
def help_run(self):
help = """\
Usage: scons-time run [OPTIONS] [FILE ...]
--chdir=DIR Name of unpacked directory for chdir
-f FILE, --file=FILE Read configuration from specified FILE
-h, --help Print this help and exit
-n, --no-exec No execute, just print command lines
--number=NUMBER Put output in files for run NUMBER
--outdir=OUTDIR Put output files in OUTDIR
-p STRING, --prefix=STRING Use STRING as log file/profile prefix
--python=PYTHON Time using the specified PYTHON
-q, --quiet Don't print command lines
--scons=SCONS Time using the specified SCONS
--svn=URL, --subversion=URL Use SCons from Subversion URL
-v, --verbose Display output of commands
"""
sys.stdout.write(self.outdent(help))
sys.stdout.flush()
def do_run(self, argv):
"""
"""
run_number_list = [None]
short_opts = '?f:hnp:qs:v'
long_opts = [
'file=',
'help',
'no-exec',
'number=',
'outdir=',
'prefix=',
'python=',
'quiet',
'scons=',
'svn=',
'subdir=',
'subversion=',
'verbose',
]
opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
for o, a in opts:
if o in ('-f', '--file'):
self.config_file = a
elif o in ('-?', '-h', '--help'):
self.do_help(['help', 'run'])
sys.exit(0)
elif o in ('-n', '--no-exec'):
self.execute = self._do_not_execute
elif o in ('--number',):
run_number_list = self.split_run_numbers(a)
elif o in ('--outdir',):
self.outdir = a
elif o in ('-p', '--prefix'):
self.prefix = a
elif o in ('--python',):
self.python = a
elif o in ('-q', '--quiet'):
self.display = self._do_not_display
elif o in ('-s', '--subdir'):
self.subdir = a
elif o in ('--scons',):
self.scons = a
elif o in ('--svn', '--subversion'):
self.subversion_url = a
elif o in ('-v', '--verbose'):
self.redirect = tee_to_file
self.verbose = True
self.svn_co_flag = ''
if not args and not self.config_file:
sys.stderr.write('%s: run: No arguments or -f config file specified.\n' % self.name)
sys.stderr.write('%s Type "%s help run" for help.\n' % (self.name_spaces, self.name))
sys.exit(1)
if self.config_file:
with open(self.config_file, 'r') as f:
config = f.read()
exec(config, self.__dict__)
if args:
self.archive_list = args
archive_file_name = os.path.split(self.archive_list[0])[1]
if not self.subdir:
self.subdir = self.archive_splitext(archive_file_name)[0]
if not self.prefix:
self.prefix = self.archive_splitext(archive_file_name)[0]
prepare = None
if self.subversion_url:
prepare = self.prep_subversion_run
for run_number in run_number_list:
self.individual_run(run_number, self.archive_list, prepare)
def split_run_numbers(self, s):
result = []
for n in s.split(','):
try:
x, y = n.split('-')
except ValueError:
result.append(int(n))
else:
result.extend(list(range(int(x), int(y) + 1)))
return result
def scons_path(self, dir):
return os.path.join(dir, 'scripts', 'scons.py')
def scons_lib_dir_path(self, dir):
"""build the path to the engine.
this used to join src/engine, but no longer.
"""
return dir
def prep_subversion_run(self, commands, removals):
self.svn_tmpdir = tempfile.mkdtemp(prefix=self.name + '-svn-')
removals.append((shutil.rmtree, 'rm -rf %%s', self.svn_tmpdir))
self.scons = self.scons_path(self.svn_tmpdir)
self.scons_lib_dir = self.scons_lib_dir_path(self.svn_tmpdir)
commands.extend([
'%(svn)s co %(svn_co_flag)s -r %(run_number)s %(subversion_url)s %(svn_tmpdir)s',
])
def individual_run(self, run_number, archive_list, prepare=None):
"""
Performs an individual run of the default SCons invocations.
"""
commands = []
removals = []
if prepare:
prepare(commands, removals)
save_scons = self.scons
save_scons_wrapper = self.scons_wrapper
save_scons_lib_dir = self.scons_lib_dir
if self.outdir is None:
self.outdir = self.orig_cwd
elif not os.path.isabs(self.outdir):
self.outdir = os.path.join(self.orig_cwd, self.outdir)
if self.scons is None:
self.scons = self.scons_path(self.orig_cwd)
if self.scons_lib_dir is None:
self.scons_lib_dir = self.scons_lib_dir_path(self.orig_cwd)
if self.scons_wrapper is None:
self.scons_wrapper = self.scons
if not run_number:
run_number = self.find_next_run_number(self.outdir, self.prefix)
self.run_number = str(run_number)
self.prefix_run = self.prefix + '-%03d' % run_number
if self.targets0 is None:
self.targets0 = self.startup_targets
if self.targets1 is None:
self.targets1 = self.targets
if self.targets2 is None:
self.targets2 = self.targets
self.tmpdir = tempfile.mkdtemp(prefix=self.name + '-')
commands.extend([
(os.chdir, 'cd %%s', self.tmpdir),
])
for archive in archive_list:
if not os.path.isabs(archive):
archive = os.path.join(self.orig_cwd, archive)
if os.path.isdir(archive):
dest = os.path.split(archive)[1]
commands.append((shutil.copytree, 'cp -r %%s %%s', archive, dest))
else:
suffix = self.archive_splitext(archive)[1]
unpack_command = self.unpack_map.get(suffix)
if not unpack_command:
dest = os.path.split(archive)[1]
commands.append((shutil.copyfile, 'cp %%s %%s', archive, dest))
else:
commands.append(unpack_command + (archive,))
commands.extend([
(os.chdir, 'cd %%s', self.subdir),
])
commands.extend(self.initial_commands)
commands.extend([
(lambda: read_tree('.'),
'find * -type f | xargs cat > /dev/null'),
(self.set_env, 'export %%s=%%s',
'SCONS_LIB_DIR', self.scons_lib_dir),
'%(python)s %(scons_wrapper)s --version',
])
index = 0
for run_command in self.run_commands:
setattr(self, 'prof%d' % index, self.profile_name(index))
c = (
self.log_execute,
self.log_display,
run_command,
self.logfile_name(index),
)
commands.append(c)
index = index + 1
commands.extend([
(os.chdir, 'cd %%s', self.orig_cwd),
])
if not os.environ.get('PRESERVE'):
commands.extend(removals)
commands.append((shutil.rmtree, 'rm -rf %%s', self.tmpdir))
self.run_command_list(commands, self.__dict__)
self.scons = save_scons
self.scons_lib_dir = save_scons_lib_dir
self.scons_wrapper = save_scons_wrapper
#
def help_time(self):
help = """\
Usage: scons-time time [OPTIONS] FILE [...]
-C DIR, --chdir=DIR Change to DIR before looking for files
-f FILE, --file=FILE Read configuration from specified FILE
--fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
-h, --help Print this help and exit
-p STRING, --prefix=STRING Use STRING as log file/profile prefix
-t NUMBER, --tail=NUMBER Only report the last NUMBER files
--which=TIMER Plot timings for TIMER: total,
SConscripts, SCons, commands.
"""
sys.stdout.write(self.outdent(help))
sys.stdout.flush()
def do_time(self, argv):
format = 'ascii'
def _logfile_path(x):
return x
logfile_path = _logfile_path
tail = None
which = 'total'
short_opts = '?C:f:hp:t:'
long_opts = [
'chdir=',
'file=',
'fmt=',
'format=',
'help',
'prefix=',
'tail=',
'title=',
'which=',
]
opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
for o, a in opts:
if o in ('-C', '--chdir'):
self.chdir = a
elif o in ('-f', '--file'):
self.config_file = a
elif o in ('--fmt', '--format'):
format = a
elif o in ('-?', '-h', '--help'):
self.do_help(['help', 'time'])
sys.exit(0)
elif o in ('-p', '--prefix'):
self.prefix = a
elif o in ('-t', '--tail'):
tail = int(a)
elif o in ('--title',):
self.title = a
elif o in ('--which',):
if a not in list(self.time_strings.keys()):
sys.stderr.write('%s: time: Unrecognized timer "%s".\n' % (self.name, a))
sys.stderr.write('%s Type "%s help time" for help.\n' % (self.name_spaces, self.name))
sys.exit(1)
which = a
if self.config_file:
with open(self.config_file, 'r') as f:
config = f.read()
HACK_for_exec(config, self.__dict__)
if self.chdir:
os.chdir(self.chdir)
def _logfile_path_join(x):
return os.path.join(self.chdir, x)
logfile_path = _logfile_path_join
if not args:
pattern = '%s*.log' % self.prefix
args = self.args_to_files([pattern], tail)
if not args:
if self.chdir:
directory = self.chdir
else:
directory = os.getcwd()
sys.stderr.write('%s: time: No arguments specified.\n' % self.name)
sys.stderr.write('%s No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
sys.stderr.write('%s Type "%s help time" for help.\n' % (self.name_spaces, self.name))
sys.exit(1)
else:
args = self.args_to_files(args, tail)
cwd_ = os.getcwd() + os.sep
if format == 'ascii':
columns = ("Total", "SConscripts", "SCons", "commands")
self.ascii_table(args, columns, self.get_debug_times, logfile_path)
elif format == 'gnuplot':
results = self.collect_results(args, self.get_debug_times,
self.time_strings[which])
self.gnuplot_results(results, fmt='%s %.6f')
else:
sys.stderr.write('%s: time: Unknown format "%s".\n' % (self.name, format))
sys.exit(1)
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[1:], 'h?V', ['help', 'version'])
ST = SConsTimer()
for o, a in opts:
if o in ('-?', '-h', '--help'):
ST.do_help(['help'])
sys.exit(0)
elif o in ('-V', '--version'):
sys.stdout.write('scons-time version\n')
sys.exit(0)
if not args:
sys.stderr.write('Type "%s help" for usage.\n' % ST.name)
sys.exit(1)
ST.execute_subcommand(args)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 | 2,479,979,609,741,842,400 | -9,077,633,811,407,820,000 | 30.640501 | 119 | 0.502867 | false |
sagarpabba/robotframework-seleniumlibrary | src/SeleniumLibrary/screenshot.py | 9 | 3763 | # Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import base64
from robot import utils
class Screenshot(object):
def capture_screenshot(self, filename=None):
"""Takes a screenshot of the entire screen and embeds it into the log.
If no `filename` is given, the screenshot is saved into file
`selenium-screenshot-<counter>.png` under the directory where
the Robot Framework log file is written into. The `filename` is
also considered relative to the same directory, if it is not
given in absolute format.
When running on a locked Windows machine, the resulting screenshots
will be all black. A workaround is using the `Capture Page Screenshot`
keyword instead.
There were some changes to this keyword in the 2.3 release:
- Possibility to take screenshots also when the Selenium Server is
running on a remote machine was added.
- Support for absolute `filename` paths was added.
- Automatic creation of intermediate directories in the path
where the screenshot is saved was removed.
`OperatingSystem.Create Directory` can be used instead.
"""
data = self._selenium.capture_screenshot_to_string()
self._save_screenshot(data, filename)
def capture_page_screenshot(self, filename=None, css='background=#CCFFDD'):
"""Takes a screenshot of the current page and embeds it into the log.
`filename` argument specifies the name of the file to write the
        screenshot into. It works the same way as with `Capture Screenshot`.
`css` can be used to modify how the screenshot is taken. By default
        the background color is changed to avoid possible problems with
background leaking when the page layout is somehow broken.
Selenium currently supports this keyword out-of-the-box only with
Firefox browser. To make it work with IE, you can start the Selenium
Server with `-singleWindow` option and use `*ieproxy` as the browser.
Additionally, the browser independent `Capture Screenshot` keyword
can be used instead.
This keyword was added in SeleniumLibrary 2.3.
"""
data = self._selenium.capture_entire_page_screenshot_to_string(css)
self._save_screenshot(data, filename)
def _save_screenshot(self, data, filename):
path, link = self._get_screenshot_paths(filename)
outfile = open(path, 'wb')
        # 'decodestring' is used instead of 'b64decode' to support Jython 2.2
outfile.write(base64.decodestring(data))
outfile.close()
# Image is shown on its own row and thus prev row is closed on purpose
self._html('</td></tr><tr><td colspan="3"><a href="%s">'
'<img src="%s" width="800px"></a>' % (link, link))
def _get_screenshot_paths(self, filename):
if not filename:
filename = self._namegen.next()
else:
filename = filename.replace('/', os.sep)
logdir = self._get_log_dir()
path = os.path.join(logdir, filename)
link = utils.get_link_path(path, logdir)
return path, link
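# Illustrative Robot Framework usage of the keywords above (test data only,
# not part of this module; file names are assumptions):
# *** Test Cases ***
# Capture Both Kinds Of Screenshot
#     Capture Screenshot         whole-desktop.png
#     Capture Page Screenshot    current-page.png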
| apache-2.0 | 4,270,348,449,723,188,700 | 1,774,254,381,311,180,500 | 42.755814 | 79 | 0.675791 | false |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/vm/_completers.py | 9 | 1524 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.parameters import get_one_of_subscription_locations
from azure.cli.core.decorators import Completer
from azure.cli.command_modules.vm._actions import load_images_from_aliases_doc, get_vm_sizes
@Completer
def get_urn_aliases_completion_list(cmd, prefix, namespace): # pylint: disable=unused-argument
images = load_images_from_aliases_doc(cmd.cli_ctx)
return [i['urnAlias'] for i in images]
@Completer
def get_vm_size_completion_list(cmd, prefix, namespace): # pylint: disable=unused-argument
location = namespace.location
if not location:
location = get_one_of_subscription_locations(cmd.cli_ctx)
result = get_vm_sizes(cmd.cli_ctx, location)
return [r.name for r in result]
@Completer
def get_vm_run_command_completion_list(cmd, prefix, namespace): # pylint: disable=unused-argument
from ._client_factory import _compute_client_factory
try:
location = namespace.location
except AttributeError:
location = get_one_of_subscription_locations(cmd.cli_ctx)
result = _compute_client_factory(cmd.cli_ctx).virtual_machine_run_commands.list(location)
return [r.id for r in result]
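# Illustrative wiring (an assumption, not part of this module): azure-cli
# attaches these completers to CLI arguments roughly like
#   c.argument('size', completer=get_vm_size_completion_list)
# so the shell can tab-complete values such as VM sizes for the chosen location.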
| mit | -4,237,963,815,244,804,600 | -7,994,192,436,195,063,000 | 42.542857 | 98 | 0.652231 | false |
isaac-s/cloudify-manager | rest-service/manager_rest/flask_utils.py | 1 | 3338 | #########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from collections import namedtuple
from flask import Flask
from flask_migrate import Migrate
from flask_security import Security
from manager_rest import config
from manager_rest.storage import user_datastore, db
def setup_flask_app(manager_ip='localhost',
driver='',
hash_salt=None,
secret_key=None):
"""Setup a functioning flask app, when working outside the rest-service
:param manager_ip: The IP of the manager
:param driver: SQLA driver for postgres (e.g. pg8000)
:param hash_salt: The salt to be used when creating user passwords
:param secret_key: Secret key used when hashing flask tokens
:return: A Flask app
"""
app = Flask(__name__)
db_uri = _get_postgres_db_uri(manager_ip, driver)
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
set_flask_security_config(app, hash_salt, secret_key)
Security(app=app, datastore=user_datastore)
Migrate(app=app, db=db)
db.init_app(app)
app.app_context().push()
return app
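# Illustrative call (the IP and driver are assumptions): an external script can
# bootstrap a usable Flask app context with
#   app = setup_flask_app(manager_ip='10.0.0.5', driver='pg8000')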
def _get_postgres_db_uri(manager_ip, driver):
"""Get a valid SQLA DB URI
"""
dialect = 'postgresql+{0}'.format(driver) if driver else 'postgres'
conf = get_postgres_conf()
return '{dialect}://{username}:{password}@{host}/{db_name}'.format(
dialect=dialect,
username=conf.username,
password=conf.password,
host=manager_ip,
db_name=conf.db_name
)
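# For example, _get_postgres_db_uri('10.0.0.5', 'pg8000') returns
# 'postgresql+pg8000://cloudify:cloudify@10.0.0.5/cloudify_db'.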
def get_postgres_conf():
"""Return a namedtuple with info used to connect to cloudify's PG DB
"""
conf = namedtuple('PGConf', 'username password db_name')
return conf(
username='cloudify',
password='cloudify',
db_name='cloudify_db'
)
def set_flask_security_config(app, hash_salt=None, secret_key=None):
"""Set all necessary Flask-Security configurations
:param app: Flask app object
:param hash_salt: The salt to be used when creating user passwords
:param secret_key: Secret key used when hashing flask tokens
"""
hash_salt = hash_salt or config.instance.security_hash_salt
secret_key = secret_key or config.instance.security_secret_key
# Make sure that it's possible to get users from the datastore
# by username and not just by email (the default behavior)
app.config['SECURITY_USER_IDENTITY_ATTRIBUTES'] = 'username, email'
app.config['SECURITY_PASSWORD_HASH'] = 'pbkdf2_sha256'
app.config['SECURITY_TOKEN_MAX_AGE'] = 36000 # 10 hours
app.config['SECURITY_PASSWORD_SALT'] = hash_salt
app.config['SECURITY_REMEMBER_SALT'] = hash_salt
app.config['SECRET_KEY'] = secret_key
| apache-2.0 | -4,486,636,598,283,956,000 | 963,047,887,985,497,000 | 34.892473 | 77 | 0.685141 | false |
joakim-hove/ert | python/python/ert_gui/tools/export/export_tool.py | 4 | 1955 | # Copyright (C) 2014 Statoil ASA, Norway.
#
# The file 'export_tool.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from weakref import ref
from ert_gui.ertwidgets import resourceIcon
from ert_gui.ertwidgets.closabledialog import ClosableDialog
from ert_gui.ertwidgets.models.ertmodel import getCurrentCaseName
from ert_gui.tools import Tool
from ert_gui.tools.export import ExportPanel, Exporter, ExportKeywordModel
class ExportTool(Tool):
def __init__(self):
super(ExportTool, self).__init__("Export Data", "tools/export", resourceIcon("ide/table_export"))
self.__export_widget = None
self.__dialog = None
self.__exporter = None
self.setEnabled(ExportKeywordModel().hasKeywords())
def trigger(self):
if self.__export_widget is None:
self.__export_widget = ref(ExportPanel(self.parent()))
self.__exporter = Exporter()
self.__export_widget().runExport.connect(self.__exporter.runExport)
self.__export_widget().setSelectedCase(getCurrentCaseName())
self.__dialog = ref(ClosableDialog("Export", self.__export_widget(), self.parent()))
self.__export_widget().updateExportButton.connect(self.__dialog().toggleButton)
self.__dialog().addButton("Export", self.export)
self.__dialog().show()
def export(self):
self.__export_widget().export()
self.__dialog().accept()
| gpl-3.0 | 4,793,033,580,908,310,000 | -8,555,838,446,575,065,000 | 40.595745 | 105 | 0.694118 | false |
aabbox/kbengine | kbe/res/scripts/common/Lib/rlcompleter.py | 100 | 5763 | """Word completion for GNU readline.
The completer completes keywords, built-ins and globals in a selectable
namespace (which defaults to __main__); when completing NAME.NAME..., it
evaluates (!) the expression up to the last dot and completes its attributes.
It's very cool to do "import sys" type "sys.", hit the completion key (twice),
and see the list of names defined by the sys module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and generally cause
the completion to fail). This is a feature -- since readline sets the tty
device in raw (or cbreak) mode, printing a traceback wouldn't work well
without some complicated hoopla to save, reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary application
defined code to be executed if an object with a __getattr__ hook is found.
Since it is the responsibility of the application (or the user) to enable this
feature, I consider this an acceptable risk. More complicated expressions
(e.g. function calls or indexing operations) are *not* evaluated.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
import atexit
import builtins
import __main__
__all__ = ["Completer"]
class Completer:
def __init__(self, namespace = None):
"""Create a new completer for the command line.
Completer([namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
if namespace and not isinstance(namespace, dict):
raise TypeError('namespace must be a dictionary')
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
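    # For example, Completer({'my_variable': 1}).complete('my_va', 0) returns
    # 'my_variable', and the same call with state 1 returns None.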
def _callable_postfix(self, val, word):
if callable(val):
word = word + "("
return word
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
import keyword
matches = []
n = len(text)
for word in keyword.kwlist:
if word[:n] == text:
matches.append(word)
for nspace in [builtins.__dict__, self.namespace]:
for word, val in nspace.items():
if word[:n] == text and word != "__builtins__":
matches.append(self._callable_postfix(val, word))
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return []
expr, attr = m.group(1, 3)
try:
thisobject = eval(expr, self.namespace)
except Exception:
return []
# get the content of the object, except __builtins__
words = dir(thisobject)
if "__builtins__" in words:
words.remove("__builtins__")
if hasattr(thisobject, '__class__'):
words.append('__class__')
words.extend(get_class_members(thisobject.__class__))
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and hasattr(thisobject, word):
val = getattr(thisobject, word)
word = self._callable_postfix(val, "%s.%s" % (expr, word))
matches.append(word)
return matches
def get_class_members(klass):
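    # Collect dir() of the class and, recursively, of all its base classes;
    # duplicate names are possible.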
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
try:
import readline
except ImportError:
pass
else:
readline.set_completer(Completer().complete)
# Release references early at shutdown (the readline module's
# contents are quasi-immortal, and the completer function holds a
# reference to globals).
atexit.register(lambda: readline.set_completer(None))
| lgpl-3.0 | -5,528,708,637,828,465,000 | -98,298,411,951,543,550 | 33.927273 | 80 | 0.616693 | false |
Yearcoin-dev/yearcoin | qa/rpc-tests/invalidateblock.py | 104 | 3077 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class InvalidateTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
def run_test(self):
print("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
print("Mine 4 blocks on Node 0")
self.nodes[0].generate(4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
print("Mine competing 6 blocks on Node 1")
self.nodes[1].generate(6)
assert(self.nodes[1].getblockcount() == 6)
print("Connect nodes to force a reorg")
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
print("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
print("\nMake sure we won't reorg to a lower work chain:")
connect_nodes_bi(self.nodes,1,2)
print("Sync node 2 to node 1 so both have 6 blocks")
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
print("Invalidate block 5 on node 1 so its tip is now at 4")
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
print("Invalidate block 3 on node 2, so its tip is now 2")
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
print("..and then mine a block")
self.nodes[2].generate(1)
print("Verify all nodes are at the right height")
time.sleep(5)
for i in range(3):
print(i,self.nodes[i].getblockcount())
assert(self.nodes[2].getblockcount() == 3)
assert(self.nodes[0].getblockcount() == 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
| mit | -1,954,258,218,611,425,300 | -3,546,218,352,926,988,300 | 39.486842 | 95 | 0.630484 | false |
fafaman/django | django/template/utils.py | 308 | 4736 | import os
import warnings
from collections import Counter, OrderedDict
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
class InvalidTemplateEngineError(ImproperlyConfigured):
pass
class EngineHandler(object):
def __init__(self, templates=None):
"""
templates is an optional list of template engine definitions
(structured like settings.TEMPLATES).
"""
self._templates = templates
self._engines = {}
@cached_property
def templates(self):
if self._templates is None:
self._templates = settings.TEMPLATES
if not self._templates:
warnings.warn(
"You haven't defined a TEMPLATES setting. You must do so "
"before upgrading to Django 1.10. Otherwise Django will be "
"unable to load templates.", RemovedInDjango110Warning)
self._templates = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': settings.TEMPLATE_DIRS,
'OPTIONS': {
'allowed_include_roots': settings.ALLOWED_INCLUDE_ROOTS,
'context_processors': settings.TEMPLATE_CONTEXT_PROCESSORS,
'debug': settings.TEMPLATE_DEBUG,
'loaders': settings.TEMPLATE_LOADERS,
'string_if_invalid': settings.TEMPLATE_STRING_IF_INVALID,
},
},
]
templates = OrderedDict()
backend_names = []
for tpl in self._templates:
tpl = tpl.copy()
try:
# This will raise an exception if 'BACKEND' doesn't exist or
# isn't a string containing at least one dot.
default_name = tpl['BACKEND'].rsplit('.', 2)[-2]
except Exception:
invalid_backend = tpl.get('BACKEND', '<not defined>')
raise ImproperlyConfigured(
"Invalid BACKEND for a template engine: {}. Check "
"your TEMPLATES setting.".format(invalid_backend))
tpl.setdefault('NAME', default_name)
tpl.setdefault('DIRS', [])
tpl.setdefault('APP_DIRS', False)
tpl.setdefault('OPTIONS', {})
templates[tpl['NAME']] = tpl
backend_names.append(tpl['NAME'])
counts = Counter(backend_names)
duplicates = [alias for alias, count in counts.most_common() if count > 1]
if duplicates:
raise ImproperlyConfigured(
"Template engine aliases aren't unique, duplicates: {}. "
"Set a unique NAME for each engine in settings.TEMPLATES."
.format(", ".join(duplicates)))
return templates
def __getitem__(self, alias):
try:
return self._engines[alias]
except KeyError:
try:
params = self.templates[alias]
except KeyError:
raise InvalidTemplateEngineError(
"Could not find config for '{}' "
"in settings.TEMPLATES".format(alias))
# If importing or initializing the backend raises an exception,
# self._engines[alias] isn't set and this code may get executed
# again, so we must preserve the original params. See #24265.
params = params.copy()
backend = params.pop('BACKEND')
engine_cls = import_string(backend)
engine = engine_cls(params)
self._engines[alias] = engine
return engine
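    # Illustrative lookup (assuming a TEMPLATES entry whose NAME is 'django'):
    #   engines = EngineHandler()
    #   template = engines['django'].get_template('index.html')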
def __iter__(self):
return iter(self.templates)
def all(self):
return [self[alias] for alias in self]
@lru_cache.lru_cache()
def get_app_template_dirs(dirname):
"""
Return an iterable of paths of directories to load app templates from.
dirname is the name of the subdirectory containing templates inside
installed applications.
"""
template_dirs = []
for app_config in apps.get_app_configs():
if not app_config.path:
continue
template_dir = os.path.join(app_config.path, dirname)
if os.path.isdir(template_dir):
template_dirs.append(upath(template_dir))
# Immutable return value because it will be cached and shared by callers.
return tuple(template_dirs)
| bsd-3-clause | -8,363,846,446,572,310,000 | 3,521,563,972,168,453,000 | 35.713178 | 83 | 0.587416 | false |
huangciyin/youtube-dl | youtube_dl/extractor/rtbf.py | 44 | 1654 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
class RTBFIE(InfoExtractor):
_VALID_URL = r'https?://www.rtbf.be/video/[^\?]+\?id=(?P<id>\d+)'
_TEST = {
'url': 'https://www.rtbf.be/video/detail_les-diables-au-coeur-episode-2?id=1921274',
'md5': '799f334ddf2c0a582ba80c44655be570',
'info_dict': {
'id': '1921274',
'ext': 'mp4',
'title': 'Les Diables au coeur (épisode 2)',
'description': 'Football - Diables Rouges',
'duration': 3099,
'timestamp': 1398456336,
'upload_date': '20140425',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage('https://www.rtbf.be/video/embed?id=%s' % video_id, video_id)
data = json.loads(self._html_search_regex(
r'<div class="js-player-embed(?: player-embed)?" data-video="([^"]+)"', page, 'data video'))['data']
video_url = data.get('downloadUrl') or data.get('url')
if data['provider'].lower() == 'youtube':
return self.url_result(video_url, 'Youtube')
return {
'id': video_id,
'url': video_url,
'title': data['title'],
'description': data.get('description') or data.get('subtitle'),
'thumbnail': data['thumbnail']['large'],
'duration': data.get('duration') or data.get('realDuration'),
'timestamp': data['created'],
'view_count': data['viewCount'],
}
| unlicense | 7,156,215,253,108,511,000 | -1,407,858,610,982,851,300 | 32.734694 | 112 | 0.543255 | false |
forevernull/incubator-airflow | airflow/ti_deps/deps/runnable_exec_date_dep.py | 42 | 1941 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
class RunnableExecDateDep(BaseTIDep):
NAME = "Execution Date"
IGNOREABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
cur_date = datetime.now()
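        # A task instance cannot run while its execution date is still in the future.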
if ti.execution_date > cur_date:
yield self._failing_status(
reason="Execution date {0} is in the future (the current "
"date is {1}).".format(ti.execution_date.isoformat(),
cur_date.isoformat()))
if ti.task.end_date and ti.execution_date > ti.task.end_date:
yield self._failing_status(
reason="The execution date is {0} but this is after the task's end date "
"{1}.".format(
ti.execution_date.isoformat(),
ti.task.end_date.isoformat()))
if (ti.task.dag and
ti.task.dag.end_date and
ti.execution_date > ti.task.dag.end_date):
yield self._failing_status(
reason="The execution date is {0} but this is after the task's DAG's "
"end date {1}.".format(
ti.execution_date.isoformat(),
ti.task.dag.end_date.isoformat()))
| apache-2.0 | 8,798,046,123,592,006,000 | -8,175,114,636,860,805,000 | 39.4375 | 89 | 0.612056 | false |
Tecnativa/website | website_event_register_free_with_sale/controllers/website_event.py | 28 | 2630 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2015 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import http
from openerp.http import request
from openerp.addons.website_event_sale.controllers.main import website_event
class WebsiteEvent(website_event):
@http.route(['/event/cart/update'], type='http', auth="public",
methods=['POST'], website=True)
def cart_update(self, **post):
has_paid_tickets = False
post['free_tickets'] = 0
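        # Ticket quantities are posted as form fields named "ticket-<ticket_id>".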
for key, value in post.items():
qty = int(value or "0")
ticket_words = key.split("-")
ticket_id = (ticket_words[0] == 'ticket' and
int(ticket_words[1]) or None)
if not qty or not ticket_id:
continue
ticket = request.env['event.event.ticket'].sudo().browse(ticket_id)
if not ticket.price:
# Accumulate possible multiple free tickets
post['free_tickets'] = (
str(int(post['free_tickets']) + qty))
else:
has_paid_tickets = True
# Add to shopping cart the rest of the items
order = request.website.sale_get_order(force_create=1)
order.with_context(event_ticket_id=ticket.id)._cart_update(
product_id=ticket.product_id.id, add_qty=qty)
if not post['free_tickets'] and not has_paid_tickets:
return request.redirect("/event/%s" % post['event_id'])
request.session.update({
'free_tickets': post['free_tickets'],
'event_id': post['event_id'],
})
return request.redirect("/shop/checkout")
| agpl-3.0 | -4,575,355,721,008,508,400 | -1,527,300,194,252,357,600 | 45.964286 | 79 | 0.572243 | false |
sameetb-cuelogic/edx-platform-test | common/djangoapps/course_about/data.py | 16 | 1855 | """Data Aggregation Layer for the Course About API.
This is responsible for combining data from the following resources:
* CourseDescriptor
* CourseAboutDescriptor
"""
import logging
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from course_about.serializers import serialize_content
from course_about.errors import CourseNotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
log = logging.getLogger(__name__)
ABOUT_ATTRIBUTES = [
'effort'
]
def get_course_about_details(course_id): # pylint: disable=unused-argument
"""
Return course information for a given course id.
Args:
course_id(str) : The course id to retrieve course information for.
Returns:
Serializable dictionary of the Course About Information.
Raises:
CourseNotFoundError
"""
try:
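        # An unparsable course id is treated the same as a missing course.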
course_key = CourseKey.from_string(course_id)
course_descriptor = modulestore().get_course(course_key)
if course_descriptor is None:
raise CourseNotFoundError("course not found")
except InvalidKeyError as err:
raise CourseNotFoundError(err.message)
about_descriptor = {
attribute: _fetch_course_detail(course_key, attribute)
for attribute in ABOUT_ATTRIBUTES
}
course_info = serialize_content(course_descriptor=course_descriptor, about_descriptor=about_descriptor)
return course_info
def _fetch_course_detail(course_key, attribute):
"""
    Fetch the given 'about' attribute of the course from persistence and return its value.
"""
usage_key = course_key.make_usage_key('about', attribute)
try:
value = modulestore().get_item(usage_key).data
except ItemNotFoundError:
value = None
return value
| agpl-3.0 | -301,716,734,621,402,050 | 4,119,578,556,528,051,700 | 29.916667 | 108 | 0.722372 | false |
sbidoul/odoo | addons/project/__openerp__.py | 259 | 2562 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Project Management',
'version': '1.1',
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/project-management',
'category': 'Project Management',
'sequence': 8,
'summary': 'Projects, Tasks',
'depends': [
'base_setup',
'product',
'analytic',
'board',
'mail',
'resource',
'web_kanban'
],
'description': """
Track multi-level projects, tasks, work done on tasks
=====================================================
This application allows an operational project management system to organize your activities into tasks and plan the work you need to get the tasks completed.
Gantt diagrams will give you a graphical representation of your project plans, as well as resources availability and workload.
Dashboard / Reports for Project Management will include:
--------------------------------------------------------
* My Tasks
* Open Tasks
* Tasks Analysis
* Cumulative Flow
""",
'data': [
'security/project_security.xml',
'wizard/project_task_delegate_view.xml',
'security/ir.model.access.csv',
'project_data.xml',
'project_view.xml',
'res_partner_view.xml',
'report/project_report_view.xml',
'report/project_cumulative.xml',
'res_config_view.xml',
'views/project.xml',
],
'demo': ['project_demo.xml'],
'test': [
],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,921,916,777,010,440,700 | 5,089,227,915,159,927,000 | 34.09589 | 158 | 0.586651 | false |
rh-s/heat | heat_integrationtests/functional/test_hooks.py | 3 | 13215 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import yaml
from heat_integrationtests.common import test
LOG = logging.getLogger(__name__)
class HooksTest(test.HeatIntegrationTest):
def setUp(self):
super(HooksTest, self).setUp()
self.client = self.orchestration_client
self.template = {'heat_template_version': '2014-10-16',
'resources': {
'foo_step1': {'type': 'OS::Heat::RandomString'},
'foo_step2': {'type': 'OS::Heat::RandomString',
'depends_on': 'foo_step1'},
'foo_step3': {'type': 'OS::Heat::RandomString',
'depends_on': 'foo_step2'}}}
def test_hook_pre_create(self):
env = {'resource_registry':
{'resources':
{'foo_step2':
{'hooks': 'pre-create'}}}}
# Note we don't wait for CREATE_COMPLETE, because we need to
# signal to clear the hook before create will complete
stack_identifier = self.stack_create(
template=self.template,
environment=env,
expected_status='CREATE_IN_PROGRESS')
self._wait_for_resource_status(
stack_identifier, 'foo_step1', 'CREATE_COMPLETE')
self._wait_for_resource_status(
stack_identifier, 'foo_step2', 'INIT_COMPLETE')
ev = self.wait_for_event_with_reason(
stack_identifier,
reason='CREATE paused until Hook pre-create is cleared',
rsrc_name='foo_step2')
self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
self.client.resources.signal(stack_identifier, 'foo_step2',
data={'unset_hook': 'pre-create'})
ev = self.wait_for_event_with_reason(
stack_identifier,
reason='Hook pre-create is cleared',
rsrc_name='foo_step2')
self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
self._wait_for_resource_status(
stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
def test_hook_pre_update_nochange(self):
env = {'resource_registry':
{'resources':
{'foo_step2':
{'hooks': 'pre-update'}}}}
stack_identifier = self.stack_create(
template=self.template,
environment=env)
res_before = self.client.resources.get(stack_identifier, 'foo_step2')
# Note we don't wait for UPDATE_COMPLETE, because we need to
# signal to clear the hook before update will complete
self.update_stack(
stack_identifier,
template=self.template,
environment=env,
expected_status='UPDATE_IN_PROGRESS')
# Note when a hook is specified, the resource status doesn't change
# when we hit the hook, so we look for the event, then assert the
# state is unchanged.
self._wait_for_resource_status(
stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
ev = self.wait_for_event_with_reason(
stack_identifier,
reason='UPDATE paused until Hook pre-update is cleared',
rsrc_name='foo_step2')
self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
self.client.resources.signal(stack_identifier, 'foo_step2',
data={'unset_hook': 'pre-update'})
ev = self.wait_for_event_with_reason(
stack_identifier,
reason='Hook pre-update is cleared',
rsrc_name='foo_step2')
self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
self._wait_for_resource_status(
stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
res_after = self.client.resources.get(stack_identifier, 'foo_step2')
self.assertEqual(res_before.physical_resource_id,
res_after.physical_resource_id)
def test_hook_pre_update_replace(self):
env = {'resource_registry':
{'resources':
{'foo_step2':
{'hooks': 'pre-update'}}}}
stack_identifier = self.stack_create(
template=self.template,
environment=env)
res_before = self.client.resources.get(stack_identifier, 'foo_step2')
# Note we don't wait for UPDATE_COMPLETE, because we need to
# signal to clear the hook before update will complete
self.template['resources']['foo_step2']['properties'] = {'length': 10}
self.update_stack(
stack_identifier,
template=self.template,
environment=env,
expected_status='UPDATE_IN_PROGRESS')
# Note when a hook is specified, the resource status doesn't change
# when we hit the hook, so we look for the event, then assert the
# state is unchanged.
self._wait_for_resource_status(
stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
ev = self.wait_for_event_with_reason(
stack_identifier,
reason='UPDATE paused until Hook pre-update is cleared',
rsrc_name='foo_step2')
self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
self.client.resources.signal(stack_identifier, 'foo_step2',
data={'unset_hook': 'pre-update'})
ev = self.wait_for_event_with_reason(
stack_identifier,
reason='Hook pre-update is cleared',
rsrc_name='foo_step2')
self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
self._wait_for_resource_status(
stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
res_after = self.client.resources.get(stack_identifier, 'foo_step2')
self.assertNotEqual(res_before.physical_resource_id,
res_after.physical_resource_id)
def test_hook_pre_update_in_place(self):
env = {'resource_registry':
{'resources':
{'rg':
{'hooks': 'pre-update'}}}}
template = {'heat_template_version': '2014-10-16',
'resources': {
'rg': {
'type': 'OS::Heat::ResourceGroup',
'properties': {
'count': 1,
'resource_def': {
'type': 'OS::Heat::RandomString'}}}}}
        # The hook here is pre-update, so stack creation completes normally;
        # we only need to signal to clear the hook later, during the update
stack_identifier = self.stack_create(
template=template,
environment=env)
res_before = self.client.resources.get(stack_identifier, 'rg')
template['resources']['rg']['properties']['count'] = 2
self.update_stack(
stack_identifier,
template=template,
environment=env,
expected_status='UPDATE_IN_PROGRESS')
# Note when a hook is specified, the resource status doesn't change
# when we hit the hook, so we look for the event, then assert the
# state is unchanged.
self._wait_for_resource_status(
stack_identifier, 'rg', 'CREATE_COMPLETE')
ev = self.wait_for_event_with_reason(
stack_identifier,
reason='UPDATE paused until Hook pre-update is cleared',
rsrc_name='rg')
self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
self.client.resources.signal(stack_identifier, 'rg',
data={'unset_hook': 'pre-update'})
ev = self.wait_for_event_with_reason(
stack_identifier,
reason='Hook pre-update is cleared',
rsrc_name='rg')
self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
res_after = self.client.resources.get(stack_identifier, 'rg')
self.assertEqual(res_before.physical_resource_id,
res_after.physical_resource_id)
def test_hook_pre_create_nested(self):
files = {'nested.yaml': yaml.dump(self.template)}
env = {'resource_registry':
{'resources':
{'nested':
{'foo_step2':
{'hooks': 'pre-create'}}}}}
template = {'heat_template_version': '2014-10-16',
'resources': {
'nested': {'type': 'nested.yaml'}}}
# Note we don't wait for CREATE_COMPLETE, because we need to
# signal to clear the hook before create will complete
stack_identifier = self.stack_create(
template=template,
environment=env,
files=files,
expected_status='CREATE_IN_PROGRESS')
self._wait_for_resource_status(stack_identifier, 'nested',
'CREATE_IN_PROGRESS')
nested_identifier = self.assert_resource_is_a_stack(
stack_identifier, 'nested', wait=True)
self._wait_for_resource_status(
nested_identifier, 'foo_step1', 'CREATE_COMPLETE')
self._wait_for_resource_status(
nested_identifier, 'foo_step2', 'INIT_COMPLETE')
ev = self.wait_for_event_with_reason(
nested_identifier,
reason='CREATE paused until Hook pre-create is cleared',
rsrc_name='foo_step2')
self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
self.client.resources.signal(nested_identifier, 'foo_step2',
data={'unset_hook': 'pre-create'})
ev = self.wait_for_event_with_reason(
nested_identifier,
reason='Hook pre-create is cleared',
rsrc_name='foo_step2')
self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
self._wait_for_resource_status(
nested_identifier, 'foo_step2', 'CREATE_COMPLETE')
self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
def test_hook_pre_create_wildcard(self):
env = {'resource_registry':
{'resources':
{'foo_*':
{'hooks': 'pre-create'}}}}
# Note we don't wait for CREATE_COMPLETE, because we need to
# signal to clear the hook before create will complete
stack_identifier = self.stack_create(
template=self.template,
environment=env,
expected_status='CREATE_IN_PROGRESS')
self._wait_for_resource_status(
stack_identifier, 'foo_step1', 'INIT_COMPLETE')
self.wait_for_event_with_reason(
stack_identifier,
reason='CREATE paused until Hook pre-create is cleared',
rsrc_name='foo_step1')
self.client.resources.signal(stack_identifier, 'foo_step1',
data={'unset_hook': 'pre-create'})
self.wait_for_event_with_reason(
stack_identifier,
reason='Hook pre-create is cleared',
rsrc_name='foo_step1')
self._wait_for_resource_status(
stack_identifier, 'foo_step2', 'INIT_COMPLETE')
self.wait_for_event_with_reason(
stack_identifier,
reason='CREATE paused until Hook pre-create is cleared',
rsrc_name='foo_step2')
self.client.resources.signal(stack_identifier, 'foo_step2',
data={'unset_hook': 'pre-create'})
self.wait_for_event_with_reason(
stack_identifier,
reason='Hook pre-create is cleared',
rsrc_name='foo_step2')
self._wait_for_resource_status(
stack_identifier, 'foo_step3', 'INIT_COMPLETE')
self.wait_for_event_with_reason(
stack_identifier,
reason='CREATE paused until Hook pre-create is cleared',
rsrc_name='foo_step3')
self.client.resources.signal(stack_identifier, 'foo_step3',
data={'unset_hook': 'pre-create'})
self.wait_for_event_with_reason(
stack_identifier,
reason='Hook pre-create is cleared',
rsrc_name='foo_step3')
self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
| apache-2.0 | -8,922,958,416,142,746,000 | 4,753,959,275,623,854,000 | 45.045296 | 78 | 0.569126 | false |
dcraft2/dcraft1127-myclone | gui/dialog.py | 14 | 5395 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"Common dialogs (function wrappers for wxPython dialogs)"
__author__ = "Mariano Reingart ([email protected])"
__copyright__ = "Copyright (C) 2013- Mariano Reingart"
import wx
from wx.lib import dialogs
from .font import Font
def alert(message, title="", parent=None, scrolled=False, icon="exclamation"):
"Show a simple pop-up modal dialog"
if not scrolled:
icons = {'exclamation': wx.ICON_EXCLAMATION, 'error': wx.ICON_ERROR,
'question': wx.ICON_QUESTION, 'info': wx.ICON_INFORMATION}
style = wx.OK | icons[icon]
result = dialogs.messageDialog(parent, message, title, style)
else:
result = dialogs.scrolledMessageDialog(parent, message, title)
def prompt(message="", title="", default="", multiline=False, password=None,
parent=None):
"Modal dialog asking for an input, returns string or None if cancelled"
if password:
style = wx.TE_PASSWORD | wx.OK | wx.CANCEL
result = dialogs.textEntryDialog(parent, message, title, default, style)
elif multiline:
style = wx.TE_MULTILINE | wx.OK | wx.CANCEL
result = dialogs.textEntryDialog(parent, message, title, default, style)
# workaround for Mac OS X
result.text = '\n'.join(result.text.splitlines())
else:
result = dialogs.textEntryDialog(parent, message, title, default)
if result.accepted:
return result.text
def confirm(message="", title="", default=False, ok=False, cancel=False,
parent=None):
"Ask for confirmation (yes/no or ok and cancel), returns True or False"
style = wx.CENTRE
if ok:
style |= wx.OK
else:
style |= wx.YES | wx.NO
if default:
style |= wx.YES_DEFAULT
else:
style |= wx.NO_DEFAULT
if cancel:
style |= wx.CANCEL
result = dialogs.messageDialog(parent, message, title, style)
if cancel and result.returned == wx.ID_CANCEL:
return None
return result.accepted # True or False
def select_font(message="", title="", font=None, parent=None):
"Show a dialog to select a font"
if font is not None:
wx_font = font._get_wx_font() # use as default
else:
wx_font = None
font = Font() # create an empty font
result = dialogs.fontDialog(parent, font=wx_font)
if result.accepted:
font_data = result.fontData
result.color = result.fontData.GetColour().Get()
wx_font = result.fontData.GetChosenFont()
font.set_wx_font(wx_font)
wx_font = None
return font
def select_color(message="", title="", color=None, parent=None):
"Show a dialog to pick a color"
result = dialogs.colorDialog(parent, color=color)
return result.accepted and result.color
def open_file(title="Open", directory='', filename='',
wildcard='All Files (*.*)|*.*', multiple=False, parent=None):
"Show a dialog to select files to open, return path(s) if accepted"
style = wx.OPEN
if multiple:
style |= wx.MULTIPLE
result = dialogs.fileDialog(parent, title, directory, filename, wildcard,
style)
if result.paths and not multiple:
return result.paths[0]
else:
return result.paths
def save_file(title="Save", directory='', filename='',
wildcard='All Files (*.*)|*.*', overwrite=False, parent=None):
"Show a dialog to select file to save, return path(s) if accepted"
style = wx.SAVE
if not overwrite:
style |= wx.OVERWRITE_PROMPT
result = dialogs.fileDialog(parent, title, directory, filename, wildcard,
style)
return result.paths
def choose_directory(message='Choose a directory', path="", parent=None):
"Show a dialog to choose a directory"
result = dialogs.directoryDialog(parent, message, path)
return result.path
def single_choice(options=[], message='', title='', parent=None):
result = dialogs.singleChoiceDialog(parent, message, title, options)
return result.selection
def multiple_choice(options=[], message='', title='', parent=None):
result = dialogs.multipleChoiceDialog(parent, message, title, options)
return result.selection
def find(default='', whole_words=0, case_sensitive=0, parent=None):
"Shows a find text dialog"
result = dialogs.findDialog(parent, default, whole_words, case_sensitive)
return {'text': result.searchText, 'whole_words': result.wholeWordsOnly,
'case_sensitive': result.caseSensitive}
if __name__ == "__main__":
app = wx.App(redirect=False)
alert("hola!", "Alert!", icon="error")
text = prompt("Input your name:", "Prompt...", "mariano")
print text
ok = confirm("do you agree?", "Confirm?", default=True, cancel=True)
print ok
font = select_font("Select a font!")
print font
color = select_color("Pick a color")
print color
print open_file()
print save_file(overwrite=True)
print choose_directory()
print single_choice(["1", 'b', '$'])
print multiple_choice(["1", 'b', '$'])
print find("hola")
| lgpl-3.0 | -8,123,360,083,837,842,000 | 7,856,714,445,143,773,000 | 33.493421 | 80 | 0.609639 | false |
ecdpalma/napscheduler | napscheduler/scheduler.py | 1 | 16056 | """
This module is the main part of the library. It houses the Scheduler class
and related exceptions.
"""
from threading import Thread, Event, Lock
from datetime import datetime, timedelta
from logging import getLogger
import os
import sys
from napscheduler.util import *
from napscheduler.triggers import SimpleTrigger, IntervalTrigger, CronTrigger
from napscheduler.job import Job, MaxInstancesReachedError
from napscheduler.events import *
from napscheduler.threadpool import ThreadPool
from napscheduler.observable import Observable
logger = getLogger(__name__)
class SchedulerAlreadyRunningError(Exception):
"""
Raised when attempting to start or configure the scheduler when it's
already running.
"""
def __str__(self):
return 'Scheduler is already running'
class Scheduler(Observable):
"""
This class is responsible for scheduling jobs and triggering
their execution.
"""
_stopped = False
_thread = None
def __init__(self, gconfig={}, **options):
Observable.__init__(self)
self._wakeup = Event()
self._jobs = []
self._jobs_lock = Lock()
self._pending_jobs = []
self.configure(gconfig, **options)
def configure(self, gconfig={}, **options):
"""
Reconfigures the scheduler with the given options. Can only be done
when the scheduler isn't running.
"""
if self.running:
raise SchedulerAlreadyRunningError
# Set general options
config = combine_opts(gconfig, 'napscheduler.', options)
self.misfire_grace_time = int(config.pop('misfire_grace_time', 1))
self.coalesce = asbool(config.pop('coalesce', True))
self.daemonic = asbool(config.pop('daemonic', True))
# Configure the thread pool
if 'threadpool' in config:
self._threadpool = maybe_ref(config['threadpool'])
else:
threadpool_opts = combine_opts(config, 'threadpool.')
self._threadpool = ThreadPool(**threadpool_opts)
def start(self):
"""
Starts the scheduler in a new thread.
"""
if self.running:
raise SchedulerAlreadyRunningError
# Schedule all pending jobs
for job in self._pending_jobs:
self._real_add_job(job, False)
del self._pending_jobs[:]
self._stopped = False
self._thread = Thread(target=self._main_loop, name='NAPScheduler')
self._thread.setDaemon(self.daemonic)
self._thread.start()
def shutdown(self, wait=True, shutdown_threadpool=True):
"""
Shuts down the scheduler and terminates the thread.
Does not interrupt any currently running jobs.
:param wait: ``True`` to wait until all currently executing jobs have
finished (if ``shutdown_threadpool`` is also ``True``)
:param shutdown_threadpool: ``True`` to shut down the thread pool
"""
if not self.running:
return
self._stopped = True
self._wakeup.set()
# Shut down the thread pool
if shutdown_threadpool:
self._threadpool.shutdown(wait)
# Wait until the scheduler thread terminates
self._thread.join()
@property
def running(self):
return not self._stopped and self._thread and self._thread.isAlive()
def _real_add_job(self, job, wakeup):
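        # Compute the first run time; a job that would never fire is rejected outright.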
job.compute_next_run_time(datetime.now())
if not job.next_run_time:
raise ValueError('Not adding job since it would never be run')
self._jobs_lock.acquire()
self._jobs.append(job)
self._jobs_lock.release()
# Notify listeners that a new job has been added
event = SchedulerEvent(EVENT_SCHEDULER_JOB_ADDED, job)
self.notify_listeners(event)
logger.info('Added job "%s" to scheduler', job)
# Notify the scheduler about the new job
if wakeup:
self._wakeup.set()
def add_job(self, trigger, func, args, kwargs,
**options):
"""
Adds the given job to the job list and notifies the scheduler thread.
        :param trigger: trigger that determines when ``func`` is called
:param func: callable to run at the given time
:param args: list of positional arguments to call func with
:param kwargs: dict of keyword arguments to call func with
:rtype: :class:`~napscheduler.job.Job`
"""
job = Job(trigger, func, args or [], kwargs or {},
options.pop('misfire_grace_time', self.misfire_grace_time),
options.pop('coalesce', self.coalesce), **options)
if not self.running:
self._pending_jobs.append(job)
logger.info('Adding job tentatively -- it will be properly '
'scheduled when the scheduler starts')
else:
self._real_add_job(job, True)
return job
def _remove_job(self, job):
self._jobs.remove(job)
# Notify listeners that a job has been removed
event = SchedulerEvent(EVENT_SCHEDULER_JOB_REMOVED, job)
self.notify_listeners(event)
logger.info('Removed job "%s"', job)
def add_date_job(self, func, date, args=None, kwargs=None, **options):
"""
Schedules a job to be completed on a specific date and time.
:param func: callable to run at the given time
:param date: the date/time to run the job at
:param name: name of the job
:param misfire_grace_time: seconds after the designated run time that
the job is still allowed to be run
:type date: :class:`datetime.date`
:rtype: :class:`~napscheduler.job.Job`
"""
trigger = SimpleTrigger(date)
return self.add_job(trigger, func, args, kwargs, **options)
def add_interval_job(self, func, weeks=0, days=0, hours=0, minutes=0,
seconds=0, start_date=None, args=None, kwargs=None,
**options):
"""
Schedules a job to be completed on specified intervals.
:param func: callable to run
:param weeks: number of weeks to wait
:param days: number of days to wait
:param hours: number of hours to wait
:param minutes: number of minutes to wait
:param seconds: number of seconds to wait
:param start_date: when to first execute the job and start the
counter (default is after the given interval)
:param args: list of positional arguments to call func with
:param kwargs: dict of keyword arguments to call func with
:param name: name of the job
:param misfire_grace_time: seconds after the designated run time that
the job is still allowed to be run
:rtype: :class:`~napscheduler.job.Job`
"""
interval = timedelta(weeks=weeks, days=days, hours=hours,
minutes=minutes, seconds=seconds)
trigger = IntervalTrigger(interval, start_date)
return self.add_job(trigger, func, args, kwargs, **options)
def add_cron_job(self, func, year=None, month=None, day=None, week=None,
day_of_week=None, hour=None, minute=None, second=None,
start_date=None, args=None, kwargs=None, **options):
"""
Schedules a job to be completed on times that match the given
expressions.
:param func: callable to run
:param year: year to run on
:param month: month to run on
:param day: day of month to run on
:param week: week of the year to run on
:param day_of_week: weekday to run on (0 = Monday)
:param hour: hour to run on
        :param minute: minute to run on
        :param second: second to run on
:param args: list of positional arguments to call func with
:param kwargs: dict of keyword arguments to call func with
:param name: name of the job
:param misfire_grace_time: seconds after the designated run time that
the job is still allowed to be run
:return: the scheduled job
:rtype: :class:`~napscheduler.job.Job`
"""
trigger = CronTrigger(year=year, month=month, day=day, week=week,
day_of_week=day_of_week, hour=hour,
minute=minute, second=second,
start_date=start_date)
return self.add_job(trigger, func, args, kwargs, **options)
def cron_schedule(self, **options):
"""
Decorator version of :meth:`add_cron_job`.
This decorator does not wrap its host function.
Unscheduling decorated functions is possible by passing the ``job``
attribute of the scheduled function to :meth:`unschedule_job`.
"""
def inner(func):
func.job = self.add_cron_job(func, **options)
return func
return inner
def interval_schedule(self, **options):
"""
Decorator version of :meth:`add_interval_job`.
This decorator does not wrap its host function.
Unscheduling decorated functions is possible by passing the ``job``
attribute of the scheduled function to :meth:`unschedule_job`.
"""
def inner(func):
func.job = self.add_interval_job(func, **options)
return func
return inner
def get_jobs(self):
"""
Returns a list of all scheduled jobs.
:return: list of :class:`~napscheduler.job.Job` objects
"""
return self._jobs
def unschedule_job(self, job):
"""
Removes a job, preventing it from being run any more.
"""
self._jobs_lock.acquire()
try:
if job in list(self._jobs):
self._remove_job(job)
return
finally:
self._jobs_lock.release()
raise KeyError('Job "%s" is not scheduled in any job store' % job)
def unschedule_func(self, func):
"""
Removes all jobs that would execute the given function.
"""
found = False
self._jobs_lock.acquire()
try:
for job in list(self._jobs):
if job.func == func:
self._remove_job(job)
found = True
finally:
self._jobs_lock.release()
if not found:
raise KeyError('The given function is not scheduled in this '
'scheduler')
def print_jobs(self, out=None):
"""
Prints out a textual listing of all jobs currently scheduled on this
scheduler.
:param out: a file-like object to print to (defaults to **sys.stdout**
if nothing is given)
"""
out = out or sys.stdout
job_strs = []
self._jobs_lock.acquire()
try:
if self._jobs:
for job in self._jobs:
job_strs.append('%s' % job)
else:
job_strs.append('No scheduled jobs')
finally:
self._jobs_lock.release()
out.write(os.linesep.join(job_strs) + os.linesep)
def _run_job(self, job, run_times):
"""
Acts as a harness that runs the actual job code in a thread.
"""
for run_time in run_times:
# See if the job missed its run time window, and handle possible
# misfires accordingly
difference = datetime.now() - run_time
grace_time = timedelta(seconds=job.misfire_grace_time)
if difference > grace_time:
# Notify listeners about a missed run
event = JobEvent(EVENT_JOB_MISSED, job, run_time)
self.notify_listeners(event)
job.notify_listeners(event)
logger.warning('Run time of job "%s" was missed by %s',
job, difference)
else:
try:
job.add_instance()
except MaxInstancesReachedError:
event = JobEvent(EVENT_JOB_MISSED, job, run_time)
self.notify_listeners(event)
job.notify_listeners(event)
logger.warning('Execution of job "%s" skipped: '
'maximum number of running instances '
'reached (%d)', job, job.max_instances)
break
logger.info('Running job "%s" (scheduled at %s)', job,
run_time)
try:
retval = job.func(*job.args, **job.kwargs)
except:
# Notify listeners about the exception
exc, tb = sys.exc_info()[1:]
event = JobEvent(EVENT_JOB_ERROR, job, run_time,
exception=exc, traceback=tb)
self.notify_listeners(event)
job.notify_listeners(event)
logger.exception('Job "%s" raised an exception', job)
else:
# Notify listeners about successful execution
event = JobEvent(EVENT_JOB_EXECUTED, job, run_time,
retval=retval)
self.notify_listeners(event)
job.notify_listeners(event)
logger.info('Job "%s" executed successfully', job)
job.remove_instance()
# If coalescing is enabled, don't attempt any further runs
if job.coalesce:
break
def _process_jobs(self, now):
"""
Iterates through jobs, starts pending jobs
and figures out the next wakeup time.
"""
next_wakeup_time = None
self._jobs_lock.acquire()
try:
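            # Work on a snapshot of the active jobs so entries can be removed while iterating.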
for job in tuple([job for job in self._jobs if job.active is True]):
run_times = job.get_run_times(now)
if run_times:
self._threadpool.submit(self._run_job, job, run_times)
# Increase the job's run count
if job.coalesce:
job.runs += 1
else:
job.runs += len(run_times)
# Don't keep finished jobs around
if not job.compute_next_run_time(now + timedelta(microseconds=1)):
self._remove_job(job)
if not next_wakeup_time:
next_wakeup_time = job.next_run_time
elif job.next_run_time:
next_wakeup_time = min(next_wakeup_time,
job.next_run_time)
return next_wakeup_time
finally:
self._jobs_lock.release()
def _main_loop(self):
"""Executes jobs on schedule."""
logger.info('Scheduler started')
self.notify_listeners(SchedulerEvent(EVENT_SCHEDULER_START))
self._wakeup.clear()
while not self._stopped:
logger.debug('Looking for jobs to run')
now = datetime.now()
next_wakeup_time = self._process_jobs(now)
# Sleep until the next job is scheduled to be run,
# a new job is added or the scheduler is stopped
if next_wakeup_time is not None:
wait_seconds = time_difference(next_wakeup_time, now)
logger.debug('Next wakeup is due at %s (in %f seconds)',
next_wakeup_time, wait_seconds)
self._wakeup.wait(wait_seconds)
else:
logger.debug('No jobs; waiting until a job is added')
self._wakeup.wait()
self._wakeup.clear()
logger.info('Scheduler has been shut down')
self.notify_listeners(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))
| mit | 7,558,082,462,729,335,000 | -4,028,350,657,113,455,600 | 36.166667 | 86 | 0.564275 | false |
bukalov/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/queueengine.py | 120 | 6867 | # Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import sys
import traceback
from datetime import datetime, timedelta
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.outputtee import OutputTee
_log = logging.getLogger(__name__)
# FIXME: This will be caught by "except Exception:" blocks, we should consider
# making this inherit from SystemExit instead (or BaseException, except that's not recommended).
class TerminateQueue(Exception):
pass
class QueueEngineDelegate:
def queue_log_path(self):
raise NotImplementedError, "subclasses must implement"
def work_item_log_path(self, work_item):
raise NotImplementedError, "subclasses must implement"
def begin_work_queue(self):
raise NotImplementedError, "subclasses must implement"
def should_continue_work_queue(self):
raise NotImplementedError, "subclasses must implement"
def next_work_item(self):
raise NotImplementedError, "subclasses must implement"
def process_work_item(self, work_item):
raise NotImplementedError, "subclasses must implement"
def handle_unexpected_error(self, work_item, message):
raise NotImplementedError, "subclasses must implement"
class QueueEngine:
def __init__(self, name, delegate, wakeup_event, seconds_to_sleep=120):
self._name = name
self._delegate = delegate
self._wakeup_event = wakeup_event
self._output_tee = OutputTee()
self._seconds_to_sleep = seconds_to_sleep
log_date_format = "%Y-%m-%d %H:%M:%S"
handled_error_code = 2
# Child processes exit with a special code to the parent queue process can detect the error was handled.
@classmethod
def exit_after_handled_error(cls, error):
_log.error(error)
sys.exit(cls.handled_error_code)
def run(self):
self._begin_logging()
self._delegate.begin_work_queue()
while (self._delegate.should_continue_work_queue()):
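            # Each pass pulls one work item from the delegate and processes it.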
try:
self._ensure_work_log_closed()
work_item = self._delegate.next_work_item()
if not work_item:
self._sleep("No work item.")
continue
# FIXME: Work logs should not depend on bug_id specificaly.
# This looks fixed, no?
self._open_work_log(work_item)
try:
if not self._delegate.process_work_item(work_item):
_log.warning("Unable to process work item.")
continue
except ScriptError, e:
# Use a special exit code to indicate that the error was already
# handled in the child process and we should just keep looping.
if e.exit_code == self.handled_error_code:
continue
message = "Unexpected failure when processing patch! Please file a bug against webkit-patch.\n%s" % e.message_with_output()
self._delegate.handle_unexpected_error(work_item, message)
except TerminateQueue, e:
self._stopping("TerminateQueue exception received.")
return 0
except KeyboardInterrupt, e:
self._stopping("User terminated queue.")
return 1
except Exception, e:
traceback.print_exc()
# Don't try tell the status bot, in case telling it causes an exception.
self._sleep("Exception while preparing queue")
self._stopping("Delegate terminated queue.")
return 0
def _stopping(self, message):
_log.info("\n%s" % message)
self._delegate.stop_work_queue(message)
# Be careful to shut down our OutputTee or the unit tests will be unhappy.
self._ensure_work_log_closed()
self._output_tee.remove_log(self._queue_log)
def _begin_logging(self):
self._queue_log = self._output_tee.add_log(self._delegate.queue_log_path())
self._work_log = None
def _open_work_log(self, work_item):
work_item_log_path = self._delegate.work_item_log_path(work_item)
if not work_item_log_path:
return
self._work_log = self._output_tee.add_log(work_item_log_path)
def _ensure_work_log_closed(self):
# If we still have a bug log open, close it.
if self._work_log:
self._output_tee.remove_log(self._work_log)
self._work_log = None
def _now(self):
"""Overriden by the unit tests to allow testing _sleep_message"""
return datetime.now()
def _sleep_message(self, message):
wake_time = self._now() + timedelta(seconds=self._seconds_to_sleep)
if self._seconds_to_sleep < 3 * 60:
sleep_duration_text = str(self._seconds_to_sleep) + ' seconds'
else:
sleep_duration_text = str(round(self._seconds_to_sleep / 60)) + ' minutes'
return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(self.log_date_format), sleep_duration_text)
def _sleep(self, message):
_log.info(self._sleep_message(message))
self._wakeup_event.wait(self._seconds_to_sleep)
self._wakeup_event.clear()
| bsd-3-clause | -3,312,378,739,534,687,700 | 1,895,127,823,124,275,700 | 40.618182 | 144 | 0.652978 | false |
Zimbra-Community/python-zimbra | tests/test_request_json.py | 3 | 10019 | """ Request tests """
import json
from unittest import TestCase
from pythonzimbra.request_json import RequestJson
from pythonzimbra.exceptions.request import RequestHeaderContextException
class TestRequestJson(TestCase):
""" Request tests
"""
request = None
""" The request to be tested against """
def cleanUp(self):
""" Clean up after one step to leave a dedicated result for the other
test cases.
"""
self.setUp()
def setUp(self):
self.request = RequestJson()
def test_empty_request(self):
""" Create an empty request and check the created xml
"""
expected_result = {
"Body": {},
"Header": {
"context": {
"_jsns": "urn:zimbra",
"format": {
"type": "js"
}
}
}
}
self.assertEqual(
expected_result,
json.loads(self.request.get_request())
)
def test_set_context_params_failtype(self):
""" Add context parameters to the request and expect the method to
send an exception
"""
self.assertRaises(
RequestHeaderContextException,
self.request.set_context_params,
{
'invalidParam': {
'invalidAttribute': 'invalidValue'
}
}
)
def test_set_context_params(self):
""" Add all currently accepted params and check the result
"""
self.request.set_context_params(
{
'authToken': {
'_content': '1234567890abcdef'
},
'authTokenControl': {
'voidOnExpired': '1'
},
'session': {
'id': '1234567890abcdef',
'seq': '1234567890',
'type': 'admin'
},
'account': {
'by': 'name',
'_content': '[email protected]'
},
'change': {
'token': '1234567890abcdef',
'type': 'new'
},
'targetServer': {
'_content': 'mailboxserver.zimbra.com'
},
'userAgent': {
'name': 'Mozilla',
'version': '1.0'
},
'via': {
'_content': 'proxyserver.zimbra.com'
}
}
)
expected_result = {
"Body": {},
"Header": {
"context": {
"authToken": {
"_content": "1234567890abcdef"
},
"account": {
"by": "name",
"_content": "[email protected]"
},
"via": {
"_content": "proxyserver.zimbra.com"
},
"targetServer": {
"_content": "mailboxserver.zimbra.com"
},
"format": {
"type": "js"
},
"_jsns": "urn:zimbra",
"session": {
"type": "admin",
"id": "1234567890abcdef",
"seq": "1234567890"
},
"authTokenControl": {
"voidOnExpired": "1"
},
"userAgent": {
"version": "1.0",
"name": "Mozilla"
},
"change": {
"token": "1234567890abcdef",
"type": "new"
}
}
}
}
self.assertEqual(
expected_result,
json.loads(self.request.get_request())
)
# Clean up after this test
self.cleanUp()
def test_enable_batch_default(self):
""" Test enabling batch requests
"""
# Check with default parameter
self.request.enable_batch()
expected_result = {
"Body": {
"BatchRequest": {
"onerror": "continue",
"_jsns": "urn:zimbra"
}
},
"Header": {
"context": {
"_jsns": "urn:zimbra",
"format": {
"type": "js"
}
}
}
}
self.assertEqual(
expected_result,
json.loads(self.request.get_request())
)
# Clean up
self.cleanUp()
def test_enable_batch_stop(self):
""" Test enabling batch requests with additional parameter
"""
self.request.enable_batch('stop')
expected_result = {
"Body": {
"BatchRequest": {
"onerror": "stop",
"_jsns": "urn:zimbra"
}
},
"Header": {
"context": {
"_jsns": "urn:zimbra",
"format": {
"type": "js"
}
}
}
}
self.assertEqual(
expected_result,
json.loads(self.request.get_request())
)
# Clean up
self.cleanUp()
def test_batch_add_request(self):
""" Test adding multiple request to a batch request
"""
self.request.enable_batch()
request_id = self.request.add_request(
'GetInfoRequest',
{
'sections': 'mbox,prefs'
},
"urn_zimbra"
)
self.assertIsInstance(
request_id,
int,
msg="Returned request_id for request 1 is not of type int, "
"but of type %s" % (
type(request_id)
)
)
self.assertEqual(
1,
request_id,
msg="Returned request_id for request 1 is not 1, but %s" % (
str(request_id)
)
)
expected_result = {
"Body": {
"BatchRequest": {
"onerror": "continue",
"_jsns": "urn:zimbra",
"GetInfoRequest": {
"_jsns": "urn_zimbra",
"sections": "mbox,prefs",
"requestId": 1
}
}
},
"Header": {
"context": {
"_jsns": "urn:zimbra",
"format": {
"type": "js"
}
}
}
}
self.assertEqual(
expected_result,
json.loads(self.request.get_request())
)
request_id = self.request.add_request(
'GetInfoRequest',
{
'sections': 'zimlets'
},
"urn:zimbra"
)
self.assertIsInstance(
request_id,
int,
msg="Returned request_id for request 2 is not of type int, "
"but of type %s" % (
type(request_id)
)
)
self.assertEqual(
2,
request_id,
msg="Returned request_id for request 2 is not 2, but %s" % (
str(request_id)
)
)
expected_result = {
"Body": {
"BatchRequest": {
"onerror": "continue",
"_jsns": "urn:zimbra",
"GetInfoRequest": [
{
"_jsns": "urn_zimbra",
"sections": "mbox,prefs",
"requestId": 1
},
{
"_jsns": "urn:zimbra",
"sections": "zimlets",
"requestId": 2
}
]
}
},
"Header": {
"context": {
"_jsns": "urn:zimbra",
"format": {
"type": "js"
}
}
}
}
self.assertEqual(
expected_result,
json.loads(self.request.get_request())
)
# Clean up
self.setUp()
def test_add_request(self):
""" Test adding a request
"""
request_id = self.request.add_request(
'GetInfoRequest',
{
'sections': 'mbox,prefs'
},
'urn:zimbra'
)
self.assertIsNone(
request_id,
msg="Returned request_id for request 1 is not none, "
"but %s" % (
str(request_id)
)
)
expected_result = {
"Body": {
"GetInfoRequest": {
"_jsns": "urn:zimbra",
"sections": "mbox,prefs"
}
},
"Header": {
"context": {
"_jsns": "urn:zimbra",
"format": {
"type": "js"
}
}
}
}
self.assertEqual(
expected_result,
json.loads(self.request.get_request())
)
# Clean up
self.setUp()
def tearDown(self):
self.request = None
| bsd-2-clause | 6,553,910,179,825,910,000 | 6,023,541,425,738,745,000 | 24.822165 | 77 | 0.353329 | false |
liulion/mayavi | tvtk/tests/test_indenter.py | 2 | 9233 | """Tests for indenter.py."""
# Author: Prabhu Ramachandran
# License: BSD style
# Copyright (c) 2004, Enthought, Inc.
import unittest
import cStringIO
from tvtk import indenter
class TestIndent(unittest.TestCase):
def test_basic(self):
"""Simple tests for indenter."""
id = indenter.Indent()
self.assertEqual(str(id), '')
id.incr()
self.assertEqual(str(id), ' ')
id.incr()
self.assertEqual(str(id), ' ')
id.decr()
self.assertEqual(str(id), ' ')
id.decr()
self.assertEqual(str(id), '')
id.incr(); id.incr()
id.reset()
self.assertEqual(str(id), '')
def test_format(self):
"""Tests if formatting works ok."""
id = indenter.Indent()
id.incr()
# test one liner with trailing newlines
txt = """class foo:\n\n \n \n"""
t1 = id.format(txt)
self.assertEqual(t1, ' class foo:\n')
# test one liner with no trailing newline.
txt = """class foo:"""
t1 = id.format(txt)
self.assertEqual(t1, ' class foo:\n')
# test multi-line text.
txt = """print "hi!"
if name == 'hi':
print "hi, hi!"
"""
res = """ print "hi!"\n if name == 'hi':\n print "hi, hi!"\n"""
self.assertEqual(id.format(txt), res)
txt = """
class Foo:
def __init__(self):
pass
def _get_a(self):
return self._a"""
res = """ class Foo:
def __init__(self):
pass
def _get_a(self):
return self._a""" + '\n'
self.assertEqual(id.format(txt), res)
class TestVTKDocMassager(unittest.TestCase):
def test_doc_massage(self):
"""Test massage method."""
doc = "This is a test. All VTK classes and vtk classes\n"\
"are named like this: vtkActor, vtkLODProperty,\n"\
"vtkXMLDataReader, vtk3DSImporter etc. The methods \n"\
"of a VTK object are like GetData, GetOutput, \n"\
"SetRepresentationToWireframe. Ivars are named like\n"\
"SpecularColor, Write3DPropsAsRasterImage etc."
ret = "This is a test. All VTK classes and vtk classes\n"\
"are named like this: Actor, LODProperty,\n"\
"XMLDataReader, ThreeDSImporter etc. The methods \n"\
"of a VTK object are like get_data, get_output, \n"\
"set_representation_to_wireframe. Ivars are named like\n"\
"specular_color, write3d_props_as_raster_image etc."
dm = indenter.VTKDocMassager()
self.assertEqual(dm.massage(doc), ret)
def test_rename_class(self):
"""Test if VTK classes are renamed correctly."""
dm = indenter.VTKDocMassager()
t = 'vtkFooBar vtkXMLDataReader vtk3DSReader vtk2000Bug'
r = dm._rename_class(t)
correct = 'FooBar XMLDataReader ThreeDSReader Two000Bug'
self.assertEqual(r, correct)
def test_remove_sig(self):
"""Test if function signature is removed correctly."""
dm = indenter.VTKDocMassager()
t = 'V.GetOutput(int) -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput (int idx);\n'\
'V.GetOutput() -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput ();\n\n'\
' Set/Get the output of this reader.\n'
r = dm._remove_sig(t)
correct = ' Set/Get the output of this reader.\n'
self.assertEqual(r, correct)
t = 'V.GetOutput(int) -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput (int idx);\n'\
'V.GetOutput() -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput ();\n\n'
r = dm._remove_sig(t)
correct = ''
self.assertEqual(r, correct)
def test_class_doc(self):
"""Test if class docs are generated correctly."""
dm = indenter.VTKDocMassager()
indent = indenter.Indent()
out = cStringIO.StringIO()
doc = "vtkLODProperty, vtkXMLDataReader, vtk3DSImporter\n"\
"SetRepresentationToWireframe, Write3DPropsAsRasterImage"
dm.write_class_doc(doc, out, indent)
out.seek(0)
ret = out.read()
correct = ''' """
LODProperty, XMLDataReader, ThreeDSImporter
set_representation_to_wireframe, write3d_props_as_raster_image
"""\n'''
#print ret
#print correct
self.assertEqual(ret, correct)
# Test empty doc
out = cStringIO.StringIO()
doc = ""
dm.write_class_doc(doc, out, indent)
out.seek(0)
ret = out.read()
self.assertEqual(ret, ' """\n \n """\n')
def test_trait_doc(self):
"""Test if trait docs are generated correctly."""
dm = indenter.VTKDocMassager()
indent = indenter.Indent()
out = cStringIO.StringIO()
doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput (int idx);\n'\
'V.GetOutput() -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput ();\n\n'\
'vtkLODProperty, vtkXMLDataReader, vtk3DSImporter\n'\
'SetRepresentationToWireframe, Write3DPropsAsRasterImage'
dm.write_trait_doc(doc, out, indent)
out.seek(0)
ret = out.read()
correct = ''' """
LODProperty, XMLDataReader, ThreeDSImporter
set_representation_to_wireframe, write3d_props_as_raster_image
"""\n'''
#print ret
#print correct
self.assertEqual(ret, correct)
# Test empty doc.
out = cStringIO.StringIO()
doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput (int idx);\n'\
'V.GetOutput() -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput ();\n\n'
dm.write_trait_doc(doc, out, indent)
out.seek(0)
ret = out.read()
self.assertEqual(ret, ' """\n \n """\n')
def test_method_doc(self):
"""Test if method docs are generated correctly."""
dm = indenter.VTKDocMassager()
indent = indenter.Indent()
out = cStringIO.StringIO()
doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput (int idx);\n'\
'V.GetOutput() -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput ();\n\n'\
'vtkLODProperty, vtkXMLDataReader, vtk3DSImporter\n'\
'SetRepresentationToWireframe, Write3DPropsAsRasterImage'
dm.write_method_doc(doc, out, indent)
out.seek(0)
ret = out.read()
correct = ''' """
V.get_output(int) -> StructuredPoints
V.get_output() -> StructuredPoints
LODProperty, XMLDataReader, ThreeDSImporter
set_representation_to_wireframe, write3d_props_as_raster_image
"""\n'''
#print ret
#print correct
self.assertEqual(ret, correct)
# Test empty doc.
out = cStringIO.StringIO()
doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput (int idx);\n'\
'V.GetOutput() -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput ();\n\n'
dm.write_method_doc(doc, out, indent)
out.seek(0)
ret = out.read()
correct = ''' """
V.get_output(int) -> StructuredPoints
V.get_output() -> StructuredPoints
"""\n'''
#print ret
#print correct
self.assertEqual(ret, correct)
def test_get_method_doc(self):
"""Test if get_method_doc works correctly."""
dm = indenter.VTKDocMassager()
doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput (int idx);\n'\
'V.GetOutput() -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput ();\n\n'\
'vtkLODProperty, vtkXMLDataReader, vtk3DSImporter\n'\
'SetRepresentationToWireframe, Write3DPropsAsRasterImage'
ret = dm.get_method_doc(doc)
correct = 'V.get_output(int) -> StructuredPoints\n'\
'V.get_output() -> StructuredPoints\n\n'\
'LODProperty, XMLDataReader, ThreeDSImporter\n'\
'set_representation_to_wireframe, '\
'write3d_props_as_raster_image'
#print ret
#print correct
self.assertEqual(ret, correct)
# Test empty doc (only signature exists).
doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput (int idx);\n'\
'V.GetOutput() -> vtkStructuredPoints\n'\
'C++: vtkStructuredPoints *GetOutput ();\n\n'
ret = dm.get_method_doc(doc)
correct = 'V.get_output(int) -> StructuredPoints\n'\
'V.get_output() -> StructuredPoints\n'
self.assertEqual(ret, correct)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -8,827,943,312,132,584,000 | -9,016,456,172,913,090,000 | 36.995885 | 84 | 0.562222 | false |
adit-chandra/tensorflow | tensorflow/lite/tutorials/mnist_tflite.py | 5 | 2870 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to evaluate accuracy of TFLite flatbuffer model on mnist dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf # pylint: disable=g-bad-import-order
from tensorflow.lite.tutorials import dataset
flags = tf.app.flags
flags.DEFINE_string('data_dir', '/tmp/data_dir',
'Directory where data is stored.')
flags.DEFINE_string('model_file', '',
'The path to the TFLite flatbuffer model file.')
flags = flags.FLAGS
def test_image_generator():
# Generates an iterator over images
with tf.compat.v1.Session() as sess:
input_data = tf.compat.v1.data.make_one_shot_iterator(dataset.test(
flags.data_dir)).get_next()
try:
while True:
yield sess.run(input_data)
except tf.errors.OutOfRangeError:
pass
def run_eval(interpreter, input_image):
"""Performs evaluation for input image over specified model.
Args:
interpreter: TFLite interpreter initialized with model to execute.
input_image: Image input to the model.
Returns:
output: output tensor of model being executed.
"""
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test model on the input images.
input_image = np.reshape(input_image, input_details[0]['shape'])
interpreter.set_tensor(input_details[0]['index'], input_image)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
output = np.squeeze(output_data)
return output
def main(_):
interpreter = tf.lite.Interpreter(model_path=flags.model_file)
interpreter.allocate_tensors()
num_correct, total = 0, 0
for input_data in test_image_generator():
output = run_eval(interpreter, input_data[0])
total += 1
if output == input_data[1]:
num_correct += 1
if total % 500 == 0:
print('Accuracy after %i images: %f' %
(total, float(num_correct) / float(total)))
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.compat.v1.app.run(main)
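# Illustrative invocation sketch (added for clarity; the paths are hypothetical
# and not part of the original tutorial script). The flags defined above would
# be supplied on the command line, e.g.:
#
#     python mnist_tflite.py --data_dir=/tmp/data_dir --model_file=/tmp/mnist.tflite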
| apache-2.0 | 4,521,375,208,151,257,600 | 4,777,331,814,877,944,000 | 31.988506 | 80 | 0.681185 | false |
vertigo235/Sick-Beard-XEM | cherrypy/_cptools.py | 35 | 19648 | """CherryPy tools. A "tool" is any helper, adapted to CP.
Tools are usually designed to be used in a variety of ways (although some
may only offer one if they choose):
Library calls:
All tools are callables that can be used wherever needed.
The arguments are straightforward and should be detailed within the
docstring.
Function decorators:
All tools, when called, may be used as decorators which configure
individual CherryPy page handlers (methods on the CherryPy tree).
That is, "@tools.anytool()" should "turn on" the tool via the
decorated function's _cp_config attribute.
CherryPy config:
If a tool exposes a "_setup" callable, it will be called
once per Request (if the feature is "turned on" via config).
Tools may be implemented as any object with a namespace. The builtins
are generally either modules or instances of the tools.Tool class.
"""
import cherrypy
import warnings
def _getargs(func):
"""Return the names of all static arguments to the given function."""
# Use this instead of importing inspect for less mem overhead.
import types
if isinstance(func, types.MethodType):
func = func.im_func
co = func.func_code
return co.co_varnames[:co.co_argcount]
_attr_error = ("CherryPy Tools cannot be turned on directly. Instead, turn them "
"on via config, or use them as decorators on your page handlers.")
class Tool(object):
"""A registered function for use with CherryPy request-processing hooks.
help(tool.callable) should give you more information about this Tool.
"""
namespace = "tools"
def __init__(self, point, callable, name=None, priority=50):
self._point = point
self.callable = callable
self._name = name
self._priority = priority
self.__doc__ = self.callable.__doc__
self._setargs()
def _get_on(self):
raise AttributeError(_attr_error)
def _set_on(self, value):
raise AttributeError(_attr_error)
on = property(_get_on, _set_on)
def _setargs(self):
"""Copy func parameter names to obj attributes."""
try:
for arg in _getargs(self.callable):
setattr(self, arg, None)
except (TypeError, AttributeError):
if hasattr(self.callable, "__call__"):
for arg in _getargs(self.callable.__call__):
setattr(self, arg, None)
# IronPython 1.0 raises NotImplementedError because
# inspect.getargspec tries to access Python bytecode
# in co_code attribute.
except NotImplementedError:
pass
# IronPython 1B1 may raise IndexError in some cases,
# but if we trap it here it doesn't prevent CP from working.
except IndexError:
pass
def _merged_args(self, d=None):
"""Return a dict of configuration entries for this Tool."""
if d:
conf = d.copy()
else:
conf = {}
tm = cherrypy.serving.request.toolmaps[self.namespace]
if self._name in tm:
conf.update(tm[self._name])
if "on" in conf:
del conf["on"]
return conf
def __call__(self, *args, **kwargs):
"""Compile-time decorator (turn on the tool in config).
For example:
@tools.proxy()
def whats_my_base(self):
return cherrypy.request.base
whats_my_base.exposed = True
"""
if args:
raise TypeError("The %r Tool does not accept positional "
"arguments; you must use keyword arguments."
% self._name)
def tool_decorator(f):
if not hasattr(f, "_cp_config"):
f._cp_config = {}
subspace = self.namespace + "." + self._name + "."
f._cp_config[subspace + "on"] = True
for k, v in kwargs.items():
f._cp_config[subspace + k] = v
return f
return tool_decorator
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
conf = self._merged_args()
p = conf.pop("priority", None)
if p is None:
p = getattr(self.callable, "priority", self._priority)
cherrypy.serving.request.hooks.attach(self._point, self.callable,
priority=p, **conf)
class HandlerTool(Tool):
"""Tool which is called 'before main', that may skip normal handlers.
If the tool successfully handles the request (by setting response.body),
    it should return True. This will cause CherryPy to skip any 'normal' page
handler. If the tool did not handle the request, it should return False
to tell CherryPy to continue on and call the normal page handler. If the
tool is declared AS a page handler (see the 'handler' method), returning
False will raise NotFound.
"""
def __init__(self, callable, name=None):
Tool.__init__(self, 'before_handler', callable, name)
def handler(self, *args, **kwargs):
"""Use this tool as a CherryPy page handler.
For example:
class Root:
nav = tools.staticdir.handler(section="/nav", dir="nav",
root=absDir)
"""
def handle_func(*a, **kw):
handled = self.callable(*args, **self._merged_args(kwargs))
if not handled:
raise cherrypy.NotFound()
return cherrypy.serving.response.body
handle_func.exposed = True
return handle_func
def _wrapper(self, **kwargs):
if self.callable(**kwargs):
cherrypy.serving.request.handler = None
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
conf = self._merged_args()
p = conf.pop("priority", None)
if p is None:
p = getattr(self.callable, "priority", self._priority)
cherrypy.serving.request.hooks.attach(self._point, self._wrapper,
priority=p, **conf)
class HandlerWrapperTool(Tool):
"""Tool which wraps request.handler in a provided wrapper function.
The 'newhandler' arg must be a handler wrapper function that takes a
'next_handler' argument, plus *args and **kwargs. Like all page handler
functions, it must return an iterable for use as cherrypy.response.body.
For example, to allow your 'inner' page handlers to return dicts
which then get interpolated into a template:
def interpolator(next_handler, *args, **kwargs):
filename = cherrypy.request.config.get('template')
cherrypy.response.template = env.get_template(filename)
response_dict = next_handler(*args, **kwargs)
return cherrypy.response.template.render(**response_dict)
cherrypy.tools.jinja = HandlerWrapperTool(interpolator)
"""
def __init__(self, newhandler, point='before_handler', name=None, priority=50):
self.newhandler = newhandler
self._point = point
self._name = name
self._priority = priority
def callable(self, debug=False):
innerfunc = cherrypy.serving.request.handler
def wrap(*args, **kwargs):
return self.newhandler(innerfunc, *args, **kwargs)
cherrypy.serving.request.handler = wrap
class ErrorTool(Tool):
"""Tool which is used to replace the default request.error_response."""
def __init__(self, callable, name=None):
Tool.__init__(self, None, callable, name)
def _wrapper(self):
self.callable(**self._merged_args())
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
cherrypy.serving.request.error_response = self._wrapper
# Builtin tools #
from cherrypy.lib import cptools, encoding, auth, static, jsontools
from cherrypy.lib import sessions as _sessions, xmlrpc as _xmlrpc
from cherrypy.lib import caching as _caching
from cherrypy.lib import auth_basic, auth_digest
class SessionTool(Tool):
"""Session Tool for CherryPy.
sessions.locking:
When 'implicit' (the default), the session will be locked for you,
just before running the page handler.
When 'early', the session will be locked before reading the request
body. This is off by default for safety reasons; for example,
a large upload would block the session, denying an AJAX
progress meter (see http://www.cherrypy.org/ticket/630).
When 'explicit' (or any other value), you need to call
cherrypy.session.acquire_lock() yourself before using
session data.
"""
def __init__(self):
# _sessions.init must be bound after headers are read
Tool.__init__(self, 'before_request_body', _sessions.init)
def _lock_session(self):
cherrypy.serving.session.acquire_lock()
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
hooks = cherrypy.serving.request.hooks
conf = self._merged_args()
p = conf.pop("priority", None)
if p is None:
p = getattr(self.callable, "priority", self._priority)
hooks.attach(self._point, self.callable, priority=p, **conf)
locking = conf.pop('locking', 'implicit')
if locking == 'implicit':
hooks.attach('before_handler', self._lock_session)
elif locking == 'early':
# Lock before the request body (but after _sessions.init runs!)
hooks.attach('before_request_body', self._lock_session,
priority=60)
else:
# Don't lock
pass
hooks.attach('before_finalize', _sessions.save)
hooks.attach('on_end_request', _sessions.close)
def regenerate(self):
"""Drop the current session and make a new one (with a new id)."""
sess = cherrypy.serving.session
sess.regenerate()
# Grab cookie-relevant tool args
conf = dict([(k, v) for k, v in self._merged_args().items()
if k in ('path', 'path_header', 'name', 'timeout',
'domain', 'secure')])
_sessions.set_response_cookie(**conf)
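# Illustrative config sketch for the locking modes documented in SessionTool
# above (added for clarity; the values are hypothetical):
#
#     [/]
#     tools.sessions.on = True
#     tools.sessions.locking = 'explicit'
#
# With 'explicit' locking, the page handler must call
# cherrypy.session.acquire_lock() itself before using session data.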
class XMLRPCController(object):
"""A Controller (page handler collection) for XML-RPC.
To use it, have your controllers subclass this base class (it will
turn on the tool for you).
You can also supply the following optional config entries:
tools.xmlrpc.encoding: 'utf-8'
tools.xmlrpc.allow_none: 0
XML-RPC is a rather discontinuous layer over HTTP; dispatching to the
appropriate handler must first be performed according to the URL, and
then a second dispatch step must take place according to the RPC method
specified in the request body. It also allows a superfluous "/RPC2"
prefix in the URL, supplies its own handler args in the body, and
requires a 200 OK "Fault" response instead of 404 when the desired
method is not found.
Therefore, XML-RPC cannot be implemented for CherryPy via a Tool alone.
This Controller acts as the dispatch target for the first half (based
on the URL); it then reads the RPC method from the request body and
does its own second dispatch step based on that method. It also reads
body params, and returns a Fault on error.
The XMLRPCDispatcher strips any /RPC2 prefix; if you aren't using /RPC2
    in your URLs, you can safely skip turning on the XMLRPCDispatcher.
    Otherwise, you need to declare it in config:
request.dispatch: cherrypy.dispatch.XMLRPCDispatcher()
"""
# Note we're hard-coding this into the 'tools' namespace. We could do
# a huge amount of work to make it relocatable, but the only reason why
# would be if someone actually disabled the default_toolbox. Meh.
_cp_config = {'tools.xmlrpc.on': True}
def default(self, *vpath, **params):
rpcparams, rpcmethod = _xmlrpc.process_body()
subhandler = self
for attr in str(rpcmethod).split('.'):
subhandler = getattr(subhandler, attr, None)
if subhandler and getattr(subhandler, "exposed", False):
body = subhandler(*(vpath + rpcparams), **params)
else:
# http://www.cherrypy.org/ticket/533
# if a method is not found, an xmlrpclib.Fault should be returned
# raising an exception here will do that; see
# cherrypy.lib.xmlrpc.on_error
raise Exception('method "%s" is not supported' % attr)
conf = cherrypy.serving.request.toolmaps['tools'].get("xmlrpc", {})
_xmlrpc.respond(body,
conf.get('encoding', 'utf-8'),
conf.get('allow_none', 0))
return cherrypy.serving.response.body
default.exposed = True
class SessionAuthTool(HandlerTool):
def _setargs(self):
for name in dir(cptools.SessionAuth):
if not name.startswith("__"):
setattr(self, name, None)
class CachingTool(Tool):
"""Caching Tool for CherryPy."""
def _wrapper(self, **kwargs):
request = cherrypy.serving.request
if _caching.get(**kwargs):
request.handler = None
else:
if request.cacheable:
# Note the devious technique here of adding hooks on the fly
request.hooks.attach('before_finalize', _caching.tee_output,
priority=90)
_wrapper.priority = 20
def _setup(self):
"""Hook caching into cherrypy.request."""
conf = self._merged_args()
p = conf.pop("priority", None)
cherrypy.serving.request.hooks.attach('before_handler', self._wrapper,
priority=p, **conf)
class Toolbox(object):
"""A collection of Tools.
This object also functions as a config namespace handler for itself.
Custom toolboxes should be added to each Application's toolboxes dict.
"""
def __init__(self, namespace):
self.namespace = namespace
def __setattr__(self, name, value):
# If the Tool._name is None, supply it from the attribute name.
if isinstance(value, Tool):
if value._name is None:
value._name = name
value.namespace = self.namespace
object.__setattr__(self, name, value)
def __enter__(self):
"""Populate request.toolmaps from tools specified in config."""
cherrypy.serving.request.toolmaps[self.namespace] = map = {}
def populate(k, v):
toolname, arg = k.split(".", 1)
bucket = map.setdefault(toolname, {})
bucket[arg] = v
return populate
def __exit__(self, exc_type, exc_val, exc_tb):
"""Run tool._setup() for each tool in our toolmap."""
map = cherrypy.serving.request.toolmaps.get(self.namespace)
if map:
for name, settings in map.items():
if settings.get("on", False):
tool = getattr(self, name)
tool._setup()
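# Illustrative sketch of a custom toolbox as described in the Toolbox
# docstring (added for clarity; the names and the mounted application are
# hypothetical):
#
#     mytools = Toolbox("mytools")
#     mytools.stamp = Tool('before_finalize', lambda: None)
#     app = cherrypy.tree.mount(Root(), '/')
#     app.toolboxes['mytools'] = mytools
#
# Tools in the custom namespace can then be switched on via config entries
# such as "mytools.stamp.on = True".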
class DeprecatedTool(Tool):
_name = None
warnmsg = "This Tool is deprecated."
def __init__(self, point, warnmsg=None):
self.point = point
if warnmsg is not None:
self.warnmsg = warnmsg
def __call__(self, *args, **kwargs):
warnings.warn(self.warnmsg)
def tool_decorator(f):
return f
return tool_decorator
def _setup(self):
warnings.warn(self.warnmsg)
default_toolbox = _d = Toolbox("tools")
_d.session_auth = SessionAuthTool(cptools.session_auth)
_d.proxy = Tool('before_request_body', cptools.proxy, priority=30)
_d.response_headers = Tool('on_start_resource', cptools.response_headers)
_d.log_tracebacks = Tool('before_error_response', cptools.log_traceback)
_d.log_headers = Tool('before_error_response', cptools.log_request_headers)
_d.log_hooks = Tool('on_end_request', cptools.log_hooks, priority=100)
_d.err_redirect = ErrorTool(cptools.redirect)
_d.etags = Tool('before_finalize', cptools.validate_etags, priority=75)
_d.decode = Tool('before_request_body', encoding.decode)
# the order of encoding, gzip, caching is important
_d.encode = Tool('before_handler', encoding.ResponseEncoder, priority=70)
_d.gzip = Tool('before_finalize', encoding.gzip, priority=80)
_d.staticdir = HandlerTool(static.staticdir)
_d.staticfile = HandlerTool(static.staticfile)
_d.sessions = SessionTool()
_d.xmlrpc = ErrorTool(_xmlrpc.on_error)
_d.caching = CachingTool('before_handler', _caching.get, 'caching')
_d.expires = Tool('before_finalize', _caching.expires)
_d.tidy = DeprecatedTool('before_finalize',
"The tidy tool has been removed from the standard distribution of CherryPy. "
"The most recent version can be found at http://tools.cherrypy.org/browser.")
_d.nsgmls = DeprecatedTool('before_finalize',
"The nsgmls tool has been removed from the standard distribution of CherryPy. "
"The most recent version can be found at http://tools.cherrypy.org/browser.")
_d.ignore_headers = Tool('before_request_body', cptools.ignore_headers)
_d.referer = Tool('before_request_body', cptools.referer)
_d.basic_auth = Tool('on_start_resource', auth.basic_auth)
_d.digest_auth = Tool('on_start_resource', auth.digest_auth)
_d.trailing_slash = Tool('before_handler', cptools.trailing_slash, priority=60)
_d.flatten = Tool('before_finalize', cptools.flatten)
_d.accept = Tool('on_start_resource', cptools.accept)
_d.redirect = Tool('on_start_resource', cptools.redirect)
_d.autovary = Tool('on_start_resource', cptools.autovary, priority=0)
_d.json_in = Tool('before_request_body', jsontools.json_in, priority=30)
_d.json_out = Tool('before_handler', jsontools.json_out, priority=30)
_d.auth_basic = Tool('before_handler', auth_basic.basic_auth, priority=1)
_d.auth_digest = Tool('before_handler', auth_digest.digest_auth, priority=1)
del _d, cptools, encoding, auth, static
| gpl-3.0 | -3,063,080,762,473,110,500 | 1,044,752,572,151,344,100 | 37.453815 | 83 | 0.597974 | false |
SetBased/py-etlt | test/helper/AllenTest.py | 1 | 2762 | import unittest
from etlt.helper.Allen import Allen
class AllenTest(unittest.TestCase):
"""
    Test cases for the Allen interval algebra class.
"""
# ------------------------------------------------------------------------------------------------------------------
def _test1(self, expected, x, y):
relation1 = Allen.relation(x[0], x[1], y[0], y[1])
self.assertEqual(expected, relation1)
relation2 = Allen.relation(y[0], y[1], x[0], x[1])
self.assertEqual(relation1, -1 * relation2)
relation3 = Allen.relation(x[1], x[0], y[0], y[1])
self.assertIsNone(relation3)
relation4 = Allen.relation(x[0], x[1], y[1], y[0])
self.assertIsNone(relation4)
relation5 = Allen.relation(x[1], x[0], y[1], y[0])
self.assertIsNone(relation5)
relation6 = Allen.relation(y[1], y[0], x[0], x[1])
self.assertIsNone(relation6)
relation7 = Allen.relation(y[0], y[1], x[1], x[0])
self.assertIsNone(relation7)
relation8 = Allen.relation(y[1], y[0], x[1], x[0])
self.assertIsNone(relation8)
# ------------------------------------------------------------------------------------------------------------------
def test_x_takes_place_before_y(self):
self._test1(Allen.X_BEFORE_Y, (1, 3), (5, 7))
# ------------------------------------------------------------------------------------------------------------------
def test_x_meets_y(self):
self._test1(Allen.X_MEETS_Y, (1, 2), (3, 5))
# ------------------------------------------------------------------------------------------------------------------
def test_x_overlaps_with_y(self):
self._test1(Allen.X_OVERLAPS_WITH_Y, (1, 4), (3, 5))
self._test1(Allen.X_OVERLAPS_WITH_Y, (1, 3), (3, 5))
# ------------------------------------------------------------------------------------------------------------------
def test_x_starts_y(self):
self._test1(Allen.X_STARTS_Y, (1, 2), (1, 5))
# ------------------------------------------------------------------------------------------------------------------
def test_x_during_y(self):
self._test1(Allen.X_DURING_Y, (2, 3), (1, 5))
# ------------------------------------------------------------------------------------------------------------------
def test_x_finish_y(self):
self._test1(Allen.X_FINISHES_Y, (3, 5), (1, 5))
# ------------------------------------------------------------------------------------------------------------------
def test_x_equal_y(self):
self._test1(Allen.X_EQUAL_Y, (1, 5), (1, 5))
# ----------------------------------------------------------------------------------------------------------------------
| mit | 4,741,894,070,162,777,000 | -2,831,857,838,832,256,500 | 40.848485 | 120 | 0.34323 | false |
nikesh-mahalka/nova | nova/api/openstack/compute/schemas/fixed_ips.py | 79 | 1027 | # Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
reserve = {
'type': 'object',
'properties': {
'reserve': parameter_types.none,
},
'required': ['reserve'],
'additionalProperties': False,
}
unreserve = {
'type': 'object',
'properties': {
'unreserve': parameter_types.none,
},
'required': ['unreserve'],
'additionalProperties': False,
}
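# Illustrative request bodies (added for clarity; not part of the original
# schema module) that these schemas are meant to validate:
#
#     {"reserve": null}      matches ``reserve``
#     {"unreserve": null}    matches ``unreserve``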
| apache-2.0 | -8,021,963,518,206,702,000 | -2,770,402,289,931,849,000 | 27.527778 | 78 | 0.674781 | false |
msebire/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/win32/context_i386.py | 102 | 16108 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
CONTEXT structure for i386.
"""
__revision__ = "$Id$"
from winappdbg.win32.defines import *
from winappdbg.win32.version import ARCH_I386
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- CONTEXT structures and constants -----------------------------------------
# The following values specify the type of access in the first parameter
# of the exception record when the exception code specifies an access
# violation.
EXCEPTION_READ_FAULT = 0 # exception caused by a read
EXCEPTION_WRITE_FAULT = 1 # exception caused by a write
EXCEPTION_EXECUTE_FAULT = 8 # exception caused by an instruction fetch
CONTEXT_i386 = 0x00010000 # this assumes that i386 and
CONTEXT_i486 = 0x00010000 # i486 have identical context records
CONTEXT_CONTROL = (CONTEXT_i386 | long(0x00000001)) # SS:SP, CS:IP, FLAGS, BP
CONTEXT_INTEGER = (CONTEXT_i386 | long(0x00000002)) # AX, BX, CX, DX, SI, DI
CONTEXT_SEGMENTS = (CONTEXT_i386 | long(0x00000004)) # DS, ES, FS, GS
CONTEXT_FLOATING_POINT = (CONTEXT_i386 | long(0x00000008)) # 387 state
CONTEXT_DEBUG_REGISTERS = (CONTEXT_i386 | long(0x00000010)) # DB 0-3,6,7
CONTEXT_EXTENDED_REGISTERS = (CONTEXT_i386 | long(0x00000020)) # cpu specific extensions
CONTEXT_FULL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS)
CONTEXT_ALL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS | \
CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS | \
CONTEXT_EXTENDED_REGISTERS)
SIZE_OF_80387_REGISTERS = 80
MAXIMUM_SUPPORTED_EXTENSION = 512
# typedef struct _FLOATING_SAVE_AREA {
# DWORD ControlWord;
# DWORD StatusWord;
# DWORD TagWord;
# DWORD ErrorOffset;
# DWORD ErrorSelector;
# DWORD DataOffset;
# DWORD DataSelector;
# BYTE RegisterArea[SIZE_OF_80387_REGISTERS];
# DWORD Cr0NpxState;
# } FLOATING_SAVE_AREA;
class FLOATING_SAVE_AREA(Structure):
_pack_ = 1
_fields_ = [
('ControlWord', DWORD),
('StatusWord', DWORD),
('TagWord', DWORD),
('ErrorOffset', DWORD),
('ErrorSelector', DWORD),
('DataOffset', DWORD),
('DataSelector', DWORD),
('RegisterArea', BYTE * SIZE_OF_80387_REGISTERS),
('Cr0NpxState', DWORD),
]
_integer_members = ('ControlWord', 'StatusWord', 'TagWord', 'ErrorOffset', 'ErrorSelector', 'DataOffset', 'DataSelector', 'Cr0NpxState')
@classmethod
def from_dict(cls, fsa):
'Instance a new structure from a Python dictionary.'
fsa = dict(fsa)
s = cls()
for key in cls._integer_members:
setattr(s, key, fsa.get(key))
ra = fsa.get('RegisterArea', None)
if ra is not None:
for index in compat.xrange(0, SIZE_OF_80387_REGISTERS):
s.RegisterArea[index] = ra[index]
return s
def to_dict(self):
'Convert a structure into a Python dictionary.'
fsa = dict()
for key in self._integer_members:
fsa[key] = getattr(self, key)
ra = [ self.RegisterArea[index] for index in compat.xrange(0, SIZE_OF_80387_REGISTERS) ]
ra = tuple(ra)
fsa['RegisterArea'] = ra
return fsa
PFLOATING_SAVE_AREA = POINTER(FLOATING_SAVE_AREA)
LPFLOATING_SAVE_AREA = PFLOATING_SAVE_AREA
# typedef struct _CONTEXT {
# DWORD ContextFlags;
# DWORD Dr0;
# DWORD Dr1;
# DWORD Dr2;
# DWORD Dr3;
# DWORD Dr6;
# DWORD Dr7;
# FLOATING_SAVE_AREA FloatSave;
# DWORD SegGs;
# DWORD SegFs;
# DWORD SegEs;
# DWORD SegDs;
# DWORD Edi;
# DWORD Esi;
# DWORD Ebx;
# DWORD Edx;
# DWORD Ecx;
# DWORD Eax;
# DWORD Ebp;
# DWORD Eip;
# DWORD SegCs;
# DWORD EFlags;
# DWORD Esp;
# DWORD SegSs;
# BYTE ExtendedRegisters[MAXIMUM_SUPPORTED_EXTENSION];
# } CONTEXT;
class CONTEXT(Structure):
arch = ARCH_I386
_pack_ = 1
# Context Frame
#
    #  This frame has several purposes: 1) it is used as an argument to
    #  NtContinue, 2) it is used to construct a call frame for APC delivery,
# and 3) it is used in the user level thread creation routines.
#
# The layout of the record conforms to a standard call frame.
_fields_ = [
# The flags values within this flag control the contents of
# a CONTEXT record.
#
# If the context record is used as an input parameter, then
# for each portion of the context record controlled by a flag
# whose value is set, it is assumed that that portion of the
# context record contains valid context. If the context record
        # is being used to modify a thread's context, then only that
        # portion of the thread's context will be modified.
#
# If the context record is used as an IN OUT parameter to capture
# the context of a thread, then only those portions of the thread's
# context corresponding to set flags will be returned.
#
# The context record is never used as an OUT only parameter.
('ContextFlags', DWORD),
# This section is specified/returned if CONTEXT_DEBUG_REGISTERS is
# set in ContextFlags. Note that CONTEXT_DEBUG_REGISTERS is NOT
# included in CONTEXT_FULL.
('Dr0', DWORD),
('Dr1', DWORD),
('Dr2', DWORD),
('Dr3', DWORD),
('Dr6', DWORD),
('Dr7', DWORD),
# This section is specified/returned if the
# ContextFlags word contains the flag CONTEXT_FLOATING_POINT.
('FloatSave', FLOATING_SAVE_AREA),
# This section is specified/returned if the
# ContextFlags word contains the flag CONTEXT_SEGMENTS.
('SegGs', DWORD),
('SegFs', DWORD),
('SegEs', DWORD),
('SegDs', DWORD),
# This section is specified/returned if the
# ContextFlags word contains the flag CONTEXT_INTEGER.
('Edi', DWORD),
('Esi', DWORD),
('Ebx', DWORD),
('Edx', DWORD),
('Ecx', DWORD),
('Eax', DWORD),
# This section is specified/returned if the
# ContextFlags word contains the flag CONTEXT_CONTROL.
('Ebp', DWORD),
('Eip', DWORD),
('SegCs', DWORD), # MUST BE SANITIZED
('EFlags', DWORD), # MUST BE SANITIZED
('Esp', DWORD),
('SegSs', DWORD),
# This section is specified/returned if the ContextFlags word
# contains the flag CONTEXT_EXTENDED_REGISTERS.
# The format and contexts are processor specific.
('ExtendedRegisters', BYTE * MAXIMUM_SUPPORTED_EXTENSION),
]
_ctx_debug = ('Dr0', 'Dr1', 'Dr2', 'Dr3', 'Dr6', 'Dr7')
_ctx_segs = ('SegGs', 'SegFs', 'SegEs', 'SegDs', )
_ctx_int = ('Edi', 'Esi', 'Ebx', 'Edx', 'Ecx', 'Eax')
_ctx_ctrl = ('Ebp', 'Eip', 'SegCs', 'EFlags', 'Esp', 'SegSs')
@classmethod
def from_dict(cls, ctx):
'Instance a new structure from a Python dictionary.'
ctx = Context(ctx)
s = cls()
ContextFlags = ctx['ContextFlags']
setattr(s, 'ContextFlags', ContextFlags)
if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
for key in s._ctx_debug:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT:
fsa = ctx['FloatSave']
s.FloatSave = FLOATING_SAVE_AREA.from_dict(fsa)
if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
for key in s._ctx_segs:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
for key in s._ctx_int:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
for key in s._ctx_ctrl:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_EXTENDED_REGISTERS) == CONTEXT_EXTENDED_REGISTERS:
er = ctx['ExtendedRegisters']
for index in compat.xrange(0, MAXIMUM_SUPPORTED_EXTENSION):
s.ExtendedRegisters[index] = er[index]
return s
def to_dict(self):
'Convert a structure into a Python native type.'
ctx = Context()
ContextFlags = self.ContextFlags
ctx['ContextFlags'] = ContextFlags
if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
for key in self._ctx_debug:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT:
ctx['FloatSave'] = self.FloatSave.to_dict()
if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
for key in self._ctx_segs:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
for key in self._ctx_int:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
for key in self._ctx_ctrl:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_EXTENDED_REGISTERS) == CONTEXT_EXTENDED_REGISTERS:
er = [ self.ExtendedRegisters[index] for index in compat.xrange(0, MAXIMUM_SUPPORTED_EXTENSION) ]
er = tuple(er)
ctx['ExtendedRegisters'] = er
return ctx
PCONTEXT = POINTER(CONTEXT)
LPCONTEXT = PCONTEXT
class Context(dict):
"""
Register context dictionary for the i386 architecture.
"""
arch = CONTEXT.arch
def __get_pc(self):
return self['Eip']
def __set_pc(self, value):
self['Eip'] = value
pc = property(__get_pc, __set_pc)
def __get_sp(self):
return self['Esp']
def __set_sp(self, value):
self['Esp'] = value
sp = property(__get_sp, __set_sp)
def __get_fp(self):
return self['Ebp']
def __set_fp(self, value):
self['Ebp'] = value
fp = property(__get_fp, __set_fp)
#--- LDT_ENTRY structure ------------------------------------------------------
# typedef struct _LDT_ENTRY {
# WORD LimitLow;
# WORD BaseLow;
# union {
# struct {
# BYTE BaseMid;
# BYTE Flags1;
# BYTE Flags2;
# BYTE BaseHi;
# } Bytes;
# struct {
# DWORD BaseMid :8;
# DWORD Type :5;
# DWORD Dpl :2;
# DWORD Pres :1;
# DWORD LimitHi :4;
# DWORD Sys :1;
# DWORD Reserved_0 :1;
# DWORD Default_Big :1;
# DWORD Granularity :1;
# DWORD BaseHi :8;
# } Bits;
# } HighWord;
# } LDT_ENTRY,
# *PLDT_ENTRY;
class _LDT_ENTRY_BYTES_(Structure):
_pack_ = 1
_fields_ = [
('BaseMid', BYTE),
('Flags1', BYTE),
('Flags2', BYTE),
('BaseHi', BYTE),
]
class _LDT_ENTRY_BITS_(Structure):
_pack_ = 1
_fields_ = [
('BaseMid', DWORD, 8),
('Type', DWORD, 5),
('Dpl', DWORD, 2),
('Pres', DWORD, 1),
('LimitHi', DWORD, 4),
('Sys', DWORD, 1),
('Reserved_0', DWORD, 1),
('Default_Big', DWORD, 1),
('Granularity', DWORD, 1),
('BaseHi', DWORD, 8),
]
class _LDT_ENTRY_HIGHWORD_(Union):
_pack_ = 1
_fields_ = [
('Bytes', _LDT_ENTRY_BYTES_),
('Bits', _LDT_ENTRY_BITS_),
]
class LDT_ENTRY(Structure):
_pack_ = 1
_fields_ = [
('LimitLow', WORD),
('BaseLow', WORD),
('HighWord', _LDT_ENTRY_HIGHWORD_),
]
PLDT_ENTRY = POINTER(LDT_ENTRY)
LPLDT_ENTRY = PLDT_ENTRY
###############################################################################
# BOOL WINAPI GetThreadSelectorEntry(
# __in HANDLE hThread,
# __in DWORD dwSelector,
# __out LPLDT_ENTRY lpSelectorEntry
# );
def GetThreadSelectorEntry(hThread, dwSelector):
_GetThreadSelectorEntry = windll.kernel32.GetThreadSelectorEntry
_GetThreadSelectorEntry.argtypes = [HANDLE, DWORD, LPLDT_ENTRY]
_GetThreadSelectorEntry.restype = bool
_GetThreadSelectorEntry.errcheck = RaiseIfZero
ldt = LDT_ENTRY()
_GetThreadSelectorEntry(hThread, dwSelector, byref(ldt))
return ldt
# BOOL WINAPI GetThreadContext(
# __in HANDLE hThread,
# __inout LPCONTEXT lpContext
# );
def GetThreadContext(hThread, ContextFlags = None, raw = False):
_GetThreadContext = windll.kernel32.GetThreadContext
_GetThreadContext.argtypes = [HANDLE, LPCONTEXT]
_GetThreadContext.restype = bool
_GetThreadContext.errcheck = RaiseIfZero
if ContextFlags is None:
ContextFlags = CONTEXT_ALL | CONTEXT_i386
Context = CONTEXT()
Context.ContextFlags = ContextFlags
_GetThreadContext(hThread, byref(Context))
if raw:
return Context
return Context.to_dict()
# BOOL WINAPI SetThreadContext(
# __in HANDLE hThread,
# __in const CONTEXT* lpContext
# );
def SetThreadContext(hThread, lpContext):
_SetThreadContext = windll.kernel32.SetThreadContext
_SetThreadContext.argtypes = [HANDLE, LPCONTEXT]
_SetThreadContext.restype = bool
_SetThreadContext.errcheck = RaiseIfZero
if isinstance(lpContext, dict):
lpContext = CONTEXT.from_dict(lpContext)
_SetThreadContext(hThread, byref(lpContext))
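# Illustrative sketch of the usual get/modify/set round trip these wrappers
# support (added for clarity; hThread and new_eip are hypothetical, and the
# thread is assumed to be suspended and opened with context access rights):
#
#     ctx = GetThreadContext(hThread, ContextFlags=CONTEXT_CONTROL)
#     ctx['Eip'] = new_eip
#     SetThreadContext(hThread, ctx)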
#==============================================================================
# This calculates the list of exported symbols.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
| apache-2.0 | -8,243,240,625,507,824,000 | -4,371,942,360,862,866,000 | 34.875278 | 140 | 0.57369 | false |
h4ck3rm1k3/pywikibot-core | scripts/maintenance/make_i18n_dict.py | 3 | 5239 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Generate an i18n file from a given script.
usage:
run IDLE at topmost level
>>> import pwb
>>> from scripts.maintenance.make_i18n_dict import i18nBot
>>> bot = i18nBot('<scriptname>', '<msg dict>')
>>> bot.run()
If you have more than one message dictionary, give all these names to the bot:
>>> bot = i18nBot('<scriptname>', '<msg dict1>', '<msg dict2>', '<msg dict3>')
If you want to rename the message index, use keyword arguments. These may be
mixed with preceding positional arguments:
>>> bot = i18nBot('<scriptname>', '<msg dict1>', the_other_msg='<msg dict2>')
If you have the messages as instance constants you may call the bot as follows:
>>> bot = i18nBot('<scriptname>.<class instance>', '<msg dict1>', '<msg dict2>')
It's also possible to create json files by using the to_json method after
instantiating the bot. It also calls bot.run() to create the dictionaries.
>>> bot.to_json()
"""
#
# (C) xqt, 2013-2015
# (C) Pywikibot team, 2013-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import codecs
import json
import os
from pywikibot import config
class i18nBot(object):
"""I18n bot."""
def __init__(self, script, *args, **kwargs):
"""Constructor."""
modules = script.split('.')
self.scriptname = modules[0]
self.script = __import__('scripts.' + self.scriptname)
for m in modules:
self.script = getattr(self.script, m)
self.messages = {}
# setup the message dict
for msg in args:
if hasattr(self.script, msg):
self.messages[msg] = msg
else:
print('message {0} not found'.format(msg))
for new, old in kwargs.items():
self.messages[old] = new.replace('_', '-')
self.dict = {}
def print_all(self):
"""Pretty print the dict as a file content to screen."""
if not self.dict:
print('No messages found, read them first.\n'
'Use "run" or "to_json" methods')
return
keys = list(self.dict.keys())
keys.remove('qqq')
keys.sort()
keys.insert(0, 'qqq')
if 'en' in keys:
keys.remove('en')
keys.insert(0, 'en')
print("# -*- coding: utf-8 -*-")
print("msg = {")
for code in keys:
print(" '%s': {" % code)
for msg in sorted(self.messages.values()):
label = "%s-%s" % (self.scriptname, msg)
if label in self.dict[code]:
print(" '%s': u'%s'," % (label,
self.dict[code][label]))
print(" },")
print("};")
def read(self, oldmsg, newmsg=None):
"""Read a single message from source script."""
msg = getattr(self.script, oldmsg)
keys = list(msg.keys())
keys.append('qqq')
if newmsg is None:
newmsg = oldmsg
for code in keys:
label = "%s-%s" % (self.scriptname, newmsg)
if code == 'qqq':
if code not in self.dict:
self.dict[code] = {}
self.dict[code][label] = (
u'Edit summary for message %s of %s report'
% (newmsg, self.scriptname))
elif code != 'commons':
if code not in self.dict:
self.dict[code] = {}
self.dict[code][label] = msg[code]
if 'en' not in keys:
print('WARNING: "en" key missing for message %s' % newmsg)
def run(self, quiet=False):
"""
Run the bot, read the messages from source and print the dict.
@param quiet: print the result if False
@type quiet: bool
"""
for item in self.messages.items():
self.read(*item)
if not quiet:
self.print_all()
def to_json(self, quiet=True):
"""
Run the bot and create json files.
@param quiet: Print the result if False
@type quiet: bool
"""
IDENT = 4
if not self.dict:
self.run(quiet)
json_dir = os.path.join(
config.base_dir, 'scripts/i18n', self.scriptname)
if not os.path.exists(json_dir):
os.makedirs(json_dir)
for lang in self.dict:
file_name = os.path.join(json_dir, '%s.json' % lang)
if os.path.isfile(file_name):
with codecs.open(file_name, 'r', 'utf-8') as json_file:
new_dict = json.loads(json_file.read())
else:
new_dict = {}
new_dict['@metadata'] = new_dict.get('@metadata', {'authors': []})
with codecs.open(file_name, 'w', 'utf-8') as json_file:
new_dict.update(self.dict[lang])
s = json.dumps(new_dict, ensure_ascii=False, sort_keys=True,
indent=IDENT, separators=(',', ': '))
s = s.replace(' ' * IDENT, '\t')
json_file.write(s)
if __name__ == '__main__':
print(__doc__)
| mit | 4,077,951,520,187,271,000 | -8,187,763,363,510,430,000 | 31.949686 | 80 | 0.523955 | false |
whereismyjetpack/ansible | lib/ansible/compat/selectors/_selectors2.py | 124 | 24265 | # This file is from the selectors2.py package. It backports the PSF Licensed
# selectors module from the Python-3.5 stdlib to older versions of Python.
# The author, Seth Michael Larson, dual licenses his modifications under the
# PSF License and MIT License:
# https://github.com/SethMichaelLarson/selectors2#license
#
# Seth's copy of the MIT license is reproduced below
#
# MIT License
#
# Copyright (c) 2016 Seth Michael Larson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
# does not follow this behavior and instead returns that no file descriptor
# events have occurred rather than retry the syscall. The decision to drop
# support for select.devpoll is made to maintain 100% test coverage.
import errno
import math
import select
import socket
import sys
import time
from collections import namedtuple, Mapping
try:
monotonic = time.monotonic
except (AttributeError, ImportError): # Python 3.3<
monotonic = time.time
__author__ = 'Seth Michael Larson'
__email__ = '[email protected]'
__version__ = '1.1.0'
__license__ = 'MIT'
__all__ = [
'EVENT_READ',
'EVENT_WRITE',
'SelectorError',
'SelectorKey',
'DefaultSelector'
]
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
HAS_SELECT = True # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
class SelectorError(Exception):
def __init__(self, errcode):
super(SelectorError, self).__init__()
self.errno = errcode
def __repr__(self):
return "<SelectorError errno={0}>".format(self.errno)
def __str__(self):
return self.__repr__()
def _fileobj_to_fd(fileobj):
""" Return a file descriptor from a file object. If
given an integer will simply return that integer back. """
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: {0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
# Python 3.5 uses a more direct route to wrap system calls to increase speed.
if sys.version_info >= (3, 5):
def _syscall_wrapper(func, _, *args, **kwargs):
""" This is the short-circuit version of the below logic
because in Python 3.5+ all selectors restart system calls. """
try:
return func(*args, **kwargs)
except (OSError, IOError, select.error) as e:
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
raise SelectorError(errcode)
else:
def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
""" Wrapper function for syscalls that could fail due to EINTR.
All functions should be retried if there is time left in the timeout
in accordance with PEP 475. """
timeout = kwargs.get("timeout", None)
if timeout is None:
expires = None
recalc_timeout = False
else:
timeout = float(timeout)
if timeout < 0.0: # Timeout less than 0 treated as no timeout.
expires = None
else:
expires = monotonic() + timeout
args = list(args)
if recalc_timeout and "timeout" not in kwargs:
raise ValueError(
"Timeout must be in args or kwargs to be recalculated")
result = _SYSCALL_SENTINEL
while result is _SYSCALL_SENTINEL:
try:
result = func(*args, **kwargs)
# OSError is thrown by select.select
# IOError is thrown by select.epoll.poll
# select.error is thrown by select.poll.poll
# Aren't we thankful for Python 3.x rework for exceptions?
except (OSError, IOError, select.error) as e:
# select.error wasn't a subclass of OSError in the past.
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
# Also test for the Windows equivalent of EINTR.
is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
errcode == errno.WSAEINTR))
if is_interrupt:
if expires is not None:
current_time = monotonic()
if current_time > expires:
raise OSError(errno=errno.ETIMEDOUT)
if recalc_timeout:
if "timeout" in kwargs:
kwargs["timeout"] = expires - current_time
continue
if errcode:
raise SelectorError(errcode)
else:
raise
return result
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
""" Mapping of file objects to selector keys """
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered.".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(object):
""" Abstract Selector class
A selector supports registering file objects to be monitored
for specific I/O events.
A file object is a file descriptor or any object with a
`fileno()` method. An arbitrary object can be attached to the
file object which can be used for example to store context info,
a callback, etc.
A selector can use various implementations (select(), poll(), epoll(),
and kqueue()) depending on the platform. The 'DefaultSelector' class uses
the most efficient implementation for the current platform.
"""
def __init__(self):
# Maps file descriptors to keys.
self._fd_to_key = {}
# Read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
""" Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive
search in case the object is invalid but we still
have it in our map. Used by unregister() so we can
unregister an object that was previously registered
even if it is closed. It is also used by _SelectorMapping
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Search through all our mapped keys.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
""" Register a file object for a set of events to monitor. """
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
""" Unregister a file object from being monitored. """
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
# Getting the fileno of a closed socket on Windows errors with EBADF.
except socket.error as err:
if err.errno != errno.EBADF:
raise
else:
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
self._fd_to_key.pop(key.fd)
break
else:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
""" Change a registered file object monitored events and data. """
# NOTE: Some subclasses optimize this operation even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def select(self, timeout=None):
""" Perform the actual selection until some monitored file objects
are ready or the timeout expires. """
raise NotImplementedError()
def close(self):
""" Close the selector. This must be called to ensure that all
underlying resources are freed. """
self._fd_to_key.clear()
self._map = None
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def get_map(self):
""" Return a mapping of file objects to selector keys """
return self._map
def _key_from_fd(self, fd):
""" Return the key associated to a given file descriptor
Return None if it is not found. """
try:
return self._fd_to_key[fd]
except KeyError:
return None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# Almost all platforms have select.select()
if hasattr(select, "select"):
class SelectSelector(BaseSelector):
""" Select-based selector. """
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
def _select(self, r, w, timeout=None):
""" Wrapper for select.select because timeout is a positional arg """
return select.select(r, w, [], timeout)
def select(self, timeout=None):
# Selecting on empty lists on Windows errors out.
if not len(self._readers) and not len(self._writers):
return []
timeout = None if timeout is None else max(timeout, 0.0)
ready = []
r, w, _ = _syscall_wrapper(self._select, True, self._readers,
self._writers, timeout)
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
__all__.append('SelectSelector')
if hasattr(select, "poll"):
class PollSelector(BaseSelector):
""" Poll-based selector """
def __init__(self):
super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super(PollSelector, self).register(fileobj, events, data)
event_mask = 0
if events & EVENT_READ:
event_mask |= select.POLLIN
if events & EVENT_WRITE:
event_mask |= select.POLLOUT
self._poll.register(key.fd, event_mask)
return key
def unregister(self, fileobj):
key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.poll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
result = self._poll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
__all__.append('PollSelector')
if hasattr(select, "epoll"):
class EpollSelector(BaseSelector):
""" Epoll-based selector """
def __init__(self):
super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super(EpollSelector, self).register(fileobj, events, data)
events_mask = 0
if events & EVENT_READ:
events_mask |= select.EPOLLIN
if events & EVENT_WRITE:
events_mask |= select.EPOLLOUT
_syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
return key
def unregister(self, fileobj):
key = super(EpollSelector, self).unregister(fileobj)
try:
_syscall_wrapper(self._epoll.unregister, False, key.fd)
except SelectorError:
# This can occur when the fd was closed since registry.
pass
return key
def select(self, timeout=None):
if timeout is not None:
if timeout <= 0:
timeout = 0.0
else:
# select.epoll.poll() has a resolution of 1 millisecond
# but luckily takes seconds so we don't need a wrapper
# like PollSelector. Just for better rounding.
timeout = math.ceil(timeout * 1e3) * 1e-3
timeout = float(timeout)
else:
timeout = -1.0 # epoll.poll() must have a float.
# We always want at least 1 to ensure that select can be called
# with no file descriptors registered. Otherwise will fail.
max_events = max(len(self._fd_to_key), 1)
ready = []
fd_events = _syscall_wrapper(self._epoll.poll, True,
timeout=timeout,
maxevents=max_events)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.EPOLLIN:
events |= EVENT_WRITE
if event_mask & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super(EpollSelector, self).close()
__all__.append('EpollSelector')
if hasattr(select, "devpoll"):
class DevpollSelector(BaseSelector):
"""Solaris /dev/poll selector."""
def __init__(self):
super(DevpollSelector, self).__init__()
self._devpoll = select.devpoll()
def fileno(self):
return self._devpoll.fileno()
def register(self, fileobj, events, data=None):
key = super(DevpollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._devpoll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super(DevpollSelector, self).unregister(fileobj)
self._devpoll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.devpoll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
result = self._devpoll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._devpoll.close()
super(DevpollSelector, self).close()
__all__.append('DevpollSelector')
if hasattr(select, "kqueue"):
class KqueueSelector(BaseSelector):
""" Kqueue / Kevent-based selector """
def __init__(self):
super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
if events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
return key
def unregister(self, fileobj):
key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except SelectorError:
pass
if key.events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except SelectorError:
pass
return key
def select(self, timeout=None):
if timeout is not None:
timeout = max(timeout, 0)
max_events = len(self._fd_to_key) * 2
ready_fds = {}
kevent_list = _syscall_wrapper(self._kqueue.control, True,
None, max_events, timeout)
for kevent in kevent_list:
fd = kevent.ident
event_mask = kevent.filter
events = 0
if event_mask == select.KQ_FILTER_READ:
events |= EVENT_READ
if event_mask == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
if key.fd not in ready_fds:
ready_fds[key.fd] = (key, events & key.events)
else:
old_events = ready_fds[key.fd][1]
ready_fds[key.fd] = (key, (events | old_events) & key.events)
return list(ready_fds.values())
def close(self):
self._kqueue.close()
super(KqueueSelector, self).close()
__all__.append('KqueueSelector')
# Choose the best implementation, roughly:
# kqueue == epoll == devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if 'KqueueSelector' in globals(): # Platform-specific: Mac OS and BSD
DefaultSelector = KqueueSelector
elif 'DevpollSelector' in globals():
DefaultSelector = DevpollSelector
elif 'EpollSelector' in globals(): # Platform-specific: Linux
DefaultSelector = EpollSelector
elif 'PollSelector' in globals(): # Platform-specific: Linux
DefaultSelector = PollSelector
elif 'SelectSelector' in globals(): # Platform-specific: Windows
DefaultSelector = SelectSelector
else: # Platform-specific: AppEngine
def no_selector(_):
raise ValueError("Platform does not have a selector")
DefaultSelector = no_selector
HAS_SELECT = False
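# Illustrative usage sketch (added for clarity; not part of the backport).
# ``sock`` is a hypothetical connected, non-blocking socket; whichever
# implementation was chosen above exposes the same interface:
#
#     sel = DefaultSelector()
#     sel.register(sock, EVENT_READ)
#     for key, events in sel.select(timeout=1.0):
#         data = key.fileobj.recv(4096)
#     sel.unregister(sock)
#     sel.close()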
| gpl-3.0 | 6,740,783,978,945,787,000 | 6,594,074,615,126,122,000 | 35.37931 | 89 | 0.554502 | false |
USGSDenverPychron/pychron | pychron/rpc/rpcable.py | 1 | 1606 | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.config_loadable import ConfigLoadable
class RPCable(ConfigLoadable):
rpc_server = None
def load_rpc_server(self, port):
self.info('starting rpc server port={}'.format(port))
from pychron.rpc.server import RPCServer
self.rpc_server = RPCServer(manager=self,
port=port)
self.rpc_server.bootstrap()
def _load_hook(self, config):
if config.has_section('RPC'):
rpc_port = self.config_get(config, 'RPC', 'port', cast='int')
if rpc_port:
self.load_rpc_server(rpc_port)
# ============= EOF =============================================
| apache-2.0 | -7,403,270,830,758,106,000 | -6,624,729,352,123,645,000 | 42.405405 | 81 | 0.537983 | false |
Johnzero/erp | openerp/addons/base/res/report/__init__.py | 79 | 1201 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#from report import report_sxw
#report_sxw.report_sxw('report.partner.list', 'res.partner', 'addons/base/res/partner/report/partner_list.rml')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,967,564,427,902,701,000 | -5,066,753,058,520,841,000 | 41.892857 | 111 | 0.623647 | false |
vdenPython/wwwgaseng | wwwgaseng/settings.py | 1 | 3071 | """
Django settings for wwwgaseng project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^#f2&b!=kivg))3%%622c*wizr!p!mo7p+z6ablz&vs)7b6_xt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    'ge', # Base application
    'pto', # Application for the PTO department
    'oks', # Application for the OKS department
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'wwwgaseng.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wwwgaseng.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'gaseng',
'USER': 'wwwadmin',
'PASSWORD': 'HMgaz004',
'HOST': '192.168.0.34',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Asia/Yekaterinburg'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin/' | apache-2.0 | 6,870,902,605,219,755,000 | 3,570,561,904,453,105,000 | 25.646018 | 78 | 0.679734 | false |
modicum/wyoming | source/conf.py | 1 | 11524 | # -*- coding: utf-8 -*-
#
# wyoming documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 25 11:12:20 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wyoming'
copyright = u'2016, Modicum'
author = u'Modicum'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0'
# The full version, including alpha/beta/rc tags.
release = u'0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'wyomingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'wyoming.tex', u'wyoming Documentation',
u'Modicum', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'wyoming', u'wyoming Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'wyoming', u'wyoming Documentation',
author, 'wyoming', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None)}
| mit | -5,305,049,168,149,271,000 | -1,872,114,022,169,154,600 | 30.486339 | 79 | 0.705658 | false |
SlimRemix/android_external_chromium_org | tools/usb_gadget/server.py | 91 | 3886 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""WSGI application to manage a USB gadget.
"""
import datetime
import hashlib
import re
import subprocess
import sys
import time
import urllib2
from tornado import httpserver
from tornado import ioloop
from tornado import web
import default_gadget
VERSION_PATTERN = re.compile(r'.*usb_gadget-([a-z0-9]{32})\.zip')
address = None
chip = None
claimed_by = None
default = default_gadget.DefaultGadget()
gadget = None
hardware = None
interface = None
port = None
def SwitchGadget(new_gadget):
if chip.IsConfigured():
chip.Destroy()
global gadget
gadget = new_gadget
gadget.AddStringDescriptor(3, address)
chip.Create(gadget)
class VersionHandler(web.RequestHandler):
def get(self):
version = 'unpackaged'
for path in sys.path:
match = VERSION_PATTERN.match(path)
if match:
version = match.group(1)
break
self.write(version)
class UpdateHandler(web.RequestHandler):
def post(self):
fileinfo = self.request.files['file'][0]
match = VERSION_PATTERN.match(fileinfo['filename'])
if match is None:
self.write('Filename must contain MD5 hash.')
self.set_status(400)
return
content = fileinfo['body']
md5sum = hashlib.md5(content).hexdigest()
if md5sum != match.group(1):
self.write('File hash does not match.')
self.set_status(400)
return
filename = 'usb_gadget-{}.zip'.format(md5sum)
with open(filename, 'wb') as f:
f.write(content)
args = ['/usr/bin/python', filename,
'--interface', interface,
'--port', str(port),
'--hardware', hardware]
if claimed_by is not None:
args.extend(['--start-claimed', claimed_by])
print 'Reloading with version {}...'.format(md5sum)
global http_server
if chip.IsConfigured():
chip.Destroy()
http_server.stop()
child = subprocess.Popen(args, close_fds=True)
while True:
child.poll()
if child.returncode is not None:
self.write('New package exited with error {}.'
.format(child.returncode))
self.set_status(500)
http_server = httpserver.HTTPServer(app)
http_server.listen(port)
SwitchGadget(gadget)
return
try:
f = urllib2.urlopen('http://{}/version'.format(address))
if f.getcode() == 200:
# Update complete, wait 1 second to make sure buffers are flushed.
io_loop = ioloop.IOLoop.instance()
io_loop.add_timeout(datetime.timedelta(seconds=1), io_loop.stop)
return
except urllib2.URLError:
pass
time.sleep(0.1)
class ClaimHandler(web.RequestHandler):
def post(self):
global claimed_by
if claimed_by is None:
claimed_by = self.get_argument('session_id')
else:
self.write('Device is already claimed by "{}".'.format(claimed_by))
self.set_status(403)
class UnclaimHandler(web.RequestHandler):
def post(self):
global claimed_by
claimed_by = None
if gadget != default:
SwitchGadget(default)
class UnconfigureHandler(web.RequestHandler):
def post(self):
SwitchGadget(default)
class DisconnectHandler(web.RequestHandler):
def post(self):
if chip.IsConfigured():
chip.Destroy()
class ReconnectHandler(web.RequestHandler):
def post(self):
if not chip.IsConfigured():
chip.Create(gadget)
app = web.Application([
(r'/version', VersionHandler),
(r'/update', UpdateHandler),
(r'/claim', ClaimHandler),
(r'/unclaim', UnclaimHandler),
(r'/unconfigure', UnconfigureHandler),
(r'/disconnect', DisconnectHandler),
(r'/reconnect', ReconnectHandler),
])
http_server = httpserver.HTTPServer(app)
| bsd-3-clause | -7,475,960,196,599,251,000 | -5,343,616,964,872,363,000 | 21.858824 | 76 | 0.654143 | false |
Shaps/ansible | test/units/inventory/test_group.py | 53 | 5176 | # Copyright 2018 Alan Rominger <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from units.compat import unittest
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.errors import AnsibleError
class TestGroup(unittest.TestCase):
def test_depth_update(self):
A = Group('A')
B = Group('B')
Z = Group('Z')
A.add_child_group(B)
A.add_child_group(Z)
self.assertEqual(A.depth, 0)
self.assertEqual(Z.depth, 1)
self.assertEqual(B.depth, 1)
def test_depth_update_dual_branches(self):
alpha = Group('alpha')
A = Group('A')
alpha.add_child_group(A)
B = Group('B')
A.add_child_group(B)
Z = Group('Z')
alpha.add_child_group(Z)
beta = Group('beta')
B.add_child_group(beta)
Z.add_child_group(beta)
self.assertEqual(alpha.depth, 0) # apex
self.assertEqual(beta.depth, 3) # alpha -> A -> B -> beta
omega = Group('omega')
omega.add_child_group(alpha)
# verify that both paths are traversed to get the max depth value
self.assertEqual(B.depth, 3) # omega -> alpha -> A -> B
self.assertEqual(beta.depth, 4) # B -> beta
def test_depth_recursion(self):
A = Group('A')
B = Group('B')
A.add_child_group(B)
# hypothetical of adding B as child group to A
A.parent_groups.append(B)
B.child_groups.append(A)
# can't update depths of groups, because of loop
with self.assertRaises(AnsibleError):
B._check_children_depth()
def test_loop_detection(self):
A = Group('A')
B = Group('B')
C = Group('C')
A.add_child_group(B)
B.add_child_group(C)
with self.assertRaises(AnsibleError):
C.add_child_group(A)
def test_direct_host_ordering(self):
"""Hosts are returned in order they are added
"""
group = Group('A')
# host names not added in alphabetical order
host_name_list = ['z', 'b', 'c', 'a', 'p', 'q']
expected_hosts = []
for host_name in host_name_list:
h = Host(host_name)
group.add_host(h)
expected_hosts.append(h)
assert group.get_hosts() == expected_hosts
def test_sub_group_host_ordering(self):
"""With multiple nested groups, asserts that hosts are returned
in deterministic order
"""
top_group = Group('A')
expected_hosts = []
for name in ['z', 'b', 'c', 'a', 'p', 'q']:
child = Group('group_{0}'.format(name))
top_group.add_child_group(child)
host = Host('host_{0}'.format(name))
child.add_host(host)
expected_hosts.append(host)
assert top_group.get_hosts() == expected_hosts
def test_populates_descendant_hosts(self):
A = Group('A')
B = Group('B')
C = Group('C')
h = Host('h')
C.add_host(h)
A.add_child_group(B) # B is child of A
B.add_child_group(C) # C is descendant of A
A.add_child_group(B)
self.assertEqual(set(h.groups), set([C, B, A]))
h2 = Host('h2')
C.add_host(h2)
self.assertEqual(set(h2.groups), set([C, B, A]))
def test_ancestor_example(self):
# see docstring for Group._walk_relationship
groups = {}
for name in ['A', 'B', 'C', 'D', 'E', 'F']:
groups[name] = Group(name)
# first row
groups['A'].add_child_group(groups['D'])
groups['B'].add_child_group(groups['D'])
groups['B'].add_child_group(groups['E'])
groups['C'].add_child_group(groups['D'])
# second row
groups['D'].add_child_group(groups['E'])
groups['D'].add_child_group(groups['F'])
groups['E'].add_child_group(groups['F'])
self.assertEqual(
set(groups['F'].get_ancestors()),
set([
groups['A'], groups['B'], groups['C'], groups['D'], groups['E']
])
)
def test_ancestors_recursive_loop_safe(self):
'''
The get_ancestors method may be referenced before circular parenting
checks, so the method is expected to be stable even with loops
'''
A = Group('A')
B = Group('B')
A.parent_groups.append(B)
B.parent_groups.append(A)
# finishes in finite time
self.assertEqual(A.get_ancestors(), set([A, B]))
| gpl-3.0 | 6,468,758,720,486,443,000 | -5,248,913,066,299,448,000 | 33.052632 | 79 | 0.573995 | false |
fnouama/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_has_key.py | 326 | 3227 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for has_key().
Calls to .has_key() methods are expressed in terms of the 'in'
operator:
d.has_key(k) -> k in d
CAVEATS:
1) While the primary target of this fixer is dict.has_key(), the
fixer will change any has_key() method call, regardless of its
class.
2) Cases like this will not be converted:
m = d.has_key
if m(k):
...
Only *calls* to has_key() are converted. While it is possible to
convert the above to something like
m = d.__contains__
if m(k):
...
this is currently not done.
"""
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, parenthesize
class FixHasKey(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
anchor=power<
before=any+
trailer< '.' 'has_key' >
trailer<
'('
( not(arglist | argument<any '=' any>) arg=any
| arglist<(not argument<any '=' any>) arg=any ','>
)
')'
>
after=any*
>
|
negation=not_test<
'not'
anchor=power<
before=any+
trailer< '.' 'has_key' >
trailer<
'('
( not(arglist | argument<any '=' any>) arg=any
| arglist<(not argument<any '=' any>) arg=any ','>
)
')'
>
>
>
"""
def transform(self, node, results):
assert results
syms = self.syms
if (node.parent.type == syms.not_test and
self.pattern.match(node.parent)):
# Don't transform a node matching the first alternative of the
# pattern when its parent matches the second alternative
return None
negation = results.get("negation")
anchor = results["anchor"]
prefix = node.prefix
before = [n.clone() for n in results["before"]]
arg = results["arg"].clone()
after = results.get("after")
if after:
after = [n.clone() for n in after]
if arg.type in (syms.comparison, syms.not_test, syms.and_test,
syms.or_test, syms.test, syms.lambdef, syms.argument):
arg = parenthesize(arg)
if len(before) == 1:
before = before[0]
else:
before = pytree.Node(syms.power, before)
before.prefix = u" "
n_op = Name(u"in", prefix=u" ")
if negation:
n_not = Name(u"not", prefix=u" ")
n_op = pytree.Node(syms.comp_op, (n_not, n_op))
new = pytree.Node(syms.comparison, (arg, n_op, before))
if after:
new = parenthesize(new)
new = pytree.Node(syms.power, (new,) + tuple(after))
if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr,
syms.and_expr, syms.shift_expr,
syms.arith_expr, syms.term,
syms.factor, syms.power):
new = parenthesize(new)
new.prefix = prefix
return new
| apache-2.0 | 6,785,757,719,360,233,000 | -251,184,386,678,802,850 | 28.336364 | 78 | 0.524016 | false |
scottdangelo/RemoveVolumeMangerLocks | cinder/volume/drivers/srb.py | 2 | 33438 | # Copyright (c) 2014 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for the Scality REST Block storage system
This driver provisions Linux SRB volumes leveraging RESTful storage platforms
(e.g. Scality CDMI).
"""
import contextlib
import functools
import re
import sys
import time
from oslo_concurrency import lockutils
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from six.moves import range
from cinder.brick.local_dev import lvm
from cinder import exception
from cinder.i18n import _, _LI, _LE, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
srb_opts = [
cfg.StrOpt('srb_base_urls',
default=None,
               help='Comma-separated list of REST server IPs to connect to. '
                    '(e.g. http://IP1/,http://IP2:81/path)'),
]
CONF = cfg.CONF
CONF.register_opts(srb_opts)
ACCEPTED_REST_SERVER = re.compile(r'^http://'
'(\d{1,3}\.){3}\d{1,3}'
'(:\d+)?/[a-zA-Z0-9\-_\/]*$')
class retry(object):
SLEEP_NONE = 'none'
SLEEP_DOUBLE = 'double'
SLEEP_INCREMENT = 'increment'
def __init__(self, exceptions, count, sleep_mechanism=SLEEP_INCREMENT,
sleep_factor=1):
if sleep_mechanism not in [self.SLEEP_NONE,
self.SLEEP_DOUBLE,
self.SLEEP_INCREMENT]:
raise ValueError('Invalid value for `sleep_mechanism` argument')
self._exceptions = exceptions
self._count = count
self._sleep_mechanism = sleep_mechanism
self._sleep_factor = sleep_factor
def __call__(self, fun):
func_name = fun.func_name
@functools.wraps(fun)
def wrapped(*args, **kwargs):
sleep_time = self._sleep_factor
exc_info = None
for attempt in range(self._count):
if attempt != 0:
LOG.warning(_LW('Retrying failed call to %(func)s, '
'attempt %(attempt)i.'),
{'func': func_name,
'attempt': attempt})
try:
return fun(*args, **kwargs)
except self._exceptions:
exc_info = sys.exc_info()
if attempt != self._count - 1:
if self._sleep_mechanism == self.SLEEP_NONE:
continue
elif self._sleep_mechanism == self.SLEEP_INCREMENT:
time.sleep(sleep_time)
sleep_time += self._sleep_factor
elif self._sleep_mechanism == self.SLEEP_DOUBLE:
time.sleep(sleep_time)
sleep_time *= 2
else:
raise ValueError('Unknown sleep mechanism: %r'
% self._sleep_mechanism)
six.reraise(exc_info[0], exc_info[1], exc_info[2])
return wrapped
class LVM(lvm.LVM):
def activate_vg(self):
"""Activate the Volume Group associated with this instantiation.
:raises: putils.ProcessExecutionError
"""
cmd = ['vgchange', '-ay', self.vg_name]
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error activating Volume Group'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def deactivate_vg(self):
"""Deactivate the Volume Group associated with this instantiation.
This forces LVM to release any reference to the device.
:raises: putils.ProcessExecutionError
"""
cmd = ['vgchange', '-an', self.vg_name]
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error deactivating Volume Group'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def destroy_vg(self):
"""Destroy the Volume Group associated with this instantiation.
:raises: putils.ProcessExecutionError
"""
cmd = ['vgremove', '-f', self.vg_name]
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error destroying Volume Group'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def pv_resize(self, pv_name, new_size_str):
"""Extend the size of an existing PV (for virtual PVs).
:raises: putils.ProcessExecutionError
"""
try:
self._execute('pvresize',
'--setphysicalvolumesize', new_size_str,
pv_name,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error resizing Physical Volume'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def extend_thin_pool(self):
"""Extend the size of the thin provisioning pool.
This method extends the size of a thin provisioning pool to 95% of the
size of the VG, if the VG is configured as thin and owns a thin
provisioning pool.
:raises: putils.ProcessExecutionError
"""
if self.vg_thin_pool is None:
return
new_size_str = self._calculate_thin_pool_size()
try:
self._execute('lvextend',
'-L', new_size_str,
"%s/%s-pool" % (self.vg_name, self.vg_name),
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error extending thin provisioning pool'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
@contextlib.contextmanager
def patched(obj, attr, fun):
"""Context manager to locally patch a method.
Within the managed context, the `attr` method of `obj` will be replaced by
a method which calls `fun` passing in the original `attr` attribute of
`obj` as well as any positional and keyword arguments.
At the end of the context, the original method is restored.
"""
orig = getattr(obj, attr)
def patch(*args, **kwargs):
return fun(orig, *args, **kwargs)
setattr(obj, attr, patch)
try:
yield
finally:
setattr(obj, attr, orig)
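# Minimal usage sketch for `patched` (hypothetical names, not part of the
# driver): the replacement receives the original method as its first argument
# and the original attribute is restored when the context exits.
#
#     def traced(orig, *args, **kwargs):
#         LOG.debug('calling patched method')
#         return orig(*args, **kwargs)
#
#     with patched(SomeClass, 'some_method', traced):
#         SomeClass().some_method()  # runs traced(), which calls the original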
@contextlib.contextmanager
def handle_process_execution_error(message, info_message, reraise=True):
"""Consistently handle `putils.ProcessExecutionError` exceptions
This context-manager will catch any `putils.ProcessExecutionError`
exceptions raised in the managed block, and generate logging output
accordingly.
The value of the `message` argument will be logged at `logging.ERROR`
level, and the `info_message` argument at `logging.INFO` level. Finally
the command string, exit code, standard output and error output of the
process will be logged at `logging.DEBUG` level.
The `reraise` argument specifies what should happen when a
`putils.ProcessExecutionError` is caught. If it's equal to `True`, the
exception will be re-raised. If it's some other non-`False` object, this
object will be raised instead (so you most likely want it to be some
`Exception`). Any `False` value will result in the exception to be
swallowed.
"""
try:
yield
except putils.ProcessExecutionError as exc:
LOG.error(message)
LOG.info(info_message)
LOG.debug('Command : %s', exc.cmd)
LOG.debug('Exit Code : %r', exc.exit_code)
LOG.debug('StdOut : %s', exc.stdout)
LOG.debug('StdErr : %s', exc.stderr)
if reraise is True:
raise
elif reraise:
raise reraise # pylint: disable=E0702
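# A short sketch of the `reraise` modes described above (hypothetical call,
# not part of the driver): the same failing command can be swallowed,
# re-raised as-is, or replaced by a Cinder-level exception.
#
#     with handle_process_execution_error(
#             message=_LE('scrub failed'),
#             info_message=_LI('Scrubbing device'),
#             reraise=False):
#         putils.execute('false')  # non-zero exit; error is only logged
#
#     # reraise=True re-raises the ProcessExecutionError itself, while e.g.
#     # reraise=exception.VolumeBackendAPIException(data='scrub failed')
#     # raises that exception object in its place.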
@contextlib.contextmanager
def temp_snapshot(driver, volume, src_vref):
snapshot = {'volume_name': src_vref['name'],
'volume_id': src_vref['id'],
'volume_size': src_vref['size'],
'name': 'snapshot-clone-%s' % volume['id'],
'id': 'tmp-snap-%s' % volume['id'],
'size': src_vref['size']}
driver.create_snapshot(snapshot)
try:
yield snapshot
finally:
driver.delete_snapshot(snapshot)
@contextlib.contextmanager
def temp_raw_device(driver, volume):
driver._attach_file(volume)
try:
yield
finally:
driver._detach_file(volume)
@contextlib.contextmanager
def temp_lvm_device(driver, volume):
with temp_raw_device(driver, volume):
vg = driver._get_lvm_vg(volume)
vg.activate_vg()
yield vg
class SRBDriver(driver.VolumeDriver):
"""Scality SRB volume driver
This driver manages volumes provisioned by the Scality REST Block driver
Linux kernel module, backed by RESTful storage providers (e.g. Scality
CDMI).
"""
VERSION = '1.1.0'
# Over-allocation ratio (multiplied with requested size) for thin
# provisioning
OVER_ALLOC_RATIO = 2
SNAPSHOT_PREFIX = 'snapshot'
def __init__(self, *args, **kwargs):
super(SRBDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(srb_opts)
self.urls_setup = False
self.backend_name = None
self.base_urls = None
self.root_helper = utils.get_root_helper()
self._attached_devices = {}
def _setup_urls(self):
if not self.base_urls:
message = _("No url configured")
raise exception.VolumeBackendAPIException(data=message)
with handle_process_execution_error(
                message=_LE('Could not set up urls on the Block Driver.'),
                info_message=_LI('Error setting up urls'),
reraise=False):
cmd = self.base_urls
path = '/sys/class/srb/add_urls'
putils.execute('tee', path, process_input=cmd,
root_helper=self.root_helper, run_as_root=True)
self.urls_setup = True
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
self.backend_name = self.configuration.safe_get('volume_backend_name')
base_urls = self.configuration.safe_get('srb_base_urls')
sane_urls = []
if base_urls:
for url in base_urls.split(','):
stripped_url = url.strip()
if ACCEPTED_REST_SERVER.match(stripped_url):
sane_urls.append(stripped_url)
else:
LOG.warning(_LW("%s is not an accepted REST server "
"IP address"), stripped_url)
self.base_urls = ','.join(sane_urls)
self._setup_urls()
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
if not self.base_urls:
LOG.warning(_LW("Configuration variable srb_base_urls"
" not set or empty."))
if self.urls_setup is False:
message = _("Could not setup urls properly")
raise exception.VolumeBackendAPIException(data=message)
@classmethod
def _is_snapshot(cls, volume):
return volume['name'].startswith(cls.SNAPSHOT_PREFIX)
@classmethod
def _get_volname(cls, volume):
"""Returns the name of the actual volume
If the volume is a snapshot, it returns the name of the parent volume.
otherwise, returns the volume's name.
"""
name = volume['name']
if cls._is_snapshot(volume):
name = "volume-%s" % (volume['volume_id'])
return name
@classmethod
def _get_volid(cls, volume):
"""Returns the ID of the actual volume
If the volume is a snapshot, it returns the ID of the parent volume.
otherwise, returns the volume's id.
"""
volid = volume['id']
if cls._is_snapshot(volume):
volid = volume['volume_id']
return volid
@classmethod
def _device_name(cls, volume):
volume_id = cls._get_volid(volume)
name = 'cinder-%s' % volume_id
# Device names can't be longer than 32 bytes (incl. \0)
return name[:31]
@classmethod
def _device_path(cls, volume):
return "/dev/" + cls._device_name(volume)
@classmethod
def _escape_snapshot(cls, snapshot_name):
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
if not snapshot_name.startswith(cls.SNAPSHOT_PREFIX):
return snapshot_name
return '_' + snapshot_name
@classmethod
def _mapper_path(cls, volume):
groupname = cls._get_volname(volume)
name = volume['name']
if cls._is_snapshot(volume):
name = cls._escape_snapshot(name)
# NOTE(vish): stops deprecation warning
groupname = groupname.replace('-', '--')
name = name.replace('-', '--')
return "/dev/mapper/%s-%s" % (groupname, name)
@staticmethod
def _size_int(size_in_g):
try:
return max(int(size_in_g), 1)
except ValueError:
message = (_("Invalid size parameter '%s': Cannot be interpreted"
" as an integer value.")
% size_in_g)
LOG.error(message)
raise exception.VolumeBackendAPIException(data=message)
@classmethod
def _set_device_path(cls, volume):
volume['provider_location'] = cls._get_volname(volume)
return {
'provider_location': volume['provider_location'],
}
@staticmethod
def _activate_lv(orig, *args, **kwargs):
"""Activate lv.
Use with `patched` to patch `lvm.LVM.activate_lv` to ignore `EEXIST`
"""
try:
orig(*args, **kwargs)
except putils.ProcessExecutionError as exc:
if exc.exit_code != 5:
raise
else:
LOG.debug('`activate_lv` returned 5, ignored')
def _get_lvm_vg(self, volume, create_vg=False):
# NOTE(joachim): One-device volume group to manage thin snapshots
# Get origin volume name even for snapshots
volume_name = self._get_volname(volume)
physical_volumes = [self._device_path(volume)]
with patched(lvm.LVM, 'activate_lv', self._activate_lv):
return LVM(volume_name, utils.get_root_helper(),
create_vg=create_vg,
physical_volumes=physical_volumes,
lvm_type='thin', executor=self._execute)
@staticmethod
def _volume_not_present(vg, volume_name):
# Used to avoid failing to delete a volume for which
# the create operation partly failed
return vg.get_volume(volume_name) is None
def _create_file(self, volume):
message = _('Could not create volume on any configured REST server.')
with handle_process_execution_error(
message=message,
info_message=_LI('Error creating Volume %s.') % volume['name'],
reraise=exception.VolumeBackendAPIException(data=message)):
size = self._size_int(volume['size']) * self.OVER_ALLOC_RATIO
cmd = volume['name']
cmd += ' %dG' % size
path = '/sys/class/srb/create'
putils.execute('tee', path, process_input=cmd,
root_helper=self.root_helper, run_as_root=True)
return self._set_device_path(volume)
def _extend_file(self, volume, new_size):
message = _('Could not extend volume on any configured REST server.')
with handle_process_execution_error(
message=message,
info_message=(_LI('Error extending Volume %s.')
% volume['name']),
reraise=exception.VolumeBackendAPIException(data=message)):
size = self._size_int(new_size) * self.OVER_ALLOC_RATIO
cmd = volume['name']
cmd += ' %dG' % size
path = '/sys/class/srb/extend'
putils.execute('tee', path, process_input=cmd,
root_helper=self.root_helper, run_as_root=True)
@staticmethod
def _destroy_file(volume):
message = _('Could not destroy volume on any configured REST server.')
volname = volume['name']
with handle_process_execution_error(
message=message,
info_message=_LI('Error destroying Volume %s.') % volname,
reraise=exception.VolumeBackendAPIException(data=message)):
cmd = volume['name']
path = '/sys/class/srb/destroy'
putils.execute('tee', path, process_input=cmd,
root_helper=utils.get_root_helper(),
run_as_root=True)
# NOTE(joachim): Must only be called within a function decorated by:
# @lockutils.synchronized('devices', 'cinder-srb-')
def _increment_attached_count(self, volume):
"""Increments the attach count of the device"""
volid = self._get_volid(volume)
if volid not in self._attached_devices:
self._attached_devices[volid] = 1
else:
self._attached_devices[volid] += 1
# NOTE(joachim): Must only be called within a function decorated by:
# @lockutils.synchronized('devices', 'cinder-srb-')
def _decrement_attached_count(self, volume):
"""Decrements the attach count of the device"""
volid = self._get_volid(volume)
if volid not in self._attached_devices:
raise exception.VolumeBackendAPIException(
(_("Internal error in srb driver: "
"Trying to detach detached volume %s."))
% (self._get_volname(volume))
)
self._attached_devices[volid] -= 1
if self._attached_devices[volid] == 0:
del self._attached_devices[volid]
# NOTE(joachim): Must only be called within a function decorated by:
# @lockutils.synchronized('devices', 'cinder-srb-')
def _get_attached_count(self, volume):
volid = self._get_volid(volume)
return self._attached_devices.get(volid, 0)
@lockutils.synchronized('devices', 'cinder-srb-')
def _is_attached(self, volume):
return self._get_attached_count(volume) > 0
@lockutils.synchronized('devices', 'cinder-srb-')
def _attach_file(self, volume):
name = self._get_volname(volume)
devname = self._device_name(volume)
LOG.debug('Attaching volume %(name)s as %(devname)s',
{'name': name, 'devname': devname})
count = self._get_attached_count(volume)
if count == 0:
message = (_('Could not attach volume %(vol)s as %(dev)s '
'on system.')
% {'vol': name, 'dev': devname})
with handle_process_execution_error(
message=message,
info_message=_LI('Error attaching Volume'),
reraise=exception.VolumeBackendAPIException(data=message)):
cmd = name + ' ' + devname
path = '/sys/class/srb/attach'
putils.execute('tee', path, process_input=cmd,
root_helper=self.root_helper, run_as_root=True)
else:
LOG.debug('Volume %s already attached', name)
self._increment_attached_count(volume)
@retry(exceptions=(putils.ProcessExecutionError, ),
count=3, sleep_mechanism=retry.SLEEP_INCREMENT, sleep_factor=5)
def _do_deactivate(self, volume, vg):
vg.deactivate_vg()
@retry(exceptions=(putils.ProcessExecutionError, ),
count=5, sleep_mechanism=retry.SLEEP_DOUBLE, sleep_factor=1)
def _do_detach(self, volume, vg):
devname = self._device_name(volume)
volname = self._get_volname(volume)
cmd = devname
path = '/sys/class/srb/detach'
try:
putils.execute('tee', path, process_input=cmd,
root_helper=self.root_helper, run_as_root=True)
except putils.ProcessExecutionError:
with excutils.save_and_reraise_exception(reraise=True):
try:
with patched(lvm.LVM, 'activate_lv', self._activate_lv):
vg.activate_lv(volname)
self._do_deactivate(volume, vg)
except putils.ProcessExecutionError:
                    LOG.warning(_LW('All attempts to recover the failed '
                                    'detach of %(volume)s failed.'),
{'volume': volname})
@lockutils.synchronized('devices', 'cinder-srb-')
def _detach_file(self, volume):
name = self._get_volname(volume)
devname = self._device_name(volume)
vg = self._get_lvm_vg(volume)
LOG.debug('Detaching device %s', devname)
count = self._get_attached_count(volume)
if count > 1:
LOG.info(_LI('Reference count of %(volume)s is %(count)d, '
'not detaching.'),
{'volume': volume['name'], 'count': count})
return
message = (_('Could not detach volume %(vol)s from device %(dev)s.')
% {'vol': name, 'dev': devname})
with handle_process_execution_error(
message=message,
info_message=_LI('Error detaching Volume'),
reraise=exception.VolumeBackendAPIException(data=message)):
try:
if vg is not None:
self._do_deactivate(volume, vg)
except putils.ProcessExecutionError:
LOG.error(_LE('Could not deactivate volume group %s'),
self._get_volname(volume))
raise
try:
self._do_detach(volume, vg=vg)
except putils.ProcessExecutionError:
LOG.error(_LE('Could not detach volume %(vol)s from device '
'%(dev)s.'), {'vol': name, 'dev': devname})
raise
self._decrement_attached_count(volume)
def _setup_lvm(self, volume):
# NOTE(joachim): One-device volume group to manage thin snapshots
size = self._size_int(volume['size']) * self.OVER_ALLOC_RATIO
size_str = '%dg' % size
vg = self._get_lvm_vg(volume, create_vg=True)
vg.create_volume(volume['name'], size_str, lv_type='thin')
def _destroy_lvm(self, volume):
vg = self._get_lvm_vg(volume)
if vg.lv_has_snapshot(volume['name']):
LOG.error(_LE('Unable to delete due to existing snapshot '
'for volume: %s.'),
volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
vg.destroy_vg()
# NOTE(joachim) Force lvm vg flush through a vgs command
vgs = vg.get_all_volume_groups(root_helper=self.root_helper,
vg_name=vg.vg_name)
if len(vgs) != 0:
LOG.warning(_LW('Removed volume group %s still appears in vgs.'),
vg.vg_name)
def _create_and_copy_volume(self, dstvol, srcvol):
"""Creates a volume from a volume or a snapshot."""
updates = self._create_file(dstvol)
# We need devices attached for IO operations.
with temp_lvm_device(self, srcvol) as vg, \
temp_raw_device(self, dstvol):
self._setup_lvm(dstvol)
# Some configurations of LVM do not automatically activate
# ThinLVM snapshot LVs.
with patched(lvm.LVM, 'activate_lv', self._activate_lv):
vg.activate_lv(srcvol['name'], True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
volutils.copy_volume(self._mapper_path(srcvol),
self._mapper_path(dstvol),
srcvol['volume_size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute)
return updates
def create_volume(self, volume):
"""Creates a volume.
Can optionally return a Dictionary of changes to the volume object to
be persisted.
"""
updates = self._create_file(volume)
# We need devices attached for LVM operations.
with temp_raw_device(self, volume):
self._setup_lvm(volume)
return updates
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
return self._create_and_copy_volume(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
updates = None
with temp_lvm_device(self, src_vref):
with temp_snapshot(self, volume, src_vref) as snapshot:
updates = self._create_and_copy_volume(volume, snapshot)
return updates
def delete_volume(self, volume):
"""Deletes a volume."""
attached = False
if self._is_attached(volume):
attached = True
with temp_lvm_device(self, volume):
self._destroy_lvm(volume)
self._detach_file(volume)
LOG.debug('Deleting volume %(volume_name)s, attached=%(attached)s',
{'volume_name': volume['name'], 'attached': attached})
self._destroy_file(volume)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
with temp_lvm_device(self, snapshot) as vg:
# NOTE(joachim) we only want to support thin lvm_types
vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
snapshot['volume_name'],
lv_type='thin')
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
with temp_lvm_device(self, snapshot) as vg:
if self._volume_not_present(
vg, self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_LW("snapshot: %s not found, "
"skipping delete operations"),
snapshot['name'])
return
vg.delete(self._escape_snapshot(snapshot['name']))
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service."""
stats = {
'vendor_name': 'Scality',
'driver_version': self.VERSION,
'storage_protocol': 'Scality Rest Block Device',
'total_capacity_gb': 'infinite',
'free_capacity_gb': 'infinite',
'reserved_percentage': 0,
'volume_backend_name': self.backend_name,
}
return stats
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
with temp_lvm_device(self, volume):
image_utils.fetch_to_volume_format(context,
image_service,
image_id,
self._mapper_path(volume),
'qcow2',
self.configuration.
volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
with temp_lvm_device(self, volume):
image_utils.upload_volume(context,
image_service,
image_meta,
self._mapper_path(volume))
def extend_volume(self, volume, new_size):
new_alloc_size = self._size_int(new_size) * self.OVER_ALLOC_RATIO
new_size_str = '%dg' % new_alloc_size
self._extend_file(volume, new_size)
with temp_lvm_device(self, volume) as vg:
vg.pv_resize(self._device_path(volume), new_size_str)
vg.extend_thin_pool()
vg.extend_volume(volume['name'], new_size_str)
class SRBISCSIDriver(SRBDriver, driver.ISCSIDriver):
"""Scality SRB volume driver with ISCSI support
This driver manages volumes provisioned by the Scality REST Block driver
Linux kernel module, backed by RESTful storage providers (e.g. Scality
CDMI), and exports them through ISCSI to Nova.
"""
VERSION = '1.0.0'
def __init__(self, *args, **kwargs):
self.db = kwargs.get('db')
self.target_driver = \
self.target_mapping[self.configuration.safe_get('iscsi_helper')]
super(SRBISCSIDriver, self).__init__(*args, **kwargs)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'SRB_iSCSI'
self.protocol = 'iSCSI'
def ensure_export(self, context, volume):
device_path = self._mapper_path(volume)
model_update = self.target_driver.ensure_export(context,
volume,
device_path)
if model_update:
self.db.volume_update(context, volume['id'], model_update)
def create_export(self, context, volume, connector):
"""Creates an export for a logical volume."""
self._attach_file(volume)
vg = self._get_lvm_vg(volume)
vg.activate_vg()
# SRB uses the same name as the volume for the VG
volume_path = self._mapper_path(volume)
data = self.target_driver.create_export(context,
volume,
volume_path)
return {
'provider_location': data['location'],
'provider_auth': data['auth'],
}
def remove_export(self, context, volume):
# NOTE(joachim) Taken from iscsi._ExportMixin.remove_export
# This allows us to avoid "detaching" a device not attached by
# an export, and avoid screwing up the device attach refcount.
try:
# Raises exception.NotFound if export not provisioned
iscsi_target = self.target_driver._get_iscsi_target(context,
volume['id'])
# Raises an Exception if currently not exported
location = volume['provider_location'].split(' ')
iqn = location[1]
self.target_driver.show_target(iscsi_target, iqn=iqn)
self.target_driver.remove_export(context, volume)
self._detach_file(volume)
except exception.NotFound:
LOG.warning(_LW('Volume %r not found while trying to remove.'),
volume['id'])
except Exception as exc:
LOG.warning(_LW('Error while removing export: %r'), exc)
| apache-2.0 | 1,523,186,427,821,571,600 | 3,134,184,104,849,128,400 | 36.825792 | 79 | 0.559543 | false |
vismartltd/edx-platform | lms/djangoapps/shoppingcart/migrations/0024_auto__add_field_courseregistrationcode_mode_slug.py | 109 | 18586 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseRegistrationCode.mode_slug'
db.add_column('shoppingcart_courseregistrationcode', 'mode_slug',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseRegistrationCode.mode_slug'
db.delete_column('shoppingcart_courseregistrationcode', 'mode_slug')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 12, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregcodeitem': {
'Meta': {'object_name': 'CourseRegCodeItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.courseregcodeitemannotation': {
'Meta': {'object_name': 'CourseRegCodeItemAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 12, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_order'", 'null': 'True', 'to': "orm['shoppingcart.Order']"})
},
'shoppingcart.donation': {
'Meta': {'object_name': 'Donation', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'general'", 'max_length': '32'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.donationconfiguration': {
'Meta': {'object_name': 'DonationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'address_line_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_type': ('django.db.models.fields.CharField', [], {'default': "'personal'", 'max_length': '32'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']", 'null': 'True'}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 12, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 | -1,606,592,717,989,216,500 | 4,492,332,638,581,980,700 | 84.256881 | 182 | 0.560153 | false |
Lothilius/python-jumble | Permutations.py | 1 | 2071 | # Files: Permutations.py
#
# Description: Creates permutations of an entered word.
#
__author__ = 'lothilius'
import math
class Permutations():
"""This module contains functions to generate all unique permutations
of a string, and to count permutations.
"""
    @staticmethod
    def countOccurrences(word):
# create a list of 26 0s to count occurrences of each
# letter.
word = word.lower()
occurs = [0]*26
for ch in word:
i = ord(ch) - ord('a')
occurs[i] += 1
return occurs
    @staticmethod
    def howManyPerms(word):
"""Return the number of permutations and unique
permutations of a string.
"""
word = word.lower()
n = len(word)
# count the occurrences of each letter in word.
occurs = Permutations.countOccurrences(word)
# For any letter that recurs, the number of unique
# permutations is the totalPerms divided by the
# factorial of that count.
divisor = 1
for i in range(26):
if occurs[i] > 1:
divisor *= math.factorial(occurs[i])
totalPerms = math.factorial(n)
        uniquePerms = totalPerms // divisor
return (totalPerms, uniquePerms)
# Fixed this so that it doesn't return duplicates.
    @staticmethod
    def allPermsAux(word, permsSeen):
"""This is an auxiliary function that generates all
unique permutations of the input string not already in
the list permsSeen.
"""
if len(word) <=1:
yield word
else:
for perm in Permutations.allPermsAux(word[1:], permsSeen):
for i in range(len(perm)+1):
newperm = perm[:i] + word[0] + perm[i:]
if not newperm in permsSeen:
permsSeen.append(newperm)
yield newperm
    @staticmethod
    def allPerms(word):
"""This function generates all unique permutations of the
input string.
"""
return Permutations.allPermsAux(word, [])
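# Illustrative usage (not part of the original module): a small, hedged sketch
# showing how the counting and generation functions are expected to agree.
# The word "peep" is an arbitrary example input.
if __name__ == '__main__':
    word = "peep"
    total, unique = Permutations.howManyPerms(word)
    perms = list(Permutations.allPerms(word))
    # The generator should yield exactly `unique` distinct full-length permutations.
    print(total, unique, len(perms), sorted(perms))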
| mit | -546,986,297,163,965,760 | -5,597,644,824,291,532,000 | 31.359375 | 73 | 0.568807 | false |
hexlism/xx_net | gae_proxy/local/gae_handler.py | 1 | 28513 | #!/usr/bin/env python
# coding:utf-8
import errno
import time
import struct
import zlib
import functools
import re
import io
import string
import socket
import ssl
import httplib
import Queue
import urlparse
import threading
from proxy import xlog
from connect_manager import https_manager
from appids_manager import appid_manager
import OpenSSL
NetWorkIOError = (socket.error, ssl.SSLError, OpenSSL.SSL.Error, OSError)
from config import config
from google_ip import google_ip
def generate_message_html(title, banner, detail=''):
MESSAGE_TEMPLATE = '''
<html><head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>$title</title>
<style><!--
body {font-family: arial,sans-serif}
div.nav {margin-top: 1ex}
div.nav A {font-size: 10pt; font-family: arial,sans-serif}
span.nav {font-size: 10pt; font-family: arial,sans-serif; font-weight: bold}
div.nav A,span.big {font-size: 12pt; color: #0000cc}
div.nav A {font-size: 10pt; color: black}
A.l:link {color: #6f6f6f}
A.u:link {color: green}
//--></style>
</head>
<body text=#000000 bgcolor=#ffffff>
<table border=0 cellpadding=2 cellspacing=0 width=100%>
<tr><td bgcolor=#3366cc><font face=arial,sans-serif color=#ffffff><b>Message</b></td></tr>
<tr><td> </td></tr></table>
<blockquote>
<H1>$banner</H1>
$detail
<p>
</blockquote>
<table width=100% cellpadding=0 cellspacing=0><tr><td bgcolor=#3366cc><img alt="" width=1 height=4></td></tr></table>
</body></html>
'''
return string.Template(MESSAGE_TEMPLATE).substitute(title=title, banner=banner, detail=detail)
def spawn_later(seconds, target, *args, **kwargs):
def wrap(*args, **kwargs):
__import__('time').sleep(seconds)
try:
result = target(*args, **kwargs)
except:
result = None
return result
return __import__('thread').start_new_thread(wrap, args, kwargs)
skip_headers = frozenset(['Vary',
'Via',
'X-Google-Cache-Control',
'X-Forwarded-For',
'Proxy-Authorization',
'Proxy-Connection',
'Upgrade',
'X-Chrome-Variations',
'Connection',
'Cache-Control'
])
def send_header(wfile, keyword, value):
keyword = keyword.title()
if keyword == 'Set-Cookie':
# https://cloud.google.com/appengine/docs/python/urlfetch/responseobjects
for cookie in re.split(r', (?=[^ =]+(?:=|$))', value):
wfile.write("%s: %s\r\n" % (keyword, cookie))
#logging.debug("Head1 %s: %s", keyword, cookie)
elif keyword == 'Content-Disposition' and '"' not in value:
value = re.sub(r'filename=([^"\']+)', 'filename="\\1"', value)
wfile.write("%s: %s\r\n" % (keyword, value))
#logging.debug("Head1 %s: %s", keyword, value)
else:
wfile.write("%s: %s\r\n" % (keyword, value))
#logging.debug("Head1 %s: %s", keyword, value)
def _request(sock, headers, payload, bufsize=8192):
request_data = 'POST /_gh/ HTTP/1.1\r\n'
request_data += ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items() if k not in skip_headers)
request_data += '\r\n'
if isinstance(payload, bytes):
sock.send(request_data.encode())
payload_len = len(payload)
start = 0
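        # Send the body in chunks of at most 65535 bytes per send() call.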
while start < payload_len:
send_size = min(payload_len - start, 65535)
sended = sock.send(payload[start:start+send_size])
start += sended
elif hasattr(payload, 'read'):
sock.send(request_data)
while True:
data = payload.read(bufsize)
if not data:
break
sock.send(data)
else:
raise TypeError('_request(payload) must be a string or buffer, not %r' % type(payload))
response = httplib.HTTPResponse(sock, buffering=True)
try:
orig_timeout = sock.gettimeout()
sock.settimeout(100)
response.begin()
sock.settimeout(orig_timeout)
except httplib.BadStatusLine as e:
#logging.warn("_request bad status line:%r", e)
response.close()
response = None
except Exception as e:
xlog.warn("_request:%r", e)
return response
class GAE_Exception(BaseException):
def __init__(self, type, message):
xlog.debug("GAE_Exception %r %r", type, message)
self.type = type
self.message = message
def request(headers={}, payload=None):
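    """POST the wrapped request over a pooled appspot SSL connection, retrying up to max_retry times."""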
max_retry = 3
for i in range(max_retry):
ssl_sock = None
try:
ssl_sock = https_manager.get_ssl_connection()
if not ssl_sock:
xlog.debug('create_ssl_connection fail')
continue
if ssl_sock.host == '':
ssl_sock.appid = appid_manager.get_appid()
if not ssl_sock.appid:
google_ip.report_connect_closed(ssl_sock.ip, "no appid")
time.sleep(60)
raise GAE_Exception(1, "no appid can use")
headers['Host'] = ssl_sock.appid + ".appspot.com"
ssl_sock.host = headers['Host']
else:
headers['Host'] = ssl_sock.host
response = _request(ssl_sock, headers, payload)
if not response:
google_ip.report_connect_closed(ssl_sock.ip, "request_fail")
ssl_sock.close()
continue
response.ssl_sock = ssl_sock
return response
except Exception as e:
xlog.exception('request failed:%s', e)
if ssl_sock:
google_ip.report_connect_closed(ssl_sock.ip, "request_except")
ssl_sock.close()
raise GAE_Exception(2, "try max times")
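# Raw-DEFLATE helpers: deflate() drops the 2-byte zlib header and 4-byte Adler-32
# trailer, and inflate() decodes such a headerless stream (-zlib.MAX_WBITS).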
def inflate(data):
return zlib.decompress(data, -zlib.MAX_WBITS)
def deflate(data):
return zlib.compress(data)[2:-4]
def fetch(method, url, headers, body):
if isinstance(body, basestring) and body:
if len(body) < 10 * 1024 * 1024 and 'Content-Encoding' not in headers:
zbody = deflate(body)
if len(zbody) < len(body):
body = zbody
headers['Content-Encoding'] = 'deflate'
if len(body) > 10 * 1024 * 1024:
xlog.warn("body len:%d %s %s", len(body), method, url)
headers['Content-Length'] = str(len(body))
# GAE donot allow set `Host` header
if 'Host' in headers:
del headers['Host']
kwargs = {}
if config.GAE_PASSWORD:
kwargs['password'] = config.GAE_PASSWORD
#kwargs['options'] =
#kwargs['validate'] =
kwargs['maxsize'] = config.AUTORANGE_MAXSIZE
kwargs['timeout'] = '19'
payload = '%s %s HTTP/1.1\r\n' % (method, url)
payload += ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items() if k not in skip_headers)
#for k, v in headers.items():
# logging.debug("Send %s: %s", k, v)
payload += ''.join('X-URLFETCH-%s: %s\r\n' % (k, v) for k, v in kwargs.items() if v)
request_headers = {}
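    # Wire format sent to the GAE-side handler: a 2-byte big-endian length of the
    # deflated request head, the deflated head itself, then the raw request body.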
payload = deflate(payload)
body = '%s%s%s' % (struct.pack('!h', len(payload)), payload, body)
request_headers['Content-Length'] = str(len(body))
response = request(request_headers, body)
response.app_msg = ''
response.app_status = response.status
if response.app_status != 200:
return response
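    # The response mirrors that framing: a 2-byte length prefix, a deflated block
    # holding the real status line and headers, then the body bytes.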
data = response.read(2)
if len(data) < 2:
xlog.warn("fetch too short lead byte len:%d %s", len(data), url)
response.app_status = 502
response.fp = io.BytesIO(b'connection aborted. too short lead byte data=' + data)
response.read = response.fp.read
return response
headers_length, = struct.unpack('!h', data)
data = response.read(headers_length)
if len(data) < headers_length:
xlog.warn("fetch too short header need:%d get:%d %s", headers_length, len(data), url)
response.app_status = 509
response.fp = io.BytesIO(b'connection aborted. too short headers data=' + data)
response.read = response.fp.read
return response
response.ssl_sock.received_size += headers_length
raw_response_line, headers_data = inflate(data).split('\r\n', 1)
_, response.status, response.reason = raw_response_line.split(None, 2)
response.status = int(response.status)
response.reason = response.reason.strip()
response.msg = httplib.HTTPMessage(io.BytesIO(headers_data))
response.app_msg = response.msg.fp.read()
return response
normcookie = functools.partial(re.compile(', ([^ =]+(?:=|$))').sub, '\\r\\nSet-Cookie: \\1')
normattachment = functools.partial(re.compile(r'filename=(.+?)').sub, 'filename="\\1"')
def send_response(wfile, status=404, headers={}, body=''):
headers = dict((k.title(), v) for k, v in headers.items())
if 'Transfer-Encoding' in headers:
del headers['Transfer-Encoding']
if 'Content-Length' not in headers:
headers['Content-Length'] = len(body)
if 'Connection' not in headers:
headers['Connection'] = 'close'
wfile.write("HTTP/1.1 %d\r\n" % status)
for key, value in headers.items():
#wfile.write("%s: %s\r\n" % (key, value))
send_header(wfile, key, value)
wfile.write("\r\n")
wfile.write(body)
def return_fail_message(wfile):
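    # The Chinese message below reads: "Connection timed out; take a short break and try again."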
html = generate_message_html('504 GAEProxy Proxy Time out', u'连接超时,先休息一会再来!')
send_response(wfile, 504, body=html.encode('utf-8'))
return
# fix bug for android market app: Mobogenie
# GAE url_fetch refuses empty header values.
def clean_empty_header(headers):
remove_list = []
for key in headers:
value = headers[key]
if value == "":
remove_list.append(key)
for key in remove_list:
del headers[key]
return headers
def handler(method, url, headers, body, wfile):
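    """Forward one proxied request through GAE and stream the response back to wfile."""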
time_request = time.time()
headers = clean_empty_header(headers)
errors = []
response = None
while True:
if time.time() - time_request > 30: #time out
return return_fail_message(wfile)
try:
response = fetch(method, url, headers, body)
if response.app_status != 200:
xlog.warn("fetch gae status:%s url:%s", response.app_status, url)
try:
server_type = response.getheader('Server', "")
if "gws" not in server_type and "Google Frontend" not in server_type and "GFE" not in server_type:
xlog.warn("IP:%s not support GAE, server type:%s", response.ssl_sock.ip, server_type)
google_ip.report_connect_fail(response.ssl_sock.ip, force_remove=True)
response.close()
continue
except Exception as e:
errors.append(e)
xlog.warn('gae_handler.handler %r %s , retry...', e, url)
continue
if response.app_status == 404:
#xlog.warning('APPID %r not exists, remove it.', response.ssl_sock.appid)
appid_manager.report_not_exist(response.ssl_sock.appid, response.ssl_sock.ip)
google_ip.report_connect_closed(response.ssl_sock.ip, "appid not exist")
appid = appid_manager.get_appid()
if not appid:
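                    # Chinese message: "No usable appid left; please configure a usable appid."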
html = generate_message_html('404 No usable Appid Exists', u'没有可用appid了,请配置可用的appid')
send_response(wfile, 404, body=html.encode('utf-8'))
response.close()
return
else:
response.close()
continue
if response.app_status == 403 or response.app_status == 405: #Method not allowed
                # Google has switched this host from gws to gvs, so the IP needs to be removed.
xlog.warning('405 Method not allowed. remove %s ', response.ssl_sock.ip)
# some ip can connect, and server type is gws
# but can't use as GAE server
# so we need remove it immediately
google_ip.report_connect_fail(response.ssl_sock.ip, force_remove=True)
response.close()
continue
if response.app_status == 503:
xlog.warning('APPID %r out of Quota, remove it. %s', response.ssl_sock.appid, response.ssl_sock.ip)
appid_manager.report_out_of_quota(response.ssl_sock.appid)
google_ip.report_connect_closed(response.ssl_sock.ip, "out of quota")
appid = appid_manager.get_appid()
if not appid:
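                    # Chinese message: "Appid quota exhausted; please add more appids."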
html = generate_message_html('503 No usable Appid Exists', u'appid流量不足,请增加appid')
send_response(wfile, 503, body=html.encode('utf-8'))
response.close()
return
else:
response.close()
continue
if response.app_status < 500:
break
except GAE_Exception as e:
errors.append(e)
xlog.warn("gae_exception:%r %s", e, url)
except Exception as e:
errors.append(e)
xlog.exception('gae_handler.handler %r %s , retry...', e, url)
if response.status == 206:
return RangeFetch(method, url, headers, body, response, wfile).fetch()
try:
wfile.write("HTTP/1.1 %d %s\r\n" % (response.status, response.reason))
response_headers = {}
for key, value in response.getheaders():
key = key.title()
if key == 'Transfer-Encoding':
#http://en.wikipedia.org/wiki/Chunked_transfer_encoding
continue
if key in skip_headers:
continue
response_headers[key] = value
if 'X-Head-Content-Length' in response_headers:
if method == "HEAD":
response_headers['Content-Length'] = response_headers['X-Head-Content-Length']
del response_headers['X-Head-Content-Length']
send_to_browser = True
try:
for key in response_headers:
value = response_headers[key]
send_header(wfile, key, value)
#logging.debug("Head- %s: %s", key, value)
wfile.write("\r\n")
except Exception as e:
send_to_browser = False
xlog.warn("gae_handler.handler send response fail. t:%d e:%r %s", time.time()-time_request, e, url)
if len(response.app_msg):
xlog.warn("APPID error:%d url:%s", response.status, url)
wfile.write(response.app_msg)
google_ip.report_connect_closed(response.ssl_sock.ip, "app err")
response.close()
return
content_length = int(response.getheader('Content-Length', 0))
content_range = response.getheader('Content-Range', '')
if content_range:
start, end, length = tuple(int(x) for x in re.search(r'bytes (\d+)-(\d+)/(\d+)', content_range).group(1, 2, 3))
else:
start, end, length = 0, content_length-1, content_length
body_length = end - start + 1
last_read_time = time.time()
time_response = time.time()
while True:
if start > end:
time_finished = time.time()
if body_length > 1024 and time_finished - time_response > 0:
speed = body_length / (time_finished - time_response)
xlog.info("GAE %d|%s|%d t:%d s:%d hs:%d Spd:%d %d %s",
response.ssl_sock.fd, response.ssl_sock.ip, response.ssl_sock.received_size, (time_finished-time_request)*1000,
length, response.ssl_sock.handshake_time, int(speed), response.status, url)
else:
xlog.info("GAE %d|%s|%d t:%d s:%d hs:%d %d %s",
response.ssl_sock.fd, response.ssl_sock.ip, response.ssl_sock.received_size, (time_finished-time_request)*1000,
length, response.ssl_sock.handshake_time, response.status, url)
response.ssl_sock.received_size += body_length
https_manager.save_ssl_connection_for_reuse(response.ssl_sock, call_time=time_request)
return
data = response.read(config.AUTORANGE_BUFSIZE)
if not data:
if time.time() - last_read_time > 20:
google_ip.report_connect_closed(response.ssl_sock.ip, "down fail")
response.close()
xlog.warn("read timeout t:%d len:%d left:%d %s", (time.time()-time_request)*1000, length, (end-start), url)
return
else:
time.sleep(0.1)
continue
last_read_time = time.time()
data_len = len(data)
start += data_len
if send_to_browser:
try:
ret = wfile.write(data)
if ret == ssl.SSL_ERROR_WANT_WRITE or ret == ssl.SSL_ERROR_WANT_READ:
xlog.debug("send to browser wfile.write ret:%d", ret)
ret = wfile.write(data)
except Exception as e_b:
if e_b[0] in (errno.ECONNABORTED, errno.EPIPE, errno.ECONNRESET) or 'bad write retry' in repr(e_b):
xlog.warn('gae_handler send to browser return %r %r', e_b, url)
else:
xlog.warn('gae_handler send to browser return %r %r', e_b, url)
send_to_browser = False
except NetWorkIOError as e:
time_except = time.time()
time_cost = time_except - time_request
if e[0] in (errno.ECONNABORTED, errno.EPIPE) or 'bad write retry' in repr(e):
xlog.warn("gae_handler err:%r time:%d %s ", e, time_cost, url)
google_ip.report_connect_closed(response.ssl_sock.ip, "Net")
else:
xlog.exception("gae_handler except:%r %s", e, url)
except Exception as e:
xlog.exception("gae_handler except:%r %s", e, url)
class RangeFetch(object):
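    """Serve a 206 response by splitting the remaining byte range into maxsize
    chunks, fetching them on several worker threads, and writing them to the
    client strictly in order."""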
threads = config.AUTORANGE_THREADS
maxsize = config.AUTORANGE_MAXSIZE
bufsize = config.AUTORANGE_BUFSIZE
waitsize = config.AUTORANGE_WAITSIZE
def __init__(self, method, url, headers, body, response, wfile):
self.method = method
self.wfile = wfile
self.url = url
self.headers = headers
self.body = body
self.response = response
self._stopped = False
self._last_app_status = {}
self.expect_begin = 0
def fetch(self):
response_headers = dict((k.title(), v) for k, v in self.response.getheaders())
content_range = response_headers['Content-Range']
start, end, length = tuple(int(x) for x in re.search(r'bytes (\d+)-(\d+)/(\d+)', content_range).group(1, 2, 3))
if start == 0:
response_headers['Content-Length'] = str(length)
del response_headers['Content-Range']
else:
response_headers['Content-Range'] = 'bytes %s-%s/%s' % (start, end, length)
response_headers['Content-Length'] = str(length-start)
xlog.info('>>>>>>>>>>>>>>> RangeFetch started(%r) %d-%d', self.url, start, end)
try:
self.wfile.write("HTTP/1.1 200 OK\r\n")
for key in response_headers:
if key == 'Transfer-Encoding':
continue
if key == 'X-Head-Content-Length':
continue
if key in skip_headers:
continue
value = response_headers[key]
#logging.debug("Head %s: %s", key.title(), value)
send_header(self.wfile, key, value)
self.wfile.write("\r\n")
except Exception as e:
self._stopped = True
xlog.warn("RangeFetch send response fail:%r %s", e, self.url)
return
data_queue = Queue.PriorityQueue()
range_queue = Queue.PriorityQueue()
range_queue.put((start, end, self.response))
self.expect_begin = start
for begin in range(end+1, length, self.maxsize):
range_queue.put((begin, min(begin+self.maxsize-1, length-1), None))
for i in xrange(0, self.threads):
range_delay_size = i * self.maxsize
spawn_later(float(range_delay_size)/self.waitsize, self.__fetchlet, range_queue, data_queue, range_delay_size)
has_peek = hasattr(data_queue, 'peek')
peek_timeout = 120
while self.expect_begin < length - 1:
try:
if has_peek:
begin, data = data_queue.peek(timeout=peek_timeout)
if self.expect_begin == begin:
data_queue.get()
elif self.expect_begin < begin:
time.sleep(0.1)
continue
else:
xlog.error('RangeFetch Error: begin(%r) < expect_begin(%r), quit.', begin, self.expect_begin)
break
else:
begin, data = data_queue.get(timeout=peek_timeout)
if self.expect_begin == begin:
pass
elif self.expect_begin < begin:
data_queue.put((begin, data))
time.sleep(0.1)
continue
else:
xlog.error('RangeFetch Error: begin(%r) < expect_begin(%r), quit.', begin, self.expect_begin)
break
except Queue.Empty:
xlog.error('data_queue peek timeout, break')
break
try:
ret = self.wfile.write(data)
if ret == ssl.SSL_ERROR_WANT_WRITE or ret == ssl.SSL_ERROR_WANT_READ:
xlog.debug("send to browser wfile.write ret:%d, retry", ret)
ret = self.wfile.write(data)
xlog.debug("send to browser wfile.write ret:%d", ret)
self.expect_begin += len(data)
del data
except Exception as e:
xlog.warn('RangeFetch client closed(%s). %s', e, self.url)
break
self._stopped = True
def __fetchlet(self, range_queue, data_queue, range_delay_size):
headers = dict((k.title(), v) for k, v in self.headers.items())
headers['Connection'] = 'close'
while not self._stopped:
try:
try:
start, end, response = range_queue.get(timeout=1)
if self.expect_begin < start and data_queue.qsize() * self.bufsize + range_delay_size > 30*1024*1024:
range_queue.put((start, end, response))
time.sleep(10)
continue
headers['Range'] = 'bytes=%d-%d' % (start, end)
if not response:
response = fetch(self.method, self.url, headers, self.body)
except Queue.Empty:
continue
except Exception as e:
xlog.warning("RangeFetch fetch response %r in __fetchlet", e)
range_queue.put((start, end, None))
continue
if not response:
xlog.warning('RangeFetch %s return %r', headers['Range'], response)
range_queue.put((start, end, None))
continue
if response.app_status != 200:
xlog.warning('Range Fetch return %s "%s %s" %s ', response.app_status, self.method, self.url, headers['Range'])
if response.app_status == 404:
xlog.warning('APPID %r not exists, remove it.', response.ssl_sock.appid)
appid_manager.report_not_exist(response.ssl_sock.appid, response.ssl_sock.ip)
appid = appid_manager.get_appid()
if not appid:
xlog.error("no appid left")
self._stopped = True
response.close()
return
if response.app_status == 503:
xlog.warning('APPID %r out of Quota, remove it temporary.', response.ssl_sock.appid)
appid_manager.report_out_of_quota(response.ssl_sock.appid)
appid = appid_manager.get_appid()
if not appid:
xlog.error("no appid left")
self._stopped = True
response.close()
return
google_ip.report_connect_closed(response.ssl_sock.ip, "app err")
response.close()
range_queue.put((start, end, None))
continue
if response.getheader('Location'):
self.url = urlparse.urljoin(self.url, response.getheader('Location'))
xlog.info('RangeFetch Redirect(%r)', self.url)
google_ip.report_connect_closed(response.ssl_sock.ip, "reLocation")
response.close()
range_queue.put((start, end, None))
continue
if 200 <= response.status < 300:
content_range = response.getheader('Content-Range')
if not content_range:
xlog.warning('RangeFetch "%s %s" return Content-Range=%r: response headers=%r, retry %s-%s',
self.method, self.url, content_range, response.getheaders(), start, end)
google_ip.report_connect_closed(response.ssl_sock.ip, "no range")
response.close()
range_queue.put((start, end, None))
continue
content_length = int(response.getheader('Content-Length', 0))
xlog.info('>>>>>>>>>>>>>>> [thread %s] %s %s', threading.currentThread().ident, content_length, content_range)
time_last_read = time.time()
while start < end + 1:
try:
data = response.read(self.bufsize)
if not data:
if time.time() - time_last_read > 20:
break
else:
time.sleep(0.1)
continue
time_last_read = time.time()
data_len = len(data)
data_queue.put((start, data))
start += data_len
except Exception as e:
xlog.warning('RangeFetch "%s %s" %s failed: %s', self.method, self.url, headers['Range'], e)
break
if start < end + 1:
xlog.warning('RangeFetch "%s %s" retry %s-%s', self.method, self.url, start, end)
google_ip.report_connect_closed(response.ssl_sock.ip, "down err")
response.close()
range_queue.put((start, end, None))
continue
https_manager.save_ssl_connection_for_reuse(response.ssl_sock)
xlog.info('>>>>>>>>>>>>>>> Successfully reached %d bytes.', start - 1)
else:
xlog.error('RangeFetch %r return %s', self.url, response.status)
google_ip.report_connect_closed(response.ssl_sock.ip, "status err")
response.close()
range_queue.put((start, end, None))
continue
except StandardError as e:
xlog.exception('RangeFetch._fetchlet error:%s', e)
raise
| bsd-2-clause | -2,296,436,639,244,261,600 | -8,784,091,694,664,710,000 | 39.930935 | 135 | 0.530495 | false |
sam-tsai/django-old | django/contrib/gis/sitemaps/views.py | 20 | 4249 | from django.http import HttpResponse, Http404
from django.template import loader
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.contrib.gis.db.models.fields import GeometryField
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import get_model
from django.utils.encoding import smart_str
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
def index(request, sitemaps):
"""
This view generates a sitemap index that uses the proper view
for resolving geographic section sitemap URLs.
"""
current_site = Site.objects.get_current()
sites = []
protocol = request.is_secure() and 'https' or 'http'
for section, site in sitemaps.items():
if callable(site):
pages = site().paginator.num_pages
else:
pages = site.paginator.num_pages
sitemap_url = urlresolvers.reverse('django.contrib.gis.sitemaps.views.sitemap', kwargs={'section': section})
sites.append('%s://%s%s' % (protocol, current_site.domain, sitemap_url))
if pages > 1:
for page in range(2, pages+1):
sites.append('%s://%s%s?p=%s' % (protocol, current_site.domain, sitemap_url, page))
xml = loader.render_to_string('sitemap_index.xml', {'sitemaps': sites})
return HttpResponse(xml, mimetype='application/xml')
def sitemap(request, sitemaps, section=None):
"""
This view generates a sitemap with additional geographic
elements defined by Google.
"""
maps, urls = [], []
if section is not None:
if section not in sitemaps:
raise Http404("No sitemap available for section: %r" % section)
maps.append(sitemaps[section])
else:
maps = sitemaps.values()
page = request.GET.get("p", 1)
for site in maps:
try:
if callable(site):
urls.extend(site().get_urls(page))
else:
urls.extend(site.get_urls(page))
except EmptyPage:
raise Http404("Page %s empty" % page)
except PageNotAnInteger:
raise Http404("No page '%s'" % page)
xml = smart_str(loader.render_to_string('gis/sitemaps/geo_sitemap.xml', {'urlset': urls}))
return HttpResponse(xml, mimetype='application/xml')
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
"""
This view generates KML for the given app label, model, and field name.
The model's default manager must be GeoManager, and the field name
must be that of a geographic field.
"""
placemarks = []
klass = get_model(label, model)
if not klass:
raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))
if field_name:
try:
info = klass._meta.get_field_by_name(field_name)
if not isinstance(info[0], GeometryField):
raise Exception
except:
raise Http404('Invalid geometry field.')
connection = connections[using]
if connection.ops.postgis:
# PostGIS will take care of transformation.
placemarks = klass._default_manager.using(using).kml(field_name=field_name)
else:
# There's no KML method on Oracle or MySQL, so we use the `kml`
# attribute of the lazy geometry instead.
placemarks = []
if connection.ops.oracle:
qs = klass._default_manager.using(using).transform(4326, field_name=field_name)
else:
qs = klass._default_manager.using(using).all()
for mod in qs:
setattr(mod, 'kml', getattr(mod, field_name).kml)
placemarks.append(mod)
    # Pick the render function (plain KML or compressed KMZ) and render the placemarks.
if compress:
render = render_to_kmz
else:
render = render_to_kml
return render('gis/kml/placemarks.kml', {'places' : placemarks})
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
"""
This view returns KMZ for the given app label, model, and field name.
"""
return kml(request, label, model, field_name, compress=True, using=using)
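# Hypothetical URLconf wiring for the views above (a sketch for illustration only;
# names such as `sitemaps` are assumptions, not part of this module):
# urlpatterns = patterns('',
#     (r'^sitemap\.xml$', 'django.contrib.gis.sitemaps.views.index', {'sitemaps': sitemaps}),
#     (r'^sitemap-(?P<section>\w+)\.xml$', 'django.contrib.gis.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
#     (r'^gis/kml/(?P<label>\w+)/(?P<model>\w+)\.kml$', 'django.contrib.gis.sitemaps.views.kml'),
# )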
| bsd-3-clause | -4,030,819,206,250,149,000 | 319,723,499,205,984,700 | 37.627273 | 116 | 0.645328 | false |
ranqingfa/ardupilot | Tools/autotest/autotest.py | 1 | 18485 | #!/usr/bin/env python
"""
APM automatic test suite
Andrew Tridgell, October 2011
"""
from __future__ import print_function
import atexit
import fnmatch
import glob
import optparse
import os
import shutil
import signal
import sys
import time
import traceback
import apmrover2
import arducopter
import arduplane
import quadplane
import ardusub
from pysim import util
from pymavlink import mavutil
from pymavlink.generator import mavtemplate
def buildlogs_dirpath():
return os.getenv("BUILDLOGS", util.reltopdir("../buildlogs"))
def buildlogs_path(path):
'''return a string representing path in the buildlogs directory'''
bits = [buildlogs_dirpath()]
if isinstance(path, list):
bits.extend(path)
else:
bits.append(path)
return os.path.join(*bits)
def get_default_params(atype, binary):
"""Get default parameters."""
# use rover simulator so SITL is not starved of input
HOME = mavutil.location(40.071374969556928, -105.22978898137808, 1583.702759, 246)
if "plane" in binary or "rover" in binary:
frame = "rover"
else:
frame = "+"
home = "%f,%f,%u,%u" % (HOME.lat, HOME.lng, HOME.alt, HOME.heading)
sitl = util.start_SITL(binary, wipe=True, model=frame, home=home, speedup=10, unhide_parameters=True)
mavproxy = util.start_MAVProxy_SITL(atype)
print("Dumping defaults")
idx = mavproxy.expect(['Please Run Setup', 'Saved [0-9]+ parameters to (\S+)'])
if idx == 0:
# we need to restart it after eeprom erase
util.pexpect_close(mavproxy)
util.pexpect_close(sitl)
sitl = util.start_SITL(binary, model=frame, home=home, speedup=10)
mavproxy = util.start_MAVProxy_SITL(atype)
idx = mavproxy.expect('Saved [0-9]+ parameters to (\S+)')
parmfile = mavproxy.match.group(1)
dest = buildlogs_path('%s-defaults.parm' % atype)
shutil.copy(parmfile, dest)
util.pexpect_close(mavproxy)
util.pexpect_close(sitl)
print("Saved defaults for %s to %s" % (atype, dest))
return True
def build_all():
"""Run the build_all.sh script."""
print("Running build_all.sh")
if util.run_cmd(util.reltopdir('Tools/scripts/build_all.sh'), directory=util.reltopdir('.')) != 0:
print("Failed build_all.sh")
return False
return True
def build_binaries():
"""Run the build_binaries.py script."""
print("Running build_binaries.py")
# copy the script as it changes git branch, which can change the script while running
orig = util.reltopdir('Tools/scripts/build_binaries.py')
copy = util.reltopdir('./build_binaries.py')
shutil.copy2(orig, copy)
# also copy generate_manifest library:
orig_gm = util.reltopdir('Tools/scripts/generate_manifest.py')
copy_gm = util.reltopdir('./generate_manifest.py')
shutil.copy2(orig_gm, copy_gm)
if util.run_cmd(copy, directory=util.reltopdir('.')) != 0:
print("Failed build_binaries.py")
return False
return True
def build_devrelease():
"""Run the build_devrelease.sh script."""
print("Running build_devrelease.sh")
# copy the script as it changes git branch, which can change the script while running
orig = util.reltopdir('Tools/scripts/build_devrelease.sh')
copy = util.reltopdir('./build_devrelease.sh')
shutil.copy2(orig, copy)
if util.run_cmd(copy, directory=util.reltopdir('.')) != 0:
print("Failed build_devrelease.sh")
return False
return True
def build_examples():
"""Build examples."""
for target in 'px4-v2', 'navio':
print("Running build.examples for %s" % target)
try:
util.build_examples(target)
except Exception as e:
print("Failed build_examples on board=%s" % target)
print(str(e))
return False
return True
def build_parameters():
"""Run the param_parse.py script."""
print("Running param_parse.py")
if util.run_cmd(util.reltopdir('Tools/autotest/param_metadata/param_parse.py'), directory=util.reltopdir('.')) != 0:
print("Failed param_parse.py")
return False
return True
def convert_gpx():
"""Convert any tlog files to GPX and KML."""
mavlog = glob.glob(buildlogs_path("*.tlog"))
for m in mavlog:
util.run_cmd(util.reltopdir("modules/mavlink/pymavlink/tools/mavtogpx.py") + " --nofixcheck " + m)
gpx = m + '.gpx'
kml = m + '.kml'
util.run_cmd('gpsbabel -i gpx -f %s -o kml,units=m,floating=1,extrude=1 -F %s' % (gpx, kml), checkfail=False)
util.run_cmd('zip %s.kmz %s.kml' % (m, m), checkfail=False)
util.run_cmd("mavflightview.py --imagefile=%s.png %s" % (m, m))
return True
def test_prerequisites():
"""Check we have the right directories and tools to run tests."""
print("Testing prerequisites")
util.mkdir_p(buildlogs_dirpath())
return True
def alarm_handler(signum, frame):
"""Handle test timeout."""
global results, opts
try:
results.add('TIMEOUT', '<span class="failed-text">FAILED</span>', opts.timeout)
util.pexpect_close_all()
convert_gpx()
write_fullresults()
os.killpg(0, signal.SIGKILL)
except Exception:
pass
sys.exit(1)
def should_run_step(step):
"""See if a step should be skipped."""
for skip in skipsteps:
if fnmatch.fnmatch(step.lower(), skip.lower()):
return False
return True
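# Map the vehicle name embedded in a step (e.g. "fly.ArduCopter") to its SITL binary name.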
__bin_names = {
"ArduCopter" : "arducopter",
"ArduPlane" : "arduplane",
"APMrover2" : "ardurover",
"AntennaTracker" : "antennatracker",
"CopterAVC" : "arducopter-heli",
"QuadPlane" : "arduplane",
"ArduSub" : "ardusub"
}
def binary_path(step, debug=False):
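    """Return the path to the SITL binary implied by a step name, or None when the step does not name a known vehicle."""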
try:
vehicle = step.split(".")[1]
except Exception:
return None
if vehicle in __bin_names:
binary_name = __bin_names[vehicle]
else:
# cope with builds that don't have a specific binary
return None
if debug:
binary_basedir = "sitl-debug"
else:
binary_basedir = "sitl"
binary = util.reltopdir(os.path.join('build', binary_basedir, 'bin', binary_name))
if not os.path.exists(binary):
if os.path.exists(binary + ".exe"):
binary += ".exe"
else:
raise ValueError("Binary (%s) does not exist" % (binary,))
return binary
def run_step(step):
"""Run one step."""
# remove old logs
util.run_cmd('/bin/rm -f logs/*.BIN logs/LASTLOG.TXT')
if step == "prerequisites":
return test_prerequisites()
build_opts = {
"j": opts.j,
"debug": opts.debug,
"clean": not opts.no_clean,
"configure": not opts.no_configure,
}
if step == 'build.ArduPlane':
return util.build_SITL('bin/arduplane', **build_opts)
if step == 'build.APMrover2':
return util.build_SITL('bin/ardurover', **build_opts)
if step == 'build.ArduCopter':
return util.build_SITL('bin/arducopter', **build_opts)
if step == 'build.AntennaTracker':
return util.build_SITL('bin/antennatracker', **build_opts)
if step == 'build.Helicopter':
return util.build_SITL('bin/arducopter-heli', **build_opts)
if step == 'build.ArduSub':
return util.build_SITL('bin/ardusub', **build_opts)
binary = binary_path(step, debug=opts.debug)
if step.startswith("default"):
vehicle = step[8:]
return get_default_params(vehicle, binary)
fly_opts = {
"viewerip": opts.viewerip,
"use_map": opts.map,
"valgrind": opts.valgrind,
"gdb": opts.gdb,
"gdbserver": opts.gdbserver,
}
if opts.speedup is not None:
        fly_opts["speedup"] = opts.speedup
if step == 'fly.ArduCopter':
return arducopter.fly_ArduCopter(binary, frame=opts.frame, **fly_opts)
if step == 'fly.CopterAVC':
return arducopter.fly_CopterAVC(binary, **fly_opts)
if step == 'fly.ArduPlane':
return arduplane.fly_ArduPlane(binary, **fly_opts)
if step == 'fly.QuadPlane':
return quadplane.fly_QuadPlane(binary, **fly_opts)
if step == 'drive.APMrover2':
return apmrover2.drive_APMrover2(binary, frame=opts.frame, **fly_opts)
if step == 'dive.ArduSub':
return ardusub.dive_ArduSub(binary, **fly_opts)
if step == 'build.All':
return build_all()
if step == 'build.Binaries':
return build_binaries()
if step == 'build.DevRelease':
return build_devrelease()
if step == 'build.Examples':
return build_examples()
if step == 'build.Parameters':
return build_parameters()
if step == 'convertgpx':
return convert_gpx()
raise RuntimeError("Unknown step %s" % step)
class TestResult(object):
"""Test result class."""
def __init__(self, name, result, elapsed):
self.name = name
self.result = result
self.elapsed = "%.1f" % elapsed
class TestFile(object):
"""Test result file."""
def __init__(self, name, fname):
self.name = name
self.fname = fname
class TestResults(object):
"""Test results class."""
def __init__(self):
self.date = time.asctime()
self.githash = util.run_cmd('git rev-parse HEAD', output=True, directory=util.reltopdir('.')).strip()
self.tests = []
self.files = []
self.images = []
def add(self, name, result, elapsed):
"""Add a result."""
self.tests.append(TestResult(name, result, elapsed))
def addfile(self, name, fname):
"""Add a result file."""
self.files.append(TestFile(name, fname))
def addimage(self, name, fname):
"""Add a result image."""
self.images.append(TestFile(name, fname))
def addglob(self, name, pattern):
"""Add a set of files."""
for f in glob.glob(buildlogs_path(pattern)):
self.addfile(name, os.path.basename(f))
def addglobimage(self, name, pattern):
"""Add a set of images."""
for f in glob.glob(buildlogs_path(pattern)):
self.addimage(name, os.path.basename(f))
def write_webresults(results_to_write):
"""Write webpage results."""
t = mavtemplate.MAVTemplate()
for h in glob.glob(util.reltopdir('Tools/autotest/web/*.html')):
html = util.loadfile(h)
f = open(buildlogs_path(os.path.basename(h)), mode='w')
t.write(f, html, results_to_write)
f.close()
for f in glob.glob(util.reltopdir('Tools/autotest/web/*.png')):
shutil.copy(f, buildlogs_path(os.path.basename(f)))
def write_fullresults():
"""Write out full results set."""
global results
results.addglob("Google Earth track", '*.kmz')
results.addfile('Full Logs', 'autotest-output.txt')
results.addglob('DataFlash Log', '*-log.bin')
results.addglob("MAVLink log", '*.tlog')
results.addglob("GPX track", '*.gpx')
# results common to all vehicles:
vehicle_files = [ ('{vehicle} build log', '{vehicle}.txt'),
('{vehicle} code size', '{vehicle}.sizes.txt'),
('{vehicle} stack sizes', '{vehicle}.framesizes.txt'),
('{vehicle} defaults', 'default_params/{vehicle}-defaults.parm'),
('{vehicle} core', '{vehicle}.core'),
('{vehicle} ELF', '{vehicle}.elf'),
]
vehicle_globs = [('{vehicle} log', '{vehicle}-*.BIN'),
]
for vehicle in 'ArduPlane','ArduCopter','APMrover2','AntennaTracker', 'ArduSub':
subs = { 'vehicle': vehicle }
for vehicle_file in vehicle_files:
description = vehicle_file[0].format(**subs)
filename = vehicle_file[1].format(**subs)
results.addfile(description, filename)
for vehicle_glob in vehicle_globs:
description = vehicle_glob[0].format(**subs)
glob = vehicle_glob[1].format(**subs)
results.addglob(description, glob)
results.addglob("CopterAVC log", 'CopterAVC-*.BIN')
results.addfile("CopterAVC core", 'CopterAVC.core')
results.addglob('APM:Libraries documentation', 'docs/libraries/index.html')
results.addglob('APM:Plane documentation', 'docs/ArduPlane/index.html')
results.addglob('APM:Copter documentation', 'docs/ArduCopter/index.html')
results.addglob('APM:Rover documentation', 'docs/APMrover2/index.html')
results.addglob('APM:Sub documentation', 'docs/ArduSub/index.html')
results.addglobimage("Flight Track", '*.png')
write_webresults(results)
def check_logs(step):
"""Check for log files from a step."""
print("check step: ", step)
if step.startswith('fly.'):
vehicle = step[4:]
elif step.startswith('drive.'):
vehicle = step[6:]
else:
return
logs = glob.glob("logs/*.BIN")
for log in logs:
bname = os.path.basename(log)
newname = buildlogs_path("%s-%s" % (vehicle, bname))
print("Renaming %s to %s" % (log, newname))
shutil.move(log, newname)
corefile = "core"
if os.path.exists(corefile):
newname = buildlogs_path("%s.core" % vehicle)
print("Renaming %s to %s" % (corefile, newname))
shutil.move(corefile, newname)
try:
util.run_cmd('/bin/cp build/sitl/bin/* %s' % buildlogs_dirpath(),
directory=util.reltopdir('.'))
except Exception:
print("Unable to save binary")
def run_tests(steps):
"""Run a list of steps."""
global results
passed = True
failed = []
for step in steps:
util.pexpect_close_all()
t1 = time.time()
print(">>>> RUNNING STEP: %s at %s" % (step, time.asctime()))
try:
if run_step(step):
results.add(step, '<span class="passed-text">PASSED</span>', time.time() - t1)
print(">>>> PASSED STEP: %s at %s" % (step, time.asctime()))
check_logs(step)
else:
print(">>>> FAILED STEP: %s at %s" % (step, time.asctime()))
passed = False
failed.append(step)
results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
except Exception as msg:
passed = False
failed.append(step)
print(">>>> FAILED STEP: %s at %s (%s)" % (step, time.asctime(), msg))
traceback.print_exc(file=sys.stdout)
results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
check_logs(step)
if not passed:
print("FAILED %u tests: %s" % (len(failed), failed))
util.pexpect_close_all()
write_fullresults()
return passed
if __name__ == "__main__":
############## main program #############
os.environ['PYTHONUNBUFFERED'] = '1'
os.putenv('TMPDIR', util.reltopdir('tmp'))
parser = optparse.OptionParser("autotest")
parser.add_option("--skip", type='string', default='', help='list of steps to skip (comma separated)')
parser.add_option("--list", action='store_true', default=False, help='list the available steps')
parser.add_option("--viewerip", default=None, help='IP address to send MAVLink and fg packets to')
parser.add_option("--map", action='store_true', default=False, help='show map')
parser.add_option("--experimental", default=False, action='store_true', help='enable experimental tests')
parser.add_option("--timeout", default=3000, type='int', help='maximum runtime in seconds')
parser.add_option("--speedup", default=None, type='int', help='speedup to run the simulations at')
parser.add_option("--valgrind", default=False, action='store_true', help='run ArduPilot binaries under valgrind')
parser.add_option("--gdb", default=False, action='store_true', help='run ArduPilot binaries under gdb')
parser.add_option("--debug", default=False, action='store_true', help='make built binaries debug binaries')
parser.add_option("-j", default=None, type='int', help='build CPUs')
parser.add_option("--frame", type='string', default=None, help='specify frame type')
parser.add_option("--gdbserver", default=False, action='store_true', help='run ArduPilot binaries under gdbserver')
parser.add_option("--no-clean", default=False, action='store_true', help='do not clean before building', dest="no_clean")
parser.add_option("--no-configure", default=False, action='store_true', help='do not configure before building', dest="no_configure")
opts, args = parser.parse_args()
steps = [
'prerequisites',
'build.All',
'build.Binaries',
# 'build.DevRelease',
'build.Examples',
'build.Parameters',
'build.ArduPlane',
'defaults.ArduPlane',
'fly.ArduPlane',
'fly.QuadPlane',
'build.APMrover2',
'defaults.APMrover2',
'drive.APMrover2',
'build.ArduCopter',
'defaults.ArduCopter',
'fly.ArduCopter',
'build.Helicopter',
'fly.CopterAVC',
'build.AntennaTracker',
'build.ArduSub',
'defaults.ArduSub',
'dive.ArduSub',
'convertgpx',
]
skipsteps = opts.skip.split(',')
# ensure we catch timeouts
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(opts.timeout)
if opts.list:
for step in steps:
print(step)
sys.exit(0)
util.mkdir_p(buildlogs_dirpath())
lckfile = buildlogs_path('autotest.lck')
print("lckfile=%s" % repr(lckfile))
lck = util.lock_file(lckfile)
if lck is None:
print("autotest is locked - exiting. lckfile=(%s)" % (lckfile,))
sys.exit(0)
atexit.register(util.pexpect_close_all)
if len(args) > 0:
# allow a wildcard list of steps
matched = []
for a in args:
matches = [step for step in steps if fnmatch.fnmatch(step.lower(), a.lower())]
if not len(matches):
print("No steps matched {}".format(a))
sys.exit(1)
matched.extend(matches)
steps = matched
# skip steps according to --skip option:
steps_to_run = [ s for s in steps if should_run_step(s) ]
results = TestResults()
try:
if not run_tests(steps_to_run):
sys.exit(1)
except KeyboardInterrupt:
util.pexpect_close_all()
sys.exit(1)
except Exception:
# make sure we kill off any children
util.pexpect_close_all()
raise
| gpl-3.0 | -860,403,988,771,481,700 | 4,982,660,859,373,442,000 | 31.316434 | 137 | 0.611036 | false |
hainm/scipy | scipy/special/setup.py | 77 | 4626 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import os
import sys
from os.path import join
from distutils.sysconfig import get_python_inc
import numpy
from numpy.distutils.misc_util import get_numpy_include_dirs
try:
from numpy.distutils.misc_util import get_info
except ImportError:
raise ValueError("numpy >= 1.4 is required (detected %s from %s)" %
(numpy.__version__, numpy.__file__))
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info as get_system_info
config = Configuration('special', parent_package, top_path)
define_macros = []
if sys.platform == 'win32':
# define_macros.append(('NOINFINITIES',None))
# define_macros.append(('NONANS',None))
define_macros.append(('_USE_MATH_DEFINES',None))
curdir = os.path.abspath(os.path.dirname(__file__))
inc_dirs = [get_python_inc(), os.path.join(curdir, "c_misc")]
if inc_dirs[0] != get_python_inc(plat_specific=1):
inc_dirs.append(get_python_inc(plat_specific=1))
inc_dirs.insert(0, get_numpy_include_dirs())
# C libraries
c_misc_src = [join('c_misc','*.c')]
c_misc_hdr = [join('c_misc','*.h')]
cephes_src = [join('cephes','*.c')]
cephes_hdr = [join('cephes', '*.h')]
config.add_library('sc_c_misc',sources=c_misc_src,
include_dirs=[curdir] + inc_dirs,
depends=(cephes_hdr + cephes_src
+ c_misc_hdr + cephes_hdr
+ ['*.h']),
macros=define_macros)
config.add_library('sc_cephes',sources=cephes_src,
include_dirs=[curdir] + inc_dirs,
depends=(cephes_hdr + ['*.h']),
macros=define_macros)
# Fortran/C++ libraries
mach_src = [join('mach','*.f')]
amos_src = [join('amos','*.f')]
cdf_src = [join('cdflib','*.f')]
specfun_src = [join('specfun','*.f')]
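    # sc_mach below wraps the classic machine-constant probe routines (d1mach and
    # friends); it is built with 'noopt' because optimizing compilers can miscompile
    # those probes.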
config.add_library('sc_mach',sources=mach_src,
config_fc={'noopt':(__file__,1)})
config.add_library('sc_amos',sources=amos_src)
config.add_library('sc_cdf',sources=cdf_src)
config.add_library('sc_specfun',sources=specfun_src)
# Extension specfun
config.add_extension('specfun',
sources=['specfun.pyf'],
f2py_options=['--no-wrap-functions'],
depends=specfun_src,
define_macros=[],
libraries=['sc_specfun'])
# Extension _ufuncs
headers = ['*.h', join('c_misc', '*.h'), join('cephes', '*.h')]
ufuncs_src = ['_ufuncs.c', 'sf_error.c', '_logit.c.src',
"amos_wrappers.c", "cdf_wrappers.c", "specfun_wrappers.c"]
ufuncs_dep = (headers + ufuncs_src + amos_src + c_misc_src + cephes_src
+ mach_src + cdf_src + specfun_src)
cfg = dict(get_system_info('lapack_opt'))
cfg.setdefault('include_dirs', []).extend([curdir] + inc_dirs + [numpy.get_include()])
cfg.setdefault('libraries', []).extend(['sc_amos','sc_c_misc','sc_cephes','sc_mach',
'sc_cdf', 'sc_specfun'])
cfg.setdefault('define_macros', []).extend(define_macros)
config.add_extension('_ufuncs',
depends=ufuncs_dep,
sources=ufuncs_src,
extra_info=get_info("npymath"),
**cfg)
# Extension _ufuncs_cxx
ufuncs_cxx_src = ['_ufuncs_cxx.cxx', 'sf_error.c',
'_faddeeva.cxx', 'Faddeeva.cc']
ufuncs_cxx_dep = (headers + ufuncs_cxx_src + cephes_src
+ ['*.hh'])
config.add_extension('_ufuncs_cxx',
sources=ufuncs_cxx_src,
depends=ufuncs_cxx_dep,
include_dirs=[curdir],
define_macros=define_macros,
extra_info=get_info("npymath"))
cfg = dict(get_system_info('lapack_opt'))
config.add_extension('_ellip_harm_2',
sources=['_ellip_harm_2.c', 'sf_error.c',],
**cfg
)
config.add_data_files('tests/*.py')
config.add_data_files('tests/data/README')
config.add_data_files('tests/data/*.npz')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause | -2,027,453,118,592,355,000 | 3,961,477,186,366,028,000 | 39.226087 | 90 | 0.539127 | false |
ergs/transmutagen | transmutagen/origen.py | 1 | 19764 | # PYTHON_ARGCOMPLETE_OK
import argparse
import os
from subprocess import run
import logging
from itertools import combinations
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import use_solver
import tables
from pyne.utils import toggle_warnings
import warnings
toggle_warnings()
warnings.simplefilter('ignore')
import pyne.data
import pyne.material
from pyne.origen22 import (nlbs, write_tape5_irradiation, write_tape4,
parse_tape9, merge_tape9, write_tape9, parse_tape6)
from pyne.material import from_atom_frac
from .util import load_sparse_csr, time_func
from .tape9utils import origen_to_name
from .codegen import CRAM_matrix_exp_lambdify
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
# Change to WARN for less output
logger.setLevel(logging.INFO)
ORIGEN = '/home/origen22/code/o2_therm_linux.exe'
# ORIGEN = '/home/o2prec/o2prec'
decay_TAPE9 = "/home/origen22/libs/decay.lib"
LIBS_DIR = "/home/origen22/libs"
DATA_DIR = os.path.abspath(os.path.join(__file__, os.path.pardir,
os.path.pardir, 'data'))
NUCLIDE_KEYS = ['activation_products', 'actinides', 'fission_products']
def run_origen(origen):
run(origen)
return parse_tape6()
def execute_origen(xs_tape9, time, nuclide, phi, origen, decay_tape9):
    if not os.path.isabs(xs_tape9):
        xs_tape9 = os.path.join(LIBS_DIR, xs_tape9)
parsed_xs_tape9 = parse_tape9(xs_tape9)
parsed_decay_tape9 = parse_tape9(decay_tape9)
merged_tape9 = merge_tape9([parsed_decay_tape9, parsed_xs_tape9])
# Can set outfile to change directory, but the file name needs to be
# TAPE9.INP.
write_tape9(merged_tape9)
decay_nlb, xsfpy_nlb = nlbs(parsed_xs_tape9)
# Can set outfile, but the file name should be called TAPE5.INP.
write_tape5_irradiation("IRF", time/(60*60*24), phi,
xsfpy_nlb=xsfpy_nlb, cut_off=0, out_table_num=[4, 5],
out_table_nes=[True, False, False])
M = from_atom_frac({nuclide: 1}, mass=1, atoms_per_molecule=1)
write_tape4(M)
# Make pyne use naive atomic mass numbers to match ORIGEN
for i in pyne.data.atomic_mass_map:
pyne.data.atomic_mass_map[i] = float(pyne.nucname.anum(i))
origen_time, data = time_func(run_origen, origen)
logger.info("ORIGEN runtime: %s", origen_time)
return origen_time, data
def load_data(datafile):
import pyne.data
# Make pyne use naive atomic mass numbers to match ORIGEN
for i in pyne.data.atomic_mass_map:
pyne.data.atomic_mass_map[i] = float(pyne.nucname.anum(i))
with open(datafile) as f:
return eval(f.read(), {'array': np.array, 'pyne': pyne})
def origen_to_array(origen_dict, nucs):
new_data = np.zeros((len(nucs), 1))
nuc_to_idx = {v: i for i, v in enumerate(nucs)}
for i in origen_dict:
new_data[nuc_to_idx[origen_to_name(i)]] += origen_dict[i][1]
return new_data
def origen_data_to_array_weighted(ORIGEN_data, nucs, n_fission_fragments=2.004):
# Table 5 is grams
table_5_weights = {}
table_5_nuclide = ORIGEN_data['table_5']['nuclide']
for key in NUCLIDE_KEYS:
table_5_weights[key] = np.sum(origen_to_array(table_5_nuclide[key], nucs), axis=0)
table_5_weights['fission_products'] *= n_fission_fragments
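    # The n_fission_fragments factor (~2.004 by default) reflects that each fission
    # yields roughly two fragments, so the fission-product weight is scaled up here.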
# Table 4 is atom fraction
table_4_nuclide = ORIGEN_data['table_4']['nuclide']
new_data = np.zeros((len(nucs), 1))
for key in NUCLIDE_KEYS:
new_data += table_5_weights[key]*origen_to_array(table_4_nuclide[key], nucs)
return new_data
def origen_data_to_array_atom_fraction(ORIGEN_data, nucs):
# Table 4 is atom fraction
table_4_nuclide = ORIGEN_data['table_4']['nuclide']
new_data = np.zeros((len(nucs), 1))
for key in NUCLIDE_KEYS:
new_data += origen_to_array(table_4_nuclide[key], nucs)
return new_data
def origen_data_to_array_materials(ORIGEN_data, nucs):
material = ORIGEN_data['materials'][1]
new_data = np.zeros((len(nucs), 1))
nuc_to_idx = {v: i for i, v in enumerate(nucs)}
for nuc, atom_frac in material.to_atom_frac().items():
new_data[nuc_to_idx[pyne.nucname.name(nuc)]] = atom_frac
return new_data
def hash_data(vec, library, time, phi, n_fission_fragments):
return hash((tuple(vec.flat), library, time, phi, n_fission_fragments))
def initial_vector(start_nuclide, nucs):
nuc_to_idx = {v: i for i, v in enumerate(nucs)}
return csr_matrix(([1], [[nuc_to_idx[start_nuclide]], [0]]),
shape=(len(nucs), 1)).toarray()
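# Minimal illustration of initial_vector (hypothetical nuclide list, not from a real run):
#   initial_vector('U235', ['H1', 'He4', 'U235'])  ->  [[0], [0], [1]]
# i.e. a dense one-hot column selecting the starting nuclide.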
def test_origen_data_sanity(ORIGEN_data):
for table in ['table_4', 'table_5']:
assert table in ORIGEN_data, table
assert 'nuclide' in ORIGEN_data[table]
nuclide = ORIGEN_data['table_4']['nuclide']
# Sanity check
for comb in combinations(NUCLIDE_KEYS, 2):
a, b = comb
for common in set.intersection(set(nuclide[a]), set(nuclide[b])):
array_a, array_b = nuclide[a][common], nuclide[b][common]
assert np.allclose(array_a, 0) \
or np.allclose(array_b, 0)
# or np.allclose(array_a, array_b)
def create_hdf5_table(file, lib, nucs):
nucs_size = len(nucs)
desc_common = [
('hash', np.int64),
('library', 'S8'),
('initial vector', np.float64, (nucs_size, 1)),
('time', np.float64),
('phi', np.float64),
('n_fission_fragments', np.float64),
]
desc_origen = [
('execution time ORIGEN', np.float64),
('ORIGEN atom fraction', np.float64, (nucs_size, 1)),
('ORIGEN mass fraction', np.float64, (nucs_size, 1)),
]
desc_cram_lambdify = [
('execution time CRAM lambdify', np.float64),
('CRAM lambdify atom fraction', np.float64, (nucs_size, 1)),
('CRAM lambdify mass fraction', np.float64, (nucs_size, 1)),
]
desc_cram_py_solve = [
('execution time CRAM py_solve', np.float64),
('CRAM py_solve atom fraction', np.float64, (nucs_size, 1)),
('CRAM py_solve mass fraction', np.float64, (nucs_size, 1)),
]
h5file = tables.open_file(file, mode="a", title="CRAM/ORIGEN test run data", filters=tables.Filters(complevel=1))
h5file.create_group('/', lib, '%s data' % lib)
h5file.create_table('/' + lib, 'origen', np.dtype(desc_common + desc_origen))
h5file.create_table('/' + lib, 'cram-lambdify-umfpack', np.dtype(desc_common + desc_cram_lambdify))
h5file.create_table('/' + lib, 'cram-lambdify-superlu', np.dtype(desc_common + desc_cram_lambdify))
h5file.create_table('/' + lib, 'cram-py_solve', np.dtype(desc_common + desc_cram_py_solve))
h5file.create_array('/' + lib, 'nucs', np.array(nucs, 'S6'))
def save_file_origen(file, *, ORIGEN_data, lib, nucs, start_nuclide, time,
phi, ORIGEN_time, n_fission_fragments=2.004):
with tables.open_file(file, mode="a", title="ORIGEN and CRAM data",
filters=tables.Filters(complevel=1)) as h5file:
if lib not in h5file.root:
create_hdf5_table(file, lib, nucs)
table = h5file.get_node(h5file.root, lib + '/origen')
table.row['initial vector'] = vec = initial_vector(start_nuclide, nucs)
table.row['library'] = lib
table.row['hash'] = hash_data(vec, lib, time, phi, n_fission_fragments)
table.row['time'] = time
table.row['phi'] = phi
table.row['n_fission_fragments'] = n_fission_fragments
table.row['execution time ORIGEN'] = ORIGEN_time
table.row['ORIGEN atom fraction'] = origen_data_to_array_weighted(ORIGEN_data, nucs, n_fission_fragments=n_fission_fragments)
table.row['ORIGEN mass fraction'] = origen_data_to_array_materials(ORIGEN_data, nucs)
table.row.append()
table.flush()
def save_file_cram_lambdify(file, *, CRAM_lambdify_res, lib, nucs, start_nuclide, time,
phi, CRAM_lambdify_time, umfpack, n_fission_fragments=2.004):
assert len(CRAM_lambdify_res) == len(nucs)
with tables.open_file(file, mode="a", title="ORIGEN and CRAM data",
filters=tables.Filters(complevel=1)) as h5file:
if lib not in h5file.root:
create_hdf5_table(file, lib, nucs)
nodename = '/cram-lambdify-umfpack' if umfpack else '/cram-lambdify-superlu'
table = h5file.get_node(h5file.root, lib + nodename)
table.row['initial vector'] = vec = initial_vector(start_nuclide, nucs)
table.row['library'] = lib
table.row['hash'] = hash_data(vec, lib, time, phi, n_fission_fragments)
table.row['time'] = time
table.row['phi'] = phi
table.row['n_fission_fragments'] = n_fission_fragments
table.row['execution time CRAM lambdify'] = CRAM_lambdify_time
table.row['CRAM lambdify atom fraction'] = CRAM_lambdify_res
CRAM_lambdify_res_normalized = CRAM_lambdify_res/np.sum(CRAM_lambdify_res)
table.row['CRAM lambdify mass fraction'] = CRAM_lambdify_res_normalized
table.row.append()
table.flush()
def save_file_cram_py_solve(file, *, CRAM_py_solve_res, lib, nucs, start_nuclide, time,
phi, CRAM_py_solve_time, n_fission_fragments=2.004):
assert len(CRAM_py_solve_res) == len(nucs)
with tables.open_file(file, mode="a", title="ORIGEN and CRAM data",
filters=tables.Filters(complevel=1)) as h5file:
if lib not in h5file.root:
create_hdf5_table(file, lib, nucs)
table = h5file.get_node(h5file.root, lib + '/cram-py_solve')
table.row['initial vector'] = vec = initial_vector(start_nuclide, nucs)
table.row['library'] = lib
table.row['hash'] = hash_data(vec, lib, time, phi, n_fission_fragments)
table.row['time'] = time
table.row['phi'] = phi
table.row['n_fission_fragments'] = n_fission_fragments
table.row['execution time CRAM py_solve'] = CRAM_py_solve_time
table.row['CRAM py_solve atom fraction'] = CRAM_py_solve_res
CRAM_py_solve_res_normalized = CRAM_py_solve_res/np.sum(CRAM_py_solve_res)
table.row['CRAM py_solve mass fraction'] = CRAM_py_solve_res_normalized
table.row.append()
table.flush()
def test_origen_against_CRAM_lambdify(xs_tape9, time, nuclide, phi, umfpack, alpha_as_He4=False):
e_complex = CRAM_matrix_exp_lambdify()
lambdify_desc = 'CRAM lambdify UMFPACK' if umfpack else 'CRAM lambdify SuperLU'
logger.info("Running %s %s at time=%s, nuclide=%s, phi=%s", lambdify_desc,
xs_tape9, time, nuclide, phi)
logger.info('-'*80)
alpha_part = '_alpha_as_He4' if alpha_as_He4 else ''
npzfilename = os.path.join('data', os.path.splitext(os.path.basename(xs_tape9))[0] + '_' +
str(phi) + alpha_part + '.npz')
nucs, mat = load_sparse_csr(npzfilename)
assert mat.shape[1] == len(nucs)
b = initial_vector(nuclide, nucs)
use_solver(useUmfpack=umfpack)
CRAM_lambdify_time, CRAM_lambdify_res = time_func(e_complex, -mat*float(time), b)
CRAM_lambdify_res = np.asarray(CRAM_lambdify_res)
logger.info("%s runtime: %s", lambdify_desc, CRAM_lambdify_time)
return CRAM_lambdify_time, CRAM_lambdify_res
def test_origen_against_CRAM_py_solve(xs_tape9, time, nuclide, phi, alpha_as_He4=False):
from .py_solve import expm_multiply14, asflat, N
logger.info("Running CRAM pysolve %s at time=%s, nuclide=%s, phi=%s", xs_tape9, time, nuclide, phi)
logger.info('-'*80)
alpha_part = '_alpha_as_He4' if alpha_as_He4 else ''
npzfilename = os.path.join('data', os.path.splitext(os.path.basename(xs_tape9))[0] + '_' +
str(phi) + alpha_part + '.npz')
nucs, mat = load_sparse_csr(npzfilename)
assert mat.shape[1] == len(nucs) == N
A = asflat(mat)
b = initial_vector(nuclide, nucs)
b = np.asarray(b, dtype='float64')
CRAM_py_solve_time, CRAM_py_solve_res = time_func(expm_multiply14, -A*float(time), b)
CRAM_py_solve_res = np.asarray(CRAM_py_solve_res)
logger.info("CRAM py_solve runtime: %s", CRAM_py_solve_time)
return CRAM_py_solve_time, CRAM_py_solve_res
def compute_mismatch(ORIGEN_res_weighted, ORIGEN_res_materials, CRAM_lambdify_umfpack_res, CRAM_lambdify_superlu_res, CRAM_py_solve_res, nucs, rtol=1e-3, atol=1e-5):
"""
Computes a mismatch analysis for an ORIGEN run vs. CRAM
The default rtol is 1e-3 because ORIGEN returns 3 digits.
    The default atol is 1e-5 because ORIGEN stops the Taylor expansion with
    the error term exp(ASUM)*ASUM**n/n! (using Stirling's approximation),
where n = 3.5*ASUM + 6 and ASUM is the max of the column sums. The max of
the column sums is ~2 because of fission, giving ~1e-5 (see ORIGEN lines
5075-5100)
"""
CRAM_lambdify_umfpack_res_normalized = CRAM_lambdify_umfpack_res/np.sum(CRAM_lambdify_umfpack_res)
CRAM_lambdify_superlu_res_normalized = CRAM_lambdify_superlu_res/np.sum(CRAM_lambdify_superlu_res)
CRAM_py_solve_res_normalized = CRAM_py_solve_res/np.sum(CRAM_py_solve_res)
for Clumf, Clsuper, Cpy, O, units in [
(CRAM_lambdify_umfpack_res, CRAM_lambdify_superlu_res, CRAM_py_solve_res, ORIGEN_res_weighted, 'atom fractions'),
(CRAM_lambdify_umfpack_res_normalized, CRAM_lambdify_superlu_res_normalized, CRAM_py_solve_res_normalized, ORIGEN_res_materials, 'mass fractions'),
# (CRAM_res_normalized, ORIGEN_res_atom_fraction, 'atom fraction'),
]:
logger.info("Units: %s", units)
d = {'CRAM lambdify UMFPACK': Clumf, 'CRAM lambdify SuperLU': Clsuper, 'CRAM py_solve': Cpy, 'ORIGEN': O}
for a_desc, b_desc in (
['CRAM lambdify UMFPACK', 'CRAM lambdify SuperLU'],
['CRAM lambdify UMFPACK', 'CRAM py_solve'],
['CRAM lambdify SuperLU', 'CRAM py_solve'],
['CRAM lambdify UMFPACK', 'ORIGEN'],
['CRAM lambdify SuperLU', 'ORIGEN'],
['CRAM py_solve', 'ORIGEN'],
):
a, b = d[a_desc], d[b_desc]
mismatching_indices = array_mismatch(a, b, rtol=rtol, atol=atol)
if mismatching_indices:
logger.info("%s and %s mismatch: Not equal to tolerance rtol=%s, atol=%s", a_desc, b_desc, rtol, atol)
logger.info("Mismatching elements sorted by error (%s, %s, symmetric relative error):", a_desc, b_desc)
rel_error = abs(a - b)/(a + b)
for i in mismatching_indices:
logger.info("%s %s %s %s", nucs[i], a[i], b[i], rel_error[i])
else:
logger.info("%s and %s arrays match with rtol=%s atol=%s", a_desc, b_desc, rtol, atol)
logger.info('')
# TODO: return some information here
def array_mismatch(a, b, rtol=1e-3, atol=1e-5):
"""
Test if arrays a and b mismatch with rtol and atol
If they do, return a list of mismatching indices. Otherwise, return False.
"""
mismatching_indices = []
try:
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
except AssertionError as e:
D = np.isclose(a, b, rtol=rtol, atol=atol)
rel_error = abs(a - b)/(a + b)
for i, in np.argsort(rel_error, axis=0)[::-1]:
if D[i]:
continue
mismatching_indices.append(i)
return mismatching_indices
else:
return False
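# Small usage sketch for array_mismatch (hypothetical column vectors, not real data):
#   a = np.array([[1.0], [2.0]]); b = np.array([[1.0], [2.5]])
#   array_mismatch(a, b)  ->  [1]    (only row 1 differs beyond rtol/atol)
#   array_mismatch(a, a)  ->  False  (arrays agree within tolerance)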
def make_parser():
p = argparse.ArgumentParser('origen', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('xs_tape9', metavar='xs-tape9', help="""path to the cross section TAPE9 file. If
the path is not absolute, defaults to looking in {LIBS_DIR}""".format(LIBS_DIR=LIBS_DIR))
p.add_argument('time', help='the time in sec',
type=float)
p.add_argument('--phi', help='the neutron flux in [n/cm^2/sec]',
type=float, default=4e14)
p.add_argument('--nuclide', help="The initial starting nuclide.",
default="U235")
p.add_argument('--decay-tape9', help="path to the decay TAPE9 file.",
default=decay_TAPE9)
p.add_argument('--origen', help="Path to the origen executable",
default=ORIGEN)
p.add_argument('--no-run-origen', action='store_false', dest='run_origen',
help="Don't run origen")
p.add_argument('--no-run-cram', action='store_false', dest='run_cram',
help="Don't run cram")
p.add_argument('--hdf5-file', default='data/results.hdf5', help="""hdf5 file
to write results to""")
return p
def execute(xs_tape9, time, phi, nuclide, hdf5_file='data/results.hdf5',
decay_tape9=decay_TAPE9, origen=ORIGEN, run_origen=True,
run_cram_lambdify=True, run_cram_py_solve=True, alpha_as_He4=False):
lib = os.path.splitext(os.path.basename(xs_tape9))[0]
alpha_part = '_alpha_as_He4' if alpha_as_He4 else ''
npzfilename = os.path.join('data', lib + '_' + str(phi) + alpha_part + '.npz')
nucs, mat = load_sparse_csr(npzfilename)
if run_origen:
n_fission_fragments = 2.004
ORIGEN_time, ORIGEN_data = execute_origen(xs_tape9, time, nuclide, phi,
origen, decay_tape9)
ORIGEN_res_weighted = origen_data_to_array_weighted(ORIGEN_data, nucs, n_fission_fragments=n_fission_fragments)
ORIGEN_res_materials = origen_data_to_array_materials(ORIGEN_data, nucs)
test_origen_data_sanity(ORIGEN_data)
save_file_origen(hdf5_file,
ORIGEN_data=ORIGEN_data,
lib=lib,
nucs=nucs,
start_nuclide=nuclide,
time=time,
phi=phi,
ORIGEN_time=ORIGEN_time,
n_fission_fragments=n_fission_fragments,
)
if run_cram_lambdify:
        try:
            # presence check for the UMFPACK bindings; the bare reference keeps
            # linters from flagging the import as unused
            import scikits.umfpack
            scikits.umfpack
        except ImportError:
            raise ImportError("scikits.umfpack is required. conda install scikit-umfpack")
umfpack = True
CRAM_lambdify_umfpack_time, CRAM_lambdify_umfpack_res = test_origen_against_CRAM_lambdify(xs_tape9, time, nuclide, phi,
umfpack, alpha_as_He4=alpha_as_He4)
save_file_cram_lambdify(hdf5_file,
CRAM_lambdify_res=CRAM_lambdify_umfpack_res,
lib=lib,
nucs=nucs,
start_nuclide=nuclide,
time=time,
phi=phi,
CRAM_lambdify_time=CRAM_lambdify_umfpack_time,
umfpack=umfpack,
)
umfpack = False
CRAM_lambdify_superlu_time, CRAM_lambdify_superlu_res = test_origen_against_CRAM_lambdify(xs_tape9, time, nuclide, phi,
umfpack, alpha_as_He4=alpha_as_He4)
save_file_cram_lambdify(hdf5_file,
CRAM_lambdify_res=CRAM_lambdify_superlu_res,
lib=lib,
nucs=nucs,
start_nuclide=nuclide,
time=time,
phi=phi,
CRAM_lambdify_time=CRAM_lambdify_superlu_time,
umfpack=umfpack,
)
if run_cram_py_solve:
CRAM_py_solve_time, CRAM_py_solve_res = test_origen_against_CRAM_py_solve(xs_tape9, time, nuclide, phi, alpha_as_He4=alpha_as_He4)
save_file_cram_py_solve(hdf5_file,
CRAM_py_solve_res=CRAM_py_solve_res,
lib=lib,
nucs=nucs,
start_nuclide=nuclide,
time=time,
phi=phi,
CRAM_py_solve_time=CRAM_py_solve_time,
)
if run_origen and run_cram_lambdify and run_cram_py_solve:
compute_mismatch(ORIGEN_res_weighted, ORIGEN_res_materials, CRAM_lambdify_umfpack_res, CRAM_lambdify_superlu_res, CRAM_py_solve_res, nucs)
def main():
p = make_parser()
try:
import argcomplete
argcomplete.autocomplete(p)
except ImportError:
pass
args = p.parse_args()
execute(**vars(args))
if __name__ == '__main__':
main()
| bsd-3-clause | 2,287,004,801,036,502,800 | 1,002,179,048,217,630,500 | 39.666667 | 165 | 0.634183 | false |
artnavsegda/avrnavsegda | xmega/bmp/bmp/src/ASF/common/services/gfx_mono/tools/bitmap.py | 73 | 2301 | ##
# \file
#
# \brief Output a 2 color bitmap as an uint8_t array
#
# Copyright (C) 2011-2014 Atmel Corporation. All rights reserved.
#
# \page License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. The name of Atmel may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# 4. This software may only be redistributed and used in connection with an
# Atmel microcontroller product.
#
# THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
# EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from PIL import Image
import sys
im = Image.open(sys.argv[1]);
new_im = im.load()
width, height = im.size
for y in range(0, height) :
for x in range(0, width) :
if 0 < new_im[x, y]:
new_im[x, y] = 1
sys.stdout.write(str(new_im[x, y]))
sys.stdout.write("\n")
sys.stdout.write("\n uint8_t image_header[] = {\n")
for y in range(0, height, 8) :
for x in range(0, width) :
first_byte = str(new_im[x, y + 7]) + str(new_im[x, y+6]) + str(new_im[x, y+5]) + str(new_im[x, y+4]) + str(new_im[x, y+3]) + str(new_im[x, y+2]) + str(new_im[x, y+1]) + str(new_im[x, y+0])
print "0x%x," % int(first_byte, 2),
sys.stdout.write("};\n")
| lgpl-3.0 | 305,531,238,724,660,700 | -2,851,536,683,101,407,700 | 38.672414 | 190 | 0.724902 | false |
mbareta/edx-platform-ft | lms/djangoapps/courseware/tests/test_module_render.py | 8 | 86350 | # -*- coding: utf-8 -*-
"""
Test for lms courseware app, module render unit
"""
import ddt
import itertools
import json
from nose.plugins.attrib import attr
from functools import partial
from bson import ObjectId
from django.http import Http404, HttpResponse
from django.core.urlresolvers import reverse
from django.conf import settings
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.contrib.auth.models import AnonymousUser
from mock import MagicMock, patch, Mock
from opaque_keys.edx.keys import UsageKey, CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from pyquery import PyQuery
from courseware.module_render import hash_resource
from xblock.field_data import FieldData
from xblock.runtime import Runtime
from xblock.fields import ScopeIds
from xblock.core import XBlock, XBlockAside
from xblock.fragment import Fragment
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from course_modes.models import CourseMode
from courseware import module_render as render
from courseware.courses import get_course_with_access, get_course_info_section
from courseware.field_overrides import OverrideFieldData
from courseware.model_data import FieldDataCache
from courseware.module_render import hash_resource, get_module_for_descriptor
from courseware.models import StudentModule
from courseware.tests.factories import StudentModuleFactory, UserFactory, GlobalStaffFactory
from courseware.tests.tests import LoginEnrollmentTestCase
from courseware.tests.test_submitting_problems import TestSubmittingProblems
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from openedx.core.lib.courses import course_image_url
from openedx.core.lib.gating import api as gating_api
from student.models import anonymous_id_for_user
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
SharedModuleStoreTestCase,
TEST_DATA_MIXED_MODULESTORE
)
from xmodule.lti_module import LTIDescriptor
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory, ToyCourseFactory, check_mongo_calls
from xmodule.modulestore.tests.test_asides import AsideTestType
from xmodule.x_module import XModuleDescriptor, XModule, STUDENT_VIEW, CombinedSystem
from openedx.core.djangoapps.credit.models import CreditCourse
from openedx.core.djangoapps.credit.api import (
set_credit_requirements,
set_credit_requirement_status
)
from edx_proctoring.api import (
create_exam,
create_exam_attempt,
update_attempt_status
)
from edx_proctoring.runtime import set_runtime_service
from edx_proctoring.tests.test_services import MockCreditService
from milestones.tests.utils import MilestonesTestCaseMixin
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
@XBlock.needs("field-data")
@XBlock.needs("i18n")
@XBlock.needs("fs")
@XBlock.needs("user")
@XBlock.needs("bookmarks")
class PureXBlock(XBlock):
"""
Pure XBlock to use in tests.
"""
pass
class EmptyXModule(XModule): # pylint: disable=abstract-method
"""
Empty XModule for testing with no dependencies.
"""
pass
class EmptyXModuleDescriptor(XModuleDescriptor): # pylint: disable=abstract-method
"""
Empty XModule for testing with no dependencies.
"""
module_class = EmptyXModule
class GradedStatelessXBlock(XBlock):
"""
This XBlock exists to test grade storage for blocks that don't store
student state in a scoped field.
"""
@XBlock.json_handler
def set_score(self, json_data, suffix): # pylint: disable=unused-argument
"""
Set the score for this testing XBlock.
"""
self.runtime.publish(
self,
'grade',
{
'value': json_data['grade'],
'max_value': 1
}
)
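# Note: when the runtime publishes the 'grade' event above, the LMS persists it to a
# StudentModule row (grade/max_grade), which is what test_score_without_student_state
# below asserts against.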
@attr('shard_1')
@ddt.ddt
class ModuleRenderTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Tests of courseware.module_render
"""
@classmethod
def setUpClass(cls):
super(ModuleRenderTestCase, cls).setUpClass()
cls.course_key = ToyCourseFactory.create().id
cls.toy_course = modulestore().get_course(cls.course_key)
# TODO: this test relies on the specific setup of the toy course.
# It should be rewritten to build the course it needs and then test that.
def setUp(self):
"""
Set up the course and user context
"""
super(ModuleRenderTestCase, self).setUp()
self.mock_user = UserFactory()
self.mock_user.id = 1
self.request_factory = RequestFactory()
# Construct a mock module for the modulestore to return
self.mock_module = MagicMock()
self.mock_module.id = 1
self.dispatch = 'score_update'
# Construct a 'standard' xqueue_callback url
self.callback_url = reverse(
'xqueue_callback',
kwargs=dict(
course_id=self.course_key.to_deprecated_string(),
userid=str(self.mock_user.id),
mod_id=self.mock_module.id,
dispatch=self.dispatch
)
)
def test_get_module(self):
self.assertEqual(
None,
render.get_module('dummyuser', None, 'invalid location', None)
)
def test_module_render_with_jump_to_id(self):
"""
        This test validates that the /jump_to_id/<id> shorthand for intracourse linking works as
        expected. Note there's an HTML element in the 'toy' course with the url_name 'toyjumpto' which
        defines this linkage
"""
mock_request = MagicMock()
mock_request.user = self.mock_user
course = get_course_with_access(self.mock_user, 'load', self.course_key)
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course_key, self.mock_user, course, depth=2)
module = render.get_module(
self.mock_user,
mock_request,
self.course_key.make_usage_key('html', 'toyjumpto'),
field_data_cache,
)
# get the rendered HTML output which should have the rewritten link
html = module.render(STUDENT_VIEW).content
# See if the url got rewritten to the target link
# note if the URL mapping changes then this assertion will break
self.assertIn('/courses/' + self.course_key.to_deprecated_string() + '/jump_to_id/vertical_test', html)
def test_xqueue_callback_success(self):
"""
Test for happy-path xqueue_callback
"""
fake_key = 'fake key'
xqueue_header = json.dumps({'lms_key': fake_key})
data = {
'xqueue_header': xqueue_header,
'xqueue_body': 'hello world',
}
# Patch getmodule to return our mock module
with patch('courseware.module_render.load_single_xblock', return_value=self.mock_module):
# call xqueue_callback with our mocked information
request = self.request_factory.post(self.callback_url, data)
render.xqueue_callback(
request,
unicode(self.course_key),
self.mock_user.id,
self.mock_module.id,
self.dispatch
)
# Verify that handle ajax is called with the correct data
request.POST['queuekey'] = fake_key
self.mock_module.handle_ajax.assert_called_once_with(self.dispatch, request.POST)
def test_xqueue_callback_missing_header_info(self):
data = {
'xqueue_header': '{}',
'xqueue_body': 'hello world',
}
with patch('courseware.module_render.load_single_xblock', return_value=self.mock_module):
# Test with missing xqueue data
with self.assertRaises(Http404):
request = self.request_factory.post(self.callback_url, {})
render.xqueue_callback(
request,
unicode(self.course_key),
self.mock_user.id,
self.mock_module.id,
self.dispatch
)
# Test with missing xqueue_header
with self.assertRaises(Http404):
request = self.request_factory.post(self.callback_url, data)
render.xqueue_callback(
request,
unicode(self.course_key),
self.mock_user.id,
self.mock_module.id,
self.dispatch
)
def test_get_score_bucket(self):
self.assertEquals(render.get_score_bucket(0, 10), 'incorrect')
self.assertEquals(render.get_score_bucket(1, 10), 'partial')
self.assertEquals(render.get_score_bucket(10, 10), 'correct')
# get_score_bucket calls error cases 'incorrect'
self.assertEquals(render.get_score_bucket(11, 10), 'incorrect')
self.assertEquals(render.get_score_bucket(-1, 10), 'incorrect')
def test_anonymous_handle_xblock_callback(self):
dispatch_url = reverse(
'xblock_handler',
args=[
self.course_key.to_deprecated_string(),
quote_slashes(self.course_key.make_usage_key('videosequence', 'Toy_Videos').to_deprecated_string()),
'xmodule_handler',
'goto_position'
]
)
response = self.client.post(dispatch_url, {'position': 2})
self.assertEquals(403, response.status_code)
self.assertEquals('Unauthenticated', response.content)
def test_missing_position_handler(self):
"""
        Test that sending a POST request with a missing or invalid position argument doesn't raise a server error
"""
self.client.login(username=self.mock_user.username, password="test")
dispatch_url = reverse(
'xblock_handler',
args=[
self.course_key.to_deprecated_string(),
quote_slashes(self.course_key.make_usage_key('videosequence', 'Toy_Videos').to_deprecated_string()),
'xmodule_handler',
'goto_position'
]
)
response = self.client.post(dispatch_url)
self.assertEqual(200, response.status_code)
self.assertEqual(json.loads(response.content), {'success': True})
response = self.client.post(dispatch_url, {'position': ''})
self.assertEqual(200, response.status_code)
self.assertEqual(json.loads(response.content), {'success': True})
response = self.client.post(dispatch_url, {'position': '-1'})
self.assertEqual(200, response.status_code)
self.assertEqual(json.loads(response.content), {'success': True})
response = self.client.post(dispatch_url, {'position': "string"})
self.assertEqual(200, response.status_code)
self.assertEqual(json.loads(response.content), {'success': True})
response = self.client.post(dispatch_url, {'position': u"Φυσικά"})
self.assertEqual(200, response.status_code)
self.assertEqual(json.loads(response.content), {'success': True})
response = self.client.post(dispatch_url, {'position': None})
self.assertEqual(200, response.status_code)
self.assertEqual(json.loads(response.content), {'success': True})
@ddt.data('pure', 'vertical')
@XBlock.register_temp_plugin(PureXBlock, identifier='pure')
def test_rebinding_same_user(self, block_type):
request = self.request_factory.get('')
request.user = self.mock_user
course = CourseFactory()
descriptor = ItemFactory(category=block_type, parent=course)
field_data_cache = FieldDataCache([self.toy_course, descriptor], self.toy_course.id, self.mock_user)
# This is verifying that caching doesn't cause an error during get_module_for_descriptor, which
# is why it calls the method twice identically.
render.get_module_for_descriptor(
self.mock_user,
request,
descriptor,
field_data_cache,
self.toy_course.id,
course=self.toy_course
)
render.get_module_for_descriptor(
self.mock_user,
request,
descriptor,
field_data_cache,
self.toy_course.id,
course=self.toy_course
)
@override_settings(FIELD_OVERRIDE_PROVIDERS=(
'courseware.student_field_overrides.IndividualStudentOverrideProvider',
))
def test_rebind_different_users(self):
"""
        This tests that rebinding a descriptor to a student does not result
        in overly nested _field_data.
"""
request = self.request_factory.get('')
request.user = self.mock_user
course = CourseFactory.create()
descriptor = ItemFactory(category='html', parent=course)
field_data_cache = FieldDataCache(
[course, descriptor], course.id, self.mock_user
)
# grab what _field_data was originally set to
original_field_data = descriptor._field_data # pylint: disable=protected-access, no-member
render.get_module_for_descriptor(
self.mock_user, request, descriptor, field_data_cache, course.id, course=course
)
# check that _unwrapped_field_data is the same as the original
# _field_data, but now _field_data as been reset.
# pylint: disable=protected-access, no-member
self.assertIs(descriptor._unwrapped_field_data, original_field_data)
self.assertIsNot(descriptor._unwrapped_field_data, descriptor._field_data)
# now bind this module to a few other students
for user in [UserFactory(), UserFactory(), UserFactory()]:
render.get_module_for_descriptor(
user,
request,
descriptor,
field_data_cache,
course.id,
course=course
)
# _field_data should now be wrapped by LmsFieldData
# pylint: disable=protected-access, no-member
self.assertIsInstance(descriptor._field_data, LmsFieldData)
# the LmsFieldData should now wrap OverrideFieldData
self.assertIsInstance(
# pylint: disable=protected-access, no-member
descriptor._field_data._authored_data._source,
OverrideFieldData
)
# the OverrideFieldData should point to the original unwrapped field_data
self.assertIs(
# pylint: disable=protected-access, no-member
descriptor._field_data._authored_data._source.fallback,
descriptor._unwrapped_field_data
)
def test_hash_resource(self):
"""
Ensure that the resource hasher works and does not fail on unicode,
decoded or otherwise.
"""
resources = ['ASCII text', u'❄ I am a special snowflake.', "❄ So am I, but I didn't tell you."]
self.assertEqual(hash_resource(resources), 'a76e27c8e80ca3efd7ce743093aa59e0')
@attr('shard_1')
class TestHandleXBlockCallback(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test the handle_xblock_callback function
"""
@classmethod
def setUpClass(cls):
super(TestHandleXBlockCallback, cls).setUpClass()
cls.course_key = ToyCourseFactory.create().id
cls.toy_course = modulestore().get_course(cls.course_key)
def setUp(self):
super(TestHandleXBlockCallback, self).setUp()
self.location = self.course_key.make_usage_key('chapter', 'Overview')
self.mock_user = UserFactory.create()
self.request_factory = RequestFactory()
# Construct a mock module for the modulestore to return
self.mock_module = MagicMock()
self.mock_module.id = 1
self.dispatch = 'score_update'
# Construct a 'standard' xqueue_callback url
self.callback_url = reverse(
'xqueue_callback', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'userid': str(self.mock_user.id),
'mod_id': self.mock_module.id,
'dispatch': self.dispatch
}
)
def _mock_file(self, name='file', size=10):
"""Create a mock file object for testing uploads"""
mock_file = MagicMock(
size=size,
read=lambda: 'x' * size
)
# We can't use `name` as a kwarg to Mock to set the name attribute
# because mock uses `name` to name the mock itself
mock_file.name = name
return mock_file
def test_invalid_location(self):
request = self.request_factory.post('dummy_url', data={'position': 1})
request.user = self.mock_user
with self.assertRaises(Http404):
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
'invalid Location',
                'dummy_handler',
                'dummy_dispatch'
)
def test_too_many_files(self):
request = self.request_factory.post(
'dummy_url',
data={'file_id': (self._mock_file(), ) * (settings.MAX_FILEUPLOADS_PER_INPUT + 1)}
)
request.user = self.mock_user
self.assertEquals(
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.location.to_deprecated_string()),
'dummy_handler'
).content,
json.dumps({
'success': 'Submission aborted! Maximum %d files may be submitted at once' %
settings.MAX_FILEUPLOADS_PER_INPUT
}, indent=2)
)
def test_too_large_file(self):
inputfile = self._mock_file(size=1 + settings.STUDENT_FILEUPLOAD_MAX_SIZE)
request = self.request_factory.post(
'dummy_url',
data={'file_id': inputfile}
)
request.user = self.mock_user
self.assertEquals(
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.location.to_deprecated_string()),
'dummy_handler'
).content,
json.dumps({
'success': 'Submission aborted! Your file "%s" is too large (max size: %d MB)' %
(inputfile.name, settings.STUDENT_FILEUPLOAD_MAX_SIZE / (1000 ** 2))
}, indent=2)
)
def test_xmodule_dispatch(self):
request = self.request_factory.post('dummy_url', data={'position': 1})
request.user = self.mock_user
response = render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.location.to_deprecated_string()),
'xmodule_handler',
'goto_position',
)
self.assertIsInstance(response, HttpResponse)
def test_bad_course_id(self):
request = self.request_factory.post('dummy_url')
request.user = self.mock_user
with self.assertRaises(Http404):
render.handle_xblock_callback(
request,
'bad_course_id',
quote_slashes(self.location.to_deprecated_string()),
'xmodule_handler',
'goto_position',
)
def test_bad_location(self):
request = self.request_factory.post('dummy_url')
request.user = self.mock_user
with self.assertRaises(Http404):
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.course_key.make_usage_key('chapter', 'bad_location').to_deprecated_string()),
'xmodule_handler',
'goto_position',
)
def test_bad_xmodule_dispatch(self):
request = self.request_factory.post('dummy_url')
request.user = self.mock_user
with self.assertRaises(Http404):
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.location.to_deprecated_string()),
'xmodule_handler',
'bad_dispatch',
)
def test_missing_handler(self):
request = self.request_factory.post('dummy_url')
request.user = self.mock_user
with self.assertRaises(Http404):
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.location.to_deprecated_string()),
'bad_handler',
'bad_dispatch',
)
@XBlock.register_temp_plugin(GradedStatelessXBlock, identifier='stateless_scorer')
def test_score_without_student_state(self):
course = CourseFactory.create()
block = ItemFactory.create(category='stateless_scorer', parent=course)
request = self.request_factory.post(
'dummy_url',
data=json.dumps({"grade": 0.75}),
content_type='application/json'
)
request.user = self.mock_user
response = render.handle_xblock_callback(
request,
unicode(course.id),
quote_slashes(unicode(block.scope_ids.usage_id)),
'set_score',
'',
)
self.assertEquals(response.status_code, 200)
student_module = StudentModule.objects.get(
student=self.mock_user,
module_state_key=block.scope_ids.usage_id,
)
self.assertEquals(student_module.grade, 0.75)
self.assertEquals(student_module.max_grade, 1)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_XBLOCK_VIEW_ENDPOINT': True})
def test_xblock_view_handler(self):
args = [
'edX/toy/2012_Fall',
quote_slashes('i4x://edX/toy/videosequence/Toy_Videos'),
'student_view'
]
xblock_view_url = reverse(
'xblock_view',
args=args
)
request = self.request_factory.get(xblock_view_url)
request.user = self.mock_user
response = render.xblock_view(request, *args)
self.assertEquals(200, response.status_code)
expected = ['csrf_token', 'html', 'resources']
content = json.loads(response.content)
for section in expected:
self.assertIn(section, content)
doc = PyQuery(content['html'])
self.assertEquals(len(doc('div.xblock-student_view-videosequence')), 1)
@attr('shard_1')
@ddt.ddt
class TestTOC(ModuleStoreTestCase):
"""Check the Table of Contents for a course"""
def setup_request_and_course(self, num_finds, num_sends):
"""
Sets up the toy course in the modulestore and the request object.
"""
self.course_key = ToyCourseFactory.create().id # pylint: disable=attribute-defined-outside-init
self.chapter = 'Overview'
chapter_url = '%s/%s/%s' % ('/courses', self.course_key, self.chapter)
factory = RequestFactory()
self.request = factory.get(chapter_url)
self.request.user = UserFactory()
self.modulestore = self.store._get_modulestore_for_courselike(self.course_key) # pylint: disable=protected-access, attribute-defined-outside-init
with self.modulestore.bulk_operations(self.course_key):
with check_mongo_calls(num_finds, num_sends):
self.toy_course = self.store.get_course(self.course_key, depth=2) # pylint: disable=attribute-defined-outside-init
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course_key, self.request.user, self.toy_course, depth=2
)
# Mongo makes 3 queries to load the course to depth 2:
# - 1 for the course
# - 1 for its children
# - 1 for its grandchildren
# Split makes 6 queries to load the course to depth 2:
# - load the structure
# - load 5 definitions
# Split makes 5 queries to render the toc:
# - it loads the active version at the start of the bulk operation
# - it loads 4 definitions, because it instantiates 4 VideoModules
# each of which access a Scope.content field in __init__
@ddt.data((ModuleStoreEnum.Type.mongo, 3, 0, 0), (ModuleStoreEnum.Type.split, 6, 0, 5))
@ddt.unpack
def test_toc_toy_from_chapter(self, default_ms, setup_finds, setup_sends, toc_finds):
with self.store.default_store(default_ms):
self.setup_request_and_course(setup_finds, setup_sends)
expected = ([{'active': True, 'sections':
[{'url_name': 'Toy_Videos', 'display_name': u'Toy Videos', 'graded': True,
'format': u'Lecture Sequence', 'due': None, 'active': False},
{'url_name': 'Welcome', 'display_name': u'Welcome', 'graded': True,
'format': '', 'due': None, 'active': False},
{'url_name': 'video_123456789012', 'display_name': 'Test Video', 'graded': True,
'format': '', 'due': None, 'active': False},
{'url_name': 'video_4f66f493ac8f', 'display_name': 'Video', 'graded': True,
'format': '', 'due': None, 'active': False}],
'url_name': 'Overview', 'display_name': u'Overview', 'display_id': u'overview'},
{'active': False, 'sections':
[{'url_name': 'toyvideo', 'display_name': 'toyvideo', 'graded': True,
'format': '', 'due': None, 'active': False}],
'url_name': 'secret:magic', 'display_name': 'secret:magic', 'display_id': 'secretmagic'}])
course = self.store.get_course(self.toy_course.id, depth=2)
with check_mongo_calls(toc_finds):
actual = render.toc_for_course(
self.request.user, self.request, course, self.chapter, None, self.field_data_cache
)
for toc_section in expected:
self.assertIn(toc_section, actual['chapters'])
self.assertIsNone(actual['previous_of_active_section'])
self.assertIsNone(actual['next_of_active_section'])
# Mongo makes 3 queries to load the course to depth 2:
# - 1 for the course
# - 1 for its children
# - 1 for its grandchildren
# Split makes 6 queries to load the course to depth 2:
# - load the structure
# - load 5 definitions
# Split makes 5 queries to render the toc:
# - it loads the active version at the start of the bulk operation
# - it loads 4 definitions, because it instantiates 4 VideoModules
# each of which access a Scope.content field in __init__
@ddt.data((ModuleStoreEnum.Type.mongo, 3, 0, 0), (ModuleStoreEnum.Type.split, 6, 0, 5))
@ddt.unpack
def test_toc_toy_from_section(self, default_ms, setup_finds, setup_sends, toc_finds):
with self.store.default_store(default_ms):
self.setup_request_and_course(setup_finds, setup_sends)
section = 'Welcome'
expected = ([{'active': True, 'sections':
[{'url_name': 'Toy_Videos', 'display_name': u'Toy Videos', 'graded': True,
'format': u'Lecture Sequence', 'due': None, 'active': False},
{'url_name': 'Welcome', 'display_name': u'Welcome', 'graded': True,
'format': '', 'due': None, 'active': True},
{'url_name': 'video_123456789012', 'display_name': 'Test Video', 'graded': True,
'format': '', 'due': None, 'active': False},
{'url_name': 'video_4f66f493ac8f', 'display_name': 'Video', 'graded': True,
'format': '', 'due': None, 'active': False}],
'url_name': 'Overview', 'display_name': u'Overview', 'display_id': u'overview'},
{'active': False, 'sections':
[{'url_name': 'toyvideo', 'display_name': 'toyvideo', 'graded': True,
'format': '', 'due': None, 'active': False}],
'url_name': 'secret:magic', 'display_name': 'secret:magic', 'display_id': 'secretmagic'}])
with check_mongo_calls(toc_finds):
actual = render.toc_for_course(
self.request.user, self.request, self.toy_course, self.chapter, section, self.field_data_cache
)
for toc_section in expected:
self.assertIn(toc_section, actual['chapters'])
self.assertEquals(actual['previous_of_active_section']['url_name'], 'Toy_Videos')
self.assertEquals(actual['next_of_active_section']['url_name'], 'video_123456789012')
@attr('shard_1')
@ddt.ddt
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_SPECIAL_EXAMS': True})
class TestProctoringRendering(SharedModuleStoreTestCase):
    """Check the Table of Contents for a course"""
    @classmethod
    def setUpClass(cls):
        super(TestProctoringRendering, cls).setUpClass()
        cls.course_key = ToyCourseFactory.create().id
def setUp(self):
"""
Set up the initial mongo datastores
"""
super(TestProctoringRendering, self).setUp()
self.chapter = 'Overview'
chapter_url = '%s/%s/%s' % ('/courses', self.course_key, self.chapter)
factory = RequestFactory()
self.request = factory.get(chapter_url)
self.request.user = UserFactory.create()
self.user = UserFactory.create()
self.modulestore = self.store._get_modulestore_for_courselike(self.course_key) # pylint: disable=protected-access
with self.modulestore.bulk_operations(self.course_key):
self.toy_course = self.store.get_course(self.course_key, depth=2)
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course_key, self.request.user, self.toy_course, depth=2
)
@ddt.data(
(CourseMode.DEFAULT_MODE_SLUG, False, None, None),
(
CourseMode.DEFAULT_MODE_SLUG,
True,
'eligible',
{
'status': 'eligible',
'short_description': 'Ungraded Practice Exam',
'suggested_icon': '',
'in_completed_state': False
}
),
(
CourseMode.DEFAULT_MODE_SLUG,
True,
'submitted',
{
'status': 'submitted',
'short_description': 'Practice Exam Completed',
'suggested_icon': 'fa-check',
'in_completed_state': True
}
),
(
CourseMode.DEFAULT_MODE_SLUG,
True,
'error',
{
'status': 'error',
'short_description': 'Practice Exam Failed',
'suggested_icon': 'fa-exclamation-triangle',
'in_completed_state': True
}
),
(
CourseMode.VERIFIED,
False,
None,
{
'status': 'eligible',
'short_description': 'Proctored Option Available',
'suggested_icon': 'fa-pencil-square-o',
'in_completed_state': False
}
),
(
CourseMode.VERIFIED,
False,
'declined',
{
'status': 'declined',
'short_description': 'Taking As Open Exam',
'suggested_icon': 'fa-pencil-square-o',
'in_completed_state': False
}
),
(
CourseMode.VERIFIED,
False,
'submitted',
{
'status': 'submitted',
'short_description': 'Pending Session Review',
'suggested_icon': 'fa-spinner fa-spin',
'in_completed_state': True
}
),
(
CourseMode.VERIFIED,
False,
'verified',
{
'status': 'verified',
'short_description': 'Passed Proctoring',
'suggested_icon': 'fa-check',
'in_completed_state': True
}
),
(
CourseMode.VERIFIED,
False,
'rejected',
{
'status': 'rejected',
'short_description': 'Failed Proctoring',
'suggested_icon': 'fa-exclamation-triangle',
'in_completed_state': True
}
),
(
CourseMode.VERIFIED,
False,
'error',
{
'status': 'error',
'short_description': 'Failed Proctoring',
'suggested_icon': 'fa-exclamation-triangle',
'in_completed_state': True
}
),
)
@ddt.unpack
def test_proctored_exam_toc(self, enrollment_mode, is_practice_exam,
attempt_status, expected):
"""
        Generate TOC for a course with a single chapter/sequence which contains a proctored exam
"""
self._setup_test_data(enrollment_mode, is_practice_exam, attempt_status)
actual = render.toc_for_course(
self.request.user,
self.request,
self.toy_course,
self.chapter,
'Toy_Videos',
self.field_data_cache
)
section_actual = self._find_section(actual['chapters'], 'Overview', 'Toy_Videos')
if expected:
self.assertIn(expected, [section_actual['proctoring']])
else:
# we expect there not to be a 'proctoring' key in the dict
self.assertNotIn('proctoring', section_actual)
self.assertIsNone(actual['previous_of_active_section'])
self.assertEquals(actual['next_of_active_section']['url_name'], u"Welcome")
@ddt.data(
(
CourseMode.DEFAULT_MODE_SLUG,
True,
None,
'Try a proctored exam',
True
),
(
CourseMode.DEFAULT_MODE_SLUG,
True,
'submitted',
'You have submitted this practice proctored exam',
False
),
(
CourseMode.DEFAULT_MODE_SLUG,
True,
'error',
'There was a problem with your practice proctoring session',
True
),
(
CourseMode.VERIFIED,
False,
None,
'This exam is proctored',
False
),
(
CourseMode.VERIFIED,
False,
'submitted',
'You have submitted this proctored exam for review',
True
),
(
CourseMode.VERIFIED,
False,
'verified',
'Your proctoring session was reviewed and passed all requirements',
False
),
(
CourseMode.VERIFIED,
False,
'rejected',
'Your proctoring session was reviewed and did not pass requirements',
True
),
(
CourseMode.VERIFIED,
False,
'error',
'There was a problem with your proctoring session',
False
),
)
@ddt.unpack
def test_render_proctored_exam(self, enrollment_mode, is_practice_exam,
attempt_status, expected, with_credit_context):
"""
        Verifies gated content from the student view rendering of a sequence
        that is labeled as a proctored exam
"""
usage_key = self._setup_test_data(enrollment_mode, is_practice_exam, attempt_status)
# initialize some credit requirements, if so then specify
if with_credit_context:
credit_course = CreditCourse(course_key=self.course_key, enabled=True)
credit_course.save()
set_credit_requirements(
self.course_key,
[
{
'namespace': 'reverification',
'name': 'reverification-1',
'display_name': 'ICRV1',
'criteria': {},
},
{
'namespace': 'proctored-exam',
'name': 'Exam1',
'display_name': 'A Proctored Exam',
'criteria': {}
}
]
)
set_credit_requirement_status(
self.request.user.username,
self.course_key,
'reverification',
'ICRV1'
)
module = render.get_module(
self.request.user,
self.request,
usage_key,
self.field_data_cache,
wrap_xmodule_display=True,
)
content = module.render(STUDENT_VIEW).content
self.assertIn(expected, content)
def _setup_test_data(self, enrollment_mode, is_practice_exam, attempt_status):
"""
Helper method to consolidate some courseware/proctoring/credit
test harness data
"""
usage_key = self.course_key.make_usage_key('videosequence', 'Toy_Videos')
sequence = self.modulestore.get_item(usage_key)
sequence.is_time_limited = True
sequence.is_proctored_exam = True
sequence.is_practice_exam = is_practice_exam
self.modulestore.update_item(sequence, self.user.id)
self.toy_course = self.modulestore.get_course(self.course_key)
# refresh cache after update
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course_key, self.request.user, self.toy_course, depth=2
)
set_runtime_service(
'credit',
MockCreditService(enrollment_mode=enrollment_mode)
)
exam_id = create_exam(
course_id=unicode(self.course_key),
content_id=unicode(sequence.location),
exam_name='foo',
time_limit_mins=10,
is_proctored=True,
is_practice_exam=is_practice_exam
)
if attempt_status:
create_exam_attempt(exam_id, self.request.user.id, taking_as_proctored=True)
update_attempt_status(exam_id, self.request.user.id, attempt_status)
return usage_key
def _find_url_name(self, toc, url_name):
"""
Helper to return the dict TOC section associated with a Chapter of url_name
"""
for entry in toc:
if entry['url_name'] == url_name:
return entry
return None
def _find_section(self, toc, chapter_url_name, section_url_name):
"""
Helper to return the dict TOC section associated with a section of url_name
"""
chapter = self._find_url_name(toc, chapter_url_name)
if chapter:
return self._find_url_name(chapter['sections'], section_url_name)
return None
@attr('shard_1')
class TestGatedSubsectionRendering(SharedModuleStoreTestCase, MilestonesTestCaseMixin):
    """
    Test that the TOC for a course is rendered correctly when there is gated content
    """
    @classmethod
    def setUpClass(cls):
        super(TestGatedSubsectionRendering, cls).setUpClass()
        cls.course = CourseFactory.create()
        cls.course.enable_subsection_gating = True
        cls.course.save()
        cls.store.update_item(cls.course, 0)
def setUp(self):
"""
Set up the initial test data
"""
super(TestGatedSubsectionRendering, self).setUp()
self.chapter = ItemFactory.create(
parent=self.course,
category="chapter",
display_name="Chapter"
)
self.open_seq = ItemFactory.create(
parent=self.chapter,
category='sequential',
display_name="Open Sequential"
)
self.gated_seq = ItemFactory.create(
parent=self.chapter,
category='sequential',
display_name="Gated Sequential"
)
self.request = RequestFactory().get('%s/%s/%s' % ('/courses', self.course.id, self.chapter.display_name))
self.request.user = UserFactory()
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course.id, self.request.user, self.course, depth=2
)
gating_api.add_prerequisite(self.course.id, self.open_seq.location)
gating_api.set_required_content(self.course.id, self.gated_seq.location, self.open_seq.location, 100)
def _find_url_name(self, toc, url_name):
"""
Helper to return the TOC section associated with url_name
"""
for entry in toc:
if entry['url_name'] == url_name:
return entry
return None
def _find_sequential(self, toc, chapter_url_name, sequential_url_name):
"""
Helper to return the sequential associated with sequential_url_name
"""
chapter = self._find_url_name(toc, chapter_url_name)
if chapter:
return self._find_url_name(chapter['sections'], sequential_url_name)
return None
def test_toc_with_gated_sequential(self):
"""
Test generation of TOC for a course with a gated subsection
"""
actual = render.toc_for_course(
self.request.user,
self.request,
self.course,
self.chapter.display_name,
self.open_seq.display_name,
self.field_data_cache
)
self.assertIsNotNone(self._find_sequential(actual['chapters'], 'Chapter', 'Open_Sequential'))
self.assertIsNone(self._find_sequential(actual['chapters'], 'Chapter', 'Gated_Sequential'))
self.assertIsNone(self._find_sequential(actual['chapters'], 'Non-existent_Chapter', 'Non-existent_Sequential'))
self.assertIsNone(actual['previous_of_active_section'])
self.assertIsNone(actual['next_of_active_section'])
@attr('shard_1')
@ddt.ddt
class TestHtmlModifiers(ModuleStoreTestCase):
"""
Tests to verify that standard modifications to the output of XModule/XBlock
student_view are taking place
"""
def setUp(self):
super(TestHtmlModifiers, self).setUp()
self.course = CourseFactory.create()
self.request = RequestFactory().get('/')
self.request.user = self.user
self.request.session = {}
self.content_string = '<p>This is the content<p>'
self.rewrite_link = '<a href="/static/foo/content">Test rewrite</a>'
self.rewrite_bad_link = '<img src="/static//file.jpg" />'
self.course_link = '<a href="/course/bar/content">Test course rewrite</a>'
self.descriptor = ItemFactory.create(
category='html',
data=self.content_string + self.rewrite_link + self.rewrite_bad_link + self.course_link
)
self.location = self.descriptor.location
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course.id,
self.user,
self.descriptor
)
def test_xmodule_display_wrapper_enabled(self):
module = render.get_module(
self.user,
self.request,
self.location,
self.field_data_cache,
wrap_xmodule_display=True,
)
result_fragment = module.render(STUDENT_VIEW)
self.assertEquals(len(PyQuery(result_fragment.content)('div.xblock.xblock-student_view.xmodule_HtmlModule')), 1)
def test_xmodule_display_wrapper_disabled(self):
module = render.get_module(
self.user,
self.request,
self.location,
self.field_data_cache,
wrap_xmodule_display=False,
)
result_fragment = module.render(STUDENT_VIEW)
self.assertNotIn('div class="xblock xblock-student_view xmodule_display xmodule_HtmlModule"', result_fragment.content)
def test_static_link_rewrite(self):
module = render.get_module(
self.user,
self.request,
self.location,
self.field_data_cache,
)
result_fragment = module.render(STUDENT_VIEW)
self.assertIn(
'/c4x/{org}/{course}/asset/foo_content'.format(
org=self.course.location.org,
course=self.course.location.course,
),
result_fragment.content
)
def test_static_badlink_rewrite(self):
module = render.get_module(
self.user,
self.request,
self.location,
self.field_data_cache,
)
result_fragment = module.render(STUDENT_VIEW)
self.assertIn(
'/c4x/{org}/{course}/asset/file.jpg'.format(
org=self.course.location.org,
course=self.course.location.course,
),
result_fragment.content
)
def test_static_asset_path_use(self):
'''
        When a course is loaded with do_import_static=False (see xml_importer.py),
        static_asset_path is set as an LMS key-value on the course. That should keep
        static paths from being mangled (i.e. not changed to c4x://).
'''
module = render.get_module(
self.user,
self.request,
self.location,
self.field_data_cache,
static_asset_path="toy_course_dir",
)
result_fragment = module.render(STUDENT_VIEW)
self.assertIn('href="/static/toy_course_dir', result_fragment.content)
def test_course_image(self):
url = course_image_url(self.course)
self.assertTrue(url.startswith('/c4x/'))
self.course.static_asset_path = "toy_course_dir"
url = course_image_url(self.course)
self.assertTrue(url.startswith('/static/toy_course_dir/'))
self.course.static_asset_path = ""
@override_settings(DEFAULT_COURSE_ABOUT_IMAGE_URL='test.png')
@override_settings(STATIC_URL='static/')
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_image_for_split_course(self, store):
"""
        For split courses, if course_image is empty then course_image_url will be
        the default image URL defined in settings.
"""
self.course = CourseFactory.create(default_store=store)
self.course.course_image = ''
url = course_image_url(self.course)
self.assertEqual('static/test.png', url)
def test_get_course_info_section(self):
self.course.static_asset_path = "toy_course_dir"
get_course_info_section(self.request, self.request.user, self.course, "handouts")
# NOTE: check handouts output...right now test course seems to have no such content
# at least this makes sure get_course_info_section returns without exception
def test_course_link_rewrite(self):
module = render.get_module(
self.user,
self.request,
self.location,
self.field_data_cache,
)
result_fragment = module.render(STUDENT_VIEW)
self.assertIn(
'/courses/{course_id}/bar/content'.format(
course_id=self.course.id.to_deprecated_string()
),
result_fragment.content
)
class XBlockWithJsonInitData(XBlock):
"""
Pure XBlock to use in tests, with JSON init data.
"""
the_json_data = None
def student_view(self, context=None): # pylint: disable=unused-argument
"""
A simple view that returns just enough to test.
"""
frag = Fragment(u"Hello there!")
frag.add_javascript(u'alert("Hi!");')
frag.initialize_js('ThumbsBlock', self.the_json_data)
return frag
@attr('shard_1')
@ddt.ddt
class JsonInitDataTest(ModuleStoreTestCase):
"""Tests for JSON data injected into the JS init function."""
@ddt.data(
({'a': 17}, '''{"a": 17}'''),
({'xss': '</script>alert("XSS")'}, r'''{"xss": "<\/script>alert(\"XSS\")"}'''),
)
@ddt.unpack
@XBlock.register_temp_plugin(XBlockWithJsonInitData, identifier='withjson')
def test_json_init_data(self, json_data, json_output):
XBlockWithJsonInitData.the_json_data = json_data
mock_user = UserFactory()
mock_request = MagicMock()
mock_request.user = mock_user
course = CourseFactory()
descriptor = ItemFactory(category='withjson', parent=course)
field_data_cache = FieldDataCache([course, descriptor], course.id, mock_user) # pylint: disable=no-member
module = render.get_module_for_descriptor(
mock_user,
mock_request,
descriptor,
field_data_cache,
course.id, # pylint: disable=no-member
course=course
)
html = module.render(STUDENT_VIEW).content
self.assertIn(json_output, html)
# No matter what data goes in, there should only be one close-script tag.
self.assertEqual(html.count("</script>"), 1)
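    # Editor's note (not part of the original test module): the XSS case above
    # matters because a naive json.dumps() leaves the literal close-tag intact,
    # which would let attacker-controlled data terminate the surrounding
    # <script> block:
    #
    #   >>> import json
    #   >>> json.dumps({'xss': '</script>alert("XSS")'})
    #   '{"xss": "</script>alert(\\"XSS\\")"}'
    #
    # The expected output in the ddt data escapes the forward slash, so the
    # serialized init data can never close the script tag early.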
class ViewInStudioTest(ModuleStoreTestCase):
"""Tests for the 'View in Studio' link visiblity."""
def setUp(self):
""" Set up the user and request that will be used. """
super(ViewInStudioTest, self).setUp()
self.staff_user = GlobalStaffFactory.create()
self.request = RequestFactory().get('/')
self.request.user = self.staff_user
self.request.session = {}
self.module = None
self.default_context = {'bookmarked': False, 'username': self.user.username}
def _get_module(self, course_id, descriptor, location):
"""
Get the module from the course from which to pattern match (or not) the 'View in Studio' buttons
"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_id,
self.staff_user,
descriptor
)
return render.get_module(
self.staff_user,
self.request,
location,
field_data_cache,
)
def setup_mongo_course(self, course_edit_method='Studio'):
""" Create a mongo backed course. """
course = CourseFactory.create(
course_edit_method=course_edit_method
)
descriptor = ItemFactory.create(
category='vertical',
parent_location=course.location,
)
child_descriptor = ItemFactory.create(
category='vertical',
parent_location=descriptor.location
)
self.module = self._get_module(course.id, descriptor, descriptor.location)
# pylint: disable=attribute-defined-outside-init
self.child_module = self._get_module(course.id, child_descriptor, child_descriptor.location)
@attr('shard_1')
class MongoViewInStudioTest(ViewInStudioTest):
"""Test the 'View in Studio' link visibility in a mongo backed course."""
def test_view_in_studio_link_studio_course(self):
"""Regular Studio courses should see 'View in Studio' links."""
self.setup_mongo_course()
result_fragment = self.module.render(STUDENT_VIEW, context=self.default_context)
self.assertIn('View Unit in Studio', result_fragment.content)
def test_view_in_studio_link_only_in_top_level_vertical(self):
"""Regular Studio courses should not see 'View in Studio' for child verticals of verticals."""
self.setup_mongo_course()
# Render the parent vertical, then check that there is only a single "View Unit in Studio" link.
result_fragment = self.module.render(STUDENT_VIEW, context=self.default_context)
# The single "View Unit in Studio" link should appear before the first xmodule vertical definition.
parts = result_fragment.content.split('data-block-type="vertical"')
self.assertEqual(3, len(parts), "Did not find two vertical blocks")
self.assertIn('View Unit in Studio', parts[0])
self.assertNotIn('View Unit in Studio', parts[1])
self.assertNotIn('View Unit in Studio', parts[2])
def test_view_in_studio_link_xml_authored(self):
"""Courses that change 'course_edit_method' setting can hide 'View in Studio' links."""
self.setup_mongo_course(course_edit_method='XML')
result_fragment = self.module.render(STUDENT_VIEW, context=self.default_context)
self.assertNotIn('View Unit in Studio', result_fragment.content)
@attr('shard_1')
class MixedViewInStudioTest(ViewInStudioTest):
"""Test the 'View in Studio' link visibility in a mixed mongo backed course."""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def test_view_in_studio_link_mongo_backed(self):
"""Mixed mongo courses that are mongo backed should see 'View in Studio' links."""
self.setup_mongo_course()
result_fragment = self.module.render(STUDENT_VIEW, context=self.default_context)
self.assertIn('View Unit in Studio', result_fragment.content)
def test_view_in_studio_link_xml_authored(self):
"""Courses that change 'course_edit_method' setting can hide 'View in Studio' links."""
self.setup_mongo_course(course_edit_method='XML')
result_fragment = self.module.render(STUDENT_VIEW, context=self.default_context)
self.assertNotIn('View Unit in Studio', result_fragment.content)
@XBlock.tag("detached")
class DetachedXBlock(XBlock):
"""
XBlock marked with the 'detached' flag.
"""
def student_view(self, context=None): # pylint: disable=unused-argument
"""
A simple view that returns just enough to test.
"""
frag = Fragment(u"Hello there!")
return frag
@attr('shard_1')
@patch.dict('django.conf.settings.FEATURES', {'DISPLAY_DEBUG_INFO_TO_STAFF': True, 'DISPLAY_HISTOGRAMS_TO_STAFF': True})
@patch('courseware.module_render.has_access', Mock(return_value=True, autospec=True))
class TestStaffDebugInfo(SharedModuleStoreTestCase):
"""Tests to verify that Staff Debug Info panel and histograms are displayed to staff."""
@classmethod
def setUpClass(cls):
super(TestStaffDebugInfo, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestStaffDebugInfo, self).setUp()
self.user = UserFactory.create()
self.request = RequestFactory().get('/')
self.request.user = self.user
self.request.session = {}
problem_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=2,
weight=2,
options=['Correct', 'Incorrect'],
correct_option='Correct'
)
self.descriptor = ItemFactory.create(
category='problem',
data=problem_xml,
display_name='Option Response Problem'
)
self.location = self.descriptor.location
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course.id,
self.user,
self.descriptor
)
@patch.dict('django.conf.settings.FEATURES', {'DISPLAY_DEBUG_INFO_TO_STAFF': False})
def test_staff_debug_info_disabled(self):
module = render.get_module(
self.user,
self.request,
self.location,
self.field_data_cache,
)
result_fragment = module.render(STUDENT_VIEW)
self.assertNotIn('Staff Debug', result_fragment.content)
def test_staff_debug_info_enabled(self):
module = render.get_module(
self.user,
self.request,
self.location,
self.field_data_cache,
)
result_fragment = module.render(STUDENT_VIEW)
self.assertIn('Staff Debug', result_fragment.content)
@XBlock.register_temp_plugin(DetachedXBlock, identifier='detached-block')
def test_staff_debug_info_disabled_for_detached_blocks(self):
"""Staff markup should not be present on detached blocks."""
descriptor = ItemFactory.create(
category='detached-block',
display_name='Detached Block'
)
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course.id,
self.user,
descriptor
)
module = render.get_module(
self.user,
self.request,
descriptor.location,
field_data_cache,
)
result_fragment = module.render(STUDENT_VIEW)
self.assertNotIn('Staff Debug', result_fragment.content)
@patch.dict('django.conf.settings.FEATURES', {'DISPLAY_HISTOGRAMS_TO_STAFF': False})
def test_histogram_disabled(self):
module = render.get_module(
self.user,
self.request,
self.location,
self.field_data_cache,
)
result_fragment = module.render(STUDENT_VIEW)
        self.assertNotIn('histogram', result_fragment.content)
def test_histogram_enabled_for_unscored_xmodules(self):
"""Histograms should not display for xmodules which are not scored."""
html_descriptor = ItemFactory.create(
category='html',
data='Here are some course details.'
)
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course.id,
self.user,
self.descriptor
)
with patch('openedx.core.lib.xblock_utils.grade_histogram') as mock_grade_histogram:
mock_grade_histogram.return_value = []
module = render.get_module(
self.user,
self.request,
html_descriptor.location,
field_data_cache,
)
module.render(STUDENT_VIEW)
self.assertFalse(mock_grade_histogram.called)
def test_histogram_enabled_for_scored_xmodules(self):
"""Histograms should display for xmodules which are scored."""
StudentModuleFactory.create(
course_id=self.course.id,
module_state_key=self.location,
student=UserFactory(),
grade=1,
max_grade=1,
state="{}",
)
with patch('openedx.core.lib.xblock_utils.grade_histogram') as mock_grade_histogram:
mock_grade_histogram.return_value = []
module = render.get_module(
self.user,
self.request,
self.location,
self.field_data_cache,
)
module.render(STUDENT_VIEW)
self.assertTrue(mock_grade_histogram.called)
PER_COURSE_ANONYMIZED_DESCRIPTORS = (LTIDescriptor, )
# The "set" here is to work around the bug that load_classes returns duplicates for multiply-delcared classes.
PER_STUDENT_ANONYMIZED_DESCRIPTORS = set(
class_ for (name, class_) in XModuleDescriptor.load_classes()
if not issubclass(class_, PER_COURSE_ANONYMIZED_DESCRIPTORS)
)
@attr('shard_1')
@ddt.ddt
class TestAnonymousStudentId(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test that anonymous_student_id is set correctly across a variety of XBlock types
"""
@classmethod
def setUpClass(cls):
super(TestAnonymousStudentId, cls).setUpClass()
cls.course_key = ToyCourseFactory.create().id
cls.course = modulestore().get_course(cls.course_key)
def setUp(self):
super(TestAnonymousStudentId, self).setUp()
self.user = UserFactory()
@patch('courseware.module_render.has_access', Mock(return_value=True, autospec=True))
def _get_anonymous_id(self, course_id, xblock_class):
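        # Build a minimally mocked descriptor of the requested XBlock class,
        # bind it for self.user via get_module_for_descriptor_internal, and
        # read back the anonymous_student_id that the LMS runtime computes for
        # that (user, course) pair.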
location = course_id.make_usage_key('dummy_category', 'dummy_name')
descriptor = Mock(
spec=xblock_class,
_field_data=Mock(spec=FieldData, name='field_data'),
location=location,
static_asset_path=None,
_runtime=Mock(
spec=Runtime,
resources_fs=None,
mixologist=Mock(_mixins=(), name='mixologist'),
name='runtime',
),
scope_ids=Mock(spec=ScopeIds),
name='descriptor',
_field_data_cache={},
_dirty_fields={},
fields={},
days_early_for_beta=None,
)
descriptor.runtime = CombinedSystem(descriptor._runtime, None) # pylint: disable=protected-access
# Use the xblock_class's bind_for_student method
descriptor.bind_for_student = partial(xblock_class.bind_for_student, descriptor)
if hasattr(xblock_class, 'module_class'):
descriptor.module_class = xblock_class.module_class
return render.get_module_for_descriptor_internal(
user=self.user,
descriptor=descriptor,
student_data=Mock(spec=FieldData, name='student_data'),
course_id=course_id,
track_function=Mock(name='track_function'), # Track Function
xqueue_callback_url_prefix=Mock(name='xqueue_callback_url_prefix'), # XQueue Callback Url Prefix
request_token='request_token',
course=self.course,
).xmodule_runtime.anonymous_student_id
@ddt.data(*PER_STUDENT_ANONYMIZED_DESCRIPTORS)
def test_per_student_anonymized_id(self, descriptor_class):
for course_id in ('MITx/6.00x/2012_Fall', 'MITx/6.00x/2013_Spring'):
self.assertEquals(
# This value is set by observation, so that later changes to the student
# id computation don't break old data
'5afe5d9bb03796557ee2614f5c9611fb',
self._get_anonymous_id(CourseKey.from_string(course_id), descriptor_class)
)
@ddt.data(*PER_COURSE_ANONYMIZED_DESCRIPTORS)
def test_per_course_anonymized_id(self, descriptor_class):
self.assertEquals(
# This value is set by observation, so that later changes to the student
# id computation don't break old data
'e3b0b940318df9c14be59acb08e78af5',
self._get_anonymous_id(SlashSeparatedCourseKey('MITx', '6.00x', '2012_Fall'), descriptor_class)
)
self.assertEquals(
# This value is set by observation, so that later changes to the student
# id computation don't break old data
'f82b5416c9f54b5ce33989511bb5ef2e',
self._get_anonymous_id(SlashSeparatedCourseKey('MITx', '6.00x', '2013_Spring'), descriptor_class)
)
@attr('shard_1')
@patch('track.views.tracker', autospec=True)
class TestModuleTrackingContext(SharedModuleStoreTestCase):
"""
Ensure correct tracking information is included in events emitted during XBlock callback handling.
"""
@classmethod
def setUpClass(cls):
super(TestModuleTrackingContext, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestModuleTrackingContext, self).setUp()
self.user = UserFactory.create()
self.request = RequestFactory().get('/')
self.request.user = self.user
self.request.session = {}
self.course = CourseFactory.create()
self.problem_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=2,
weight=2,
options=['Correct', 'Incorrect'],
correct_option='Correct'
)
def test_context_contains_display_name(self, mock_tracker):
problem_display_name = u'Option Response Problem'
module_info = self.handle_callback_and_get_module_info(mock_tracker, problem_display_name)
self.assertEquals(problem_display_name, module_info['display_name'])
@XBlockAside.register_temp_plugin(AsideTestType, 'test_aside')
@patch('xmodule.modulestore.mongo.base.CachingDescriptorSystem.applicable_aside_types',
lambda self, block: ['test_aside'])
@patch('lms.djangoapps.lms_xblock.runtime.LmsModuleSystem.applicable_aside_types',
lambda self, block: ['test_aside'])
def test_context_contains_aside_info(self, mock_tracker):
"""
        Check that related xblock asides populate information in the 'problem_check' event
        when the 'get_event_context' method exists.
"""
problem_display_name = u'Test Problem'
def get_event_context(self, event_type, event): # pylint: disable=unused-argument
"""
            This method returns data that should be associated with the "problem_check" event.
"""
return {'content': 'test1', 'data_field': 'test2'}
AsideTestType.get_event_context = get_event_context
context_info = self.handle_callback_and_get_context_info(mock_tracker, problem_display_name)
self.assertIn('asides', context_info)
self.assertIn('test_aside', context_info['asides'])
self.assertIn('content', context_info['asides']['test_aside'])
self.assertEquals(context_info['asides']['test_aside']['content'], 'test1')
self.assertIn('data_field', context_info['asides']['test_aside'])
self.assertEquals(context_info['asides']['test_aside']['data_field'], 'test2')
def handle_callback_and_get_context_info(self, mock_tracker, problem_display_name=None):
"""
Creates a fake module, invokes the callback and extracts the 'context'
metadata from the emitted problem_check event.
"""
descriptor_kwargs = {
'category': 'problem',
'data': self.problem_xml
}
if problem_display_name:
descriptor_kwargs['display_name'] = problem_display_name
descriptor = ItemFactory.create(**descriptor_kwargs)
render.handle_xblock_callback(
self.request,
self.course.id.to_deprecated_string(),
quote_slashes(descriptor.location.to_deprecated_string()),
'xmodule_handler',
'problem_check',
)
self.assertEquals(len(mock_tracker.send.mock_calls), 1)
mock_call = mock_tracker.send.mock_calls[0]
event = mock_call[1][0]
self.assertEquals(event['event_type'], 'problem_check')
return event['context']
def handle_callback_and_get_module_info(self, mock_tracker, problem_display_name=None):
"""
Creates a fake module, invokes the callback and extracts the 'module'
metadata from the emitted problem_check event.
"""
event = self.handle_callback_and_get_context_info(mock_tracker, problem_display_name)
return event['module']
def test_missing_display_name(self, mock_tracker):
actual_display_name = self.handle_callback_and_get_module_info(mock_tracker)['display_name']
self.assertTrue(actual_display_name.startswith('problem'))
def test_library_source_information(self, mock_tracker):
"""
Check that XBlocks that are inherited from a library include the
information about their library block source in events.
We patch the modulestore to avoid having to create a library.
"""
original_usage_key = UsageKey.from_string(u'block-v1:A+B+C+type@problem+block@abcd1234')
original_usage_version = ObjectId()
mock_get_original_usage = lambda _, key: (original_usage_key, original_usage_version)
with patch('xmodule.modulestore.mixed.MixedModuleStore.get_block_original_usage', mock_get_original_usage):
module_info = self.handle_callback_and_get_module_info(mock_tracker)
self.assertIn('original_usage_key', module_info)
self.assertEqual(module_info['original_usage_key'], unicode(original_usage_key))
self.assertIn('original_usage_version', module_info)
self.assertEqual(module_info['original_usage_version'], unicode(original_usage_version))
@attr('shard_1')
class TestXmoduleRuntimeEvent(TestSubmittingProblems):
"""
    Inherit from TestSubmittingProblems to get functionality that sets up a course and problems structure
"""
def setUp(self):
super(TestXmoduleRuntimeEvent, self).setUp()
self.homework = self.add_graded_section_to_course('homework')
self.problem = self.add_dropdown_to_section(self.homework.location, 'p1', 1)
self.grade_dict = {'value': 0.18, 'max_value': 32}
self.delete_dict = {'value': None, 'max_value': None}
def get_module_for_user(self, user):
"""Helper function to get useful module at self.location in self.course_id for user"""
mock_request = MagicMock()
mock_request.user = user
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course.id, user, self.course, depth=2)
return render.get_module( # pylint: disable=protected-access
user,
mock_request,
self.problem.location,
field_data_cache,
)._xmodule
def set_module_grade_using_publish(self, grade_dict):
"""Publish the user's grade, takes grade_dict as input"""
module = self.get_module_for_user(self.student_user)
module.system.publish(module, 'grade', grade_dict)
return module
def test_xmodule_runtime_publish(self):
"""Tests the publish mechanism"""
self.set_module_grade_using_publish(self.grade_dict)
student_module = StudentModule.objects.get(student=self.student_user, module_state_key=self.problem.location)
self.assertEqual(student_module.grade, self.grade_dict['value'])
self.assertEqual(student_module.max_grade, self.grade_dict['max_value'])
def test_xmodule_runtime_publish_delete(self):
"""Test deleting the grade using the publish mechanism"""
module = self.set_module_grade_using_publish(self.grade_dict)
module.system.publish(module, 'grade', self.delete_dict)
student_module = StudentModule.objects.get(student=self.student_user, module_state_key=self.problem.location)
self.assertIsNone(student_module.grade)
self.assertIsNone(student_module.max_grade)
@patch('courseware.module_render.SCORE_CHANGED.send')
def test_score_change_signal(self, send_mock):
"""Test that a Django signal is generated when a score changes"""
self.set_module_grade_using_publish(self.grade_dict)
expected_signal_kwargs = {
'sender': None,
'points_possible': self.grade_dict['max_value'],
'points_earned': self.grade_dict['value'],
'user_id': self.student_user.id,
'course_id': unicode(self.course.id),
'usage_id': unicode(self.problem.location)
}
send_mock.assert_called_with(**expected_signal_kwargs)
@attr('shard_1')
class TestRebindModule(TestSubmittingProblems):
"""
Tests to verify the functionality of rebinding a module.
    Inherit from TestSubmittingProblems to get functionality that sets up a course structure
"""
def setUp(self):
super(TestRebindModule, self).setUp()
self.homework = self.add_graded_section_to_course('homework')
self.lti = ItemFactory.create(category='lti', parent=self.homework)
self.problem = ItemFactory.create(category='problem', parent=self.homework)
self.user = UserFactory.create()
self.anon_user = AnonymousUser()
def get_module_for_user(self, user, item=None):
"""Helper function to get useful module at self.location in self.course_id for user"""
mock_request = MagicMock()
mock_request.user = user
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course.id, user, self.course, depth=2)
if item is None:
item = self.lti
return render.get_module( # pylint: disable=protected-access
user,
mock_request,
item.location,
field_data_cache,
)._xmodule
def test_rebind_module_to_new_users(self):
module = self.get_module_for_user(self.user, self.problem)
# Bind the module to another student, which will remove "correct_map"
# from the module's _field_data_cache and _dirty_fields.
user2 = UserFactory.create()
module.descriptor.bind_for_student(module.system, user2.id)
# XBlock's save method assumes that if a field is in _dirty_fields,
# then it's also in _field_data_cache. If this assumption
# doesn't hold, then we get an error trying to bind this module
# to a third student, since we've removed "correct_map" from
# _field_data cache, but not _dirty_fields, when we bound
# this module to the second student. (TNL-2640)
user3 = UserFactory.create()
module.descriptor.bind_for_student(module.system, user3.id)
def test_rebind_noauth_module_to_user_not_anonymous(self):
"""
Tests that an exception is thrown when rebind_noauth_module_to_user is run from a
module bound to a real user
"""
module = self.get_module_for_user(self.user)
user2 = UserFactory()
user2.id = 2
with self.assertRaisesRegexp(
render.LmsModuleRenderError,
"rebind_noauth_module_to_user can only be called from a module bound to an anonymous user"
):
self.assertTrue(module.system.rebind_noauth_module_to_user(module, user2))
def test_rebind_noauth_module_to_user_anonymous(self):
"""
Tests that get_user_module_for_noauth succeeds when rebind_noauth_module_to_user is run from a
module bound to AnonymousUser
"""
module = self.get_module_for_user(self.anon_user)
user2 = UserFactory()
user2.id = 2
module.system.rebind_noauth_module_to_user(module, user2)
self.assertTrue(module)
self.assertEqual(module.system.anonymous_student_id, anonymous_id_for_user(user2, self.course.id))
self.assertEqual(module.scope_ids.user_id, user2.id)
self.assertEqual(module.descriptor.scope_ids.user_id, user2.id)
@attr('shard_1')
@ddt.ddt
class TestEventPublishing(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Tests of event publishing for both XModules and XBlocks.
"""
def setUp(self):
"""
Set up the course and user context
"""
super(TestEventPublishing, self).setUp()
self.mock_user = UserFactory()
self.mock_user.id = 1
self.request_factory = RequestFactory()
@ddt.data('xblock', 'xmodule')
@XBlock.register_temp_plugin(PureXBlock, identifier='xblock')
@XBlock.register_temp_plugin(EmptyXModuleDescriptor, identifier='xmodule')
@patch.object(render, 'make_track_function')
def test_event_publishing(self, block_type, mock_track_function):
request = self.request_factory.get('')
request.user = self.mock_user
course = CourseFactory()
descriptor = ItemFactory(category=block_type, parent=course)
field_data_cache = FieldDataCache([course, descriptor], course.id, self.mock_user) # pylint: disable=no-member
block = render.get_module(self.mock_user, request, descriptor.location, field_data_cache)
event_type = 'event_type'
event = {'event': 'data'}
block.runtime.publish(block, event_type, event)
mock_track_function.assert_called_once_with(request)
mock_track_function.return_value.assert_called_once_with(event_type, event)
@attr('shard_1')
@ddt.ddt
class LMSXBlockServiceBindingTest(SharedModuleStoreTestCase):
"""
Tests that the LMS Module System (XBlock Runtime) provides an expected set of services.
"""
@classmethod
def setUpClass(cls):
super(LMSXBlockServiceBindingTest, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
"""
Set up the user and other fields that will be used to instantiate the runtime.
"""
super(LMSXBlockServiceBindingTest, self).setUp()
self.user = UserFactory()
self.student_data = Mock()
self.track_function = Mock()
self.xqueue_callback_url_prefix = Mock()
self.request_token = Mock()
@XBlock.register_temp_plugin(PureXBlock, identifier='pure')
@ddt.data("user", "i18n", "fs", "field-data", "bookmarks")
def test_expected_services_exist(self, expected_service):
"""
        Tests that the expected services ('user', 'i18n', 'fs', 'field-data', 'bookmarks')
        are provided by the LMS runtime.
"""
descriptor = ItemFactory(category="pure", parent=self.course)
runtime, _ = render.get_module_system_for_user(
self.user,
self.student_data,
descriptor,
self.course.id,
self.track_function,
self.xqueue_callback_url_prefix,
self.request_token,
course=self.course
)
service = runtime.service(descriptor, expected_service)
self.assertIsNotNone(service)
def test_beta_tester_fields_added(self):
"""
Tests that the beta tester fields are set on LMS runtime.
"""
descriptor = ItemFactory(category="pure", parent=self.course)
descriptor.days_early_for_beta = 5
runtime, _ = render.get_module_system_for_user(
self.user,
self.student_data,
descriptor,
self.course.id,
self.track_function,
self.xqueue_callback_url_prefix,
self.request_token,
course=self.course
)
# pylint: disable=no-member
self.assertFalse(runtime.user_is_beta_tester)
self.assertEqual(runtime.days_early_for_beta, 5)
class PureXBlockWithChildren(PureXBlock):
"""
Pure XBlock with children to use in tests.
"""
has_children = True
class EmptyXModuleWithChildren(EmptyXModule): # pylint: disable=abstract-method
"""
Empty XModule for testing with no dependencies.
"""
has_children = True
class EmptyXModuleDescriptorWithChildren(EmptyXModuleDescriptor): # pylint: disable=abstract-method
"""
Empty XModule for testing with no dependencies.
"""
module_class = EmptyXModuleWithChildren
has_children = True
BLOCK_TYPES = ['xblock', 'xmodule']
USER_NUMBERS = range(2)
@attr('shard_1')
@ddt.ddt
class TestFilteredChildren(SharedModuleStoreTestCase):
"""
Tests that verify access to XBlock/XModule children work correctly
even when those children are filtered by the runtime when loaded.
"""
@classmethod
def setUpClass(cls):
super(TestFilteredChildren, cls).setUpClass()
cls.course = CourseFactory.create()
# pylint: disable=attribute-defined-outside-init, no-member
def setUp(self):
super(TestFilteredChildren, self).setUp()
self.users = {number: UserFactory() for number in USER_NUMBERS}
self._old_has_access = render.has_access
patcher = patch('courseware.module_render.has_access', self._has_access)
patcher.start()
self.addCleanup(patcher.stop)
@ddt.data(*BLOCK_TYPES)
@XBlock.register_temp_plugin(PureXBlockWithChildren, identifier='xblock')
@XBlock.register_temp_plugin(EmptyXModuleDescriptorWithChildren, identifier='xmodule')
def test_unbound(self, block_type):
block = self._load_block(block_type)
self.assertUnboundChildren(block)
@ddt.data(*itertools.product(BLOCK_TYPES, USER_NUMBERS))
@ddt.unpack
@XBlock.register_temp_plugin(PureXBlockWithChildren, identifier='xblock')
@XBlock.register_temp_plugin(EmptyXModuleDescriptorWithChildren, identifier='xmodule')
def test_unbound_then_bound_as_descriptor(self, block_type, user_number):
user = self.users[user_number]
block = self._load_block(block_type)
self.assertUnboundChildren(block)
self._bind_block(block, user)
self.assertBoundChildren(block, user)
@ddt.data(*itertools.product(BLOCK_TYPES, USER_NUMBERS))
@ddt.unpack
@XBlock.register_temp_plugin(PureXBlockWithChildren, identifier='xblock')
@XBlock.register_temp_plugin(EmptyXModuleDescriptorWithChildren, identifier='xmodule')
def test_unbound_then_bound_as_xmodule(self, block_type, user_number):
user = self.users[user_number]
block = self._load_block(block_type)
self.assertUnboundChildren(block)
self._bind_block(block, user)
# Validate direct XModule access as well
if isinstance(block, XModuleDescriptor):
self.assertBoundChildren(block._xmodule, user) # pylint: disable=protected-access
else:
self.assertBoundChildren(block, user)
@ddt.data(*itertools.product(BLOCK_TYPES, USER_NUMBERS))
@ddt.unpack
@XBlock.register_temp_plugin(PureXBlockWithChildren, identifier='xblock')
@XBlock.register_temp_plugin(EmptyXModuleDescriptorWithChildren, identifier='xmodule')
def test_bound_only_as_descriptor(self, block_type, user_number):
user = self.users[user_number]
block = self._load_block(block_type)
self._bind_block(block, user)
self.assertBoundChildren(block, user)
@ddt.data(*itertools.product(BLOCK_TYPES, USER_NUMBERS))
@ddt.unpack
@XBlock.register_temp_plugin(PureXBlockWithChildren, identifier='xblock')
@XBlock.register_temp_plugin(EmptyXModuleDescriptorWithChildren, identifier='xmodule')
def test_bound_only_as_xmodule(self, block_type, user_number):
user = self.users[user_number]
block = self._load_block(block_type)
self._bind_block(block, user)
# Validate direct XModule access as well
if isinstance(block, XModuleDescriptor):
self.assertBoundChildren(block._xmodule, user) # pylint: disable=protected-access
else:
self.assertBoundChildren(block, user)
def _load_block(self, block_type):
"""
Instantiate an XBlock of `block_type` with the appropriate set of children.
"""
self.parent = ItemFactory(category=block_type, parent=self.course)
# Create a child of each block type for each user
self.children_for_user = {
user: [
ItemFactory(category=child_type, parent=self.parent).scope_ids.usage_id
for child_type in BLOCK_TYPES
]
for user in self.users.itervalues()
}
self.all_children = sum(self.children_for_user.values(), [])
return modulestore().get_item(self.parent.scope_ids.usage_id)
def _bind_block(self, block, user):
"""
Bind `block` to the supplied `user`.
"""
course_id = self.course.id
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_id,
user,
block,
)
return get_module_for_descriptor(
user,
Mock(name='request', user=user),
block,
field_data_cache,
course_id,
course=self.course
)
def _has_access(self, user, action, obj, course_key=None):
"""
Mock implementation of `has_access` used to control which blocks
have access to which children during tests.
"""
if action != 'load':
return self._old_has_access(user, action, obj, course_key)
if isinstance(obj, XBlock):
key = obj.scope_ids.usage_id
elif isinstance(obj, UsageKey):
key = obj
if key == self.parent.scope_ids.usage_id:
return True
return key in self.children_for_user[user]
def assertBoundChildren(self, block, user):
"""
        Ensure a bound block only exposes the children visible to the given user.
"""
self.assertChildren(block, self.children_for_user[user])
def assertUnboundChildren(self, block):
"""
        Ensure an unbound block exposes all of its children.
"""
self.assertChildren(block, self.all_children)
def assertChildren(self, block, child_usage_ids):
"""
Used to assert that sets of children are equivalent.
"""
self.assertEquals(set(child_usage_ids), set(child.scope_ids.usage_id for child in block.get_children()))
@attr('shard_1')
@ddt.ddt
class TestDisabledXBlockTypes(ModuleStoreTestCase):
"""
Tests that verify disabled XBlock types are not loaded.
"""
# pylint: disable=no-member
def setUp(self):
super(TestDisabledXBlockTypes, self).setUp()
for store in self.store.modulestores:
store.disabled_xblock_types = ('video',)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_item(self, default_ms):
with self.store.default_store(default_ms):
course = CourseFactory()
for block_type in ('video',):
item = ItemFactory(category=block_type, parent=course)
item = self.store.get_item(item.scope_ids.usage_id)
self.assertEqual(item.__class__.__name__, 'RawDescriptorWithMixins')
| agpl-3.0 | 6,489,512,908,095,212,000 | 8,712,315,270,804,537,000 | 37.579088 | 154 | 0.607922 | false |
DevOps4Networks/ansible | test/units/playbook/test_play.py | 118 | 3933 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.block import Block
from ansible.playbook.play import Play
from ansible.playbook.role import Role
from units.mock.loader import DictDataLoader
class TestPlay(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_play(self):
p = Play.load(dict())
self.assertEqual(str(p), '')
def test_basic_play(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
connection='local',
remote_user="root",
sudo=True,
sudo_user="testing",
))
    def test_play_with_user(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
user="testing",
gather_facts=False,
))
self.assertEqual(p.remote_user, "testing")
def test_play_with_user_conflict(self):
play_data = dict(
name="test play",
hosts=['foo'],
user="testing",
remote_user="testing",
)
self.assertRaises(AnsibleParserError, Play.load, play_data)
def test_play_with_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[dict(action='shell echo "hello world"')],
))
def test_play_with_handlers(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
handlers=[dict(action='shell echo "hello world"')],
))
def test_play_with_pre_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
pre_tasks=[dict(action='shell echo "hello world"')],
))
def test_play_with_post_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
post_tasks=[dict(action='shell echo "hello world"')],
))
def test_play_with_roles(self):
fake_loader = DictDataLoader({
'/etc/ansible/roles/foo/tasks.yml': """
- name: role task
shell: echo "hello world"
""",
})
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
roles=['foo'],
), loader=fake_loader)
blocks = p.compile()
def test_play_compile(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[dict(action='shell echo "hello world"')],
))
blocks = p.compile()
# with a single block, there will still be three
# implicit meta flush_handler blocks inserted
self.assertEqual(len(blocks), 4)
| gpl-3.0 | 1,842,498,176,938,649,900 | -2,869,760,377,350,783,500 | 28.350746 | 70 | 0.575642 | false |
AlexanderFabisch/scikit-learn | sklearn/metrics/pairwise.py | 9 | 45248 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
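    # ||x - y||^2 = ||x||^2 - 2 <x, y> + ||y||^2; the three terms are
    # accumulated in place below to avoid allocating extra intermediate arrays.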
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
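# Illustrative usage of pairwise_distances_argmin_min (editor's sketch, not part
# of the original scikit-learn module); the numbers are easy to verify by hand:
#
#   >>> import numpy as np
#   >>> X = np.array([[0., 0.], [2., 2.]])
#   >>> Y = np.array([[0., 1.], [2., 4.]])
#   >>> argmin, dist = pairwise_distances_argmin_min(X, Y)
#   >>> argmin   # X[0] is closest to Y[0], X[1] to Y[1]
#   array([0, 1])
#   >>> dist     # euclidean distances 1.0 and 2.0
#   array([ 1.,  2.])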
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
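# Illustrative values for cosine_distances (editor's sketch, not part of the
# original scikit-learn module):
#
#   >>> cosine_distances([[1., 0.]], [[1., 0.]])    # identical direction
#   array([[ 0.]])
#   >>> cosine_distances([[1., 0.]], [[0., 1.]])    # orthogonal vectors
#   array([[ 1.]])
#   >>> cosine_distances([[1., 0.]], [[-1., 0.]])   # opposite direction
#   array([[ 2.]])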
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
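# Editor's note on the equivalence stated in the Notes section above: after
# normalization ||x|| = ||y|| = 1, so
#     0.5 * ||x - y||^2 = 0.5 * (||x||^2 + ||y||^2 - 2 <x, y>) = 1 - <x, y>,
# which is exactly the cosine distance between x and y.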
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
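# A minimal sketch for linear_kernel (hypothetical helper, illustrative array
# only): the result is simply the Gram matrix X.dot(X.T).
def _demo_linear_kernel():
    import numpy as np
    X = np.array([[1.0, 2.0], [0.0, 1.0]])
    assert np.allclose(linear_kernel(X), X.dot(X.T))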
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    degree : int, default 3
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
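# A minimal sketch checking the polynomial_kernel formula by hand (hypothetical
# helper, illustrative values only): (gamma * <x, y> + coef0) ** degree.
def _demo_polynomial_kernel():
    import numpy as np
    X = np.array([[1.0, 2.0]])
    Y = np.array([[3.0, 4.0]])
    # <x, y> = 11, so (1 * 11 + 1) ** 2 = 144.
    assert np.allclose(polynomial_kernel(X, Y, degree=2, gamma=1.0, coef0=1), [[144.0]])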
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
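# A minimal sketch checking the sigmoid_kernel formula by hand (hypothetical
# helper, illustrative values only): tanh(gamma * <x, y> + coef0).
def _demo_sigmoid_kernel():
    import numpy as np
    X = np.array([[1.0, 0.0]])
    Y = np.array([[2.0, 0.0]])
    # tanh(0.5 * 2 + 0) = tanh(1).
    assert np.allclose(sigmoid_kernel(X, Y, gamma=0.5, coef0=0), np.tanh(1.0))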
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
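# A minimal sketch checking the rbf_kernel formula by hand (hypothetical helper,
# illustrative values only): exp(-gamma * ||x - y||^2), with ones on the diagonal.
def _demo_rbf_kernel():
    import numpy as np
    X = np.array([[0.0, 0.0], [1.0, 0.0]])
    K = rbf_kernel(X, gamma=1.0)
    assert np.allclose(np.diag(K), 1.0)
    assert np.allclose(K[0, 1], np.exp(-1.0))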
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
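# A minimal sketch checking the laplacian_kernel formula by hand (hypothetical
# helper, illustrative values only): exp(-gamma * ||x - y||_1).
def _demo_laplacian_kernel():
    import numpy as np
    X = np.array([[0.0, 0.0], [1.0, 2.0]])
    K = laplacian_kernel(X, gamma=0.5)
    # The L1 distance between the two rows is 3, so K[0, 1] = exp(-1.5).
    assert np.allclose(K[0, 1], np.exp(-1.5))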
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter *dense_output* for sparse output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
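# A minimal sketch for cosine_similarity (hypothetical helper, illustrative array
# only): scale-invariant, and equal to linear_kernel on L2-normalized rows.
def _demo_cosine_similarity():
    import numpy as np
    X = np.array([[3.0, 0.0], [0.0, 5.0]])
    S = cosine_similarity(X)
    assert np.allclose(S, np.eye(2))
    assert np.allclose(S, linear_kernel(normalize(X)))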
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
    """Computes the exponential chi-squared kernel between X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
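# A minimal sketch relating the two chi-squared kernels (hypothetical helper,
# illustrative values only): chi2_kernel equals exp(gamma * additive_chi2_kernel).
def _demo_chi2_kernels():
    import numpy as np
    X = np.array([[0.5, 0.5]])
    Y = np.array([[0.25, 0.75]])
    additive = additive_chi2_kernel(X, Y)
    expected = -(0.0625 / 0.75 + 0.0625 / 1.25)
    assert np.allclose(additive, expected)
    assert np.allclose(chi2_kernel(X, Y, gamma=1.0), np.exp(expected))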
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
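# A minimal sketch for pairwise_distances (hypothetical helper, illustrative
# array only): a built-in metric string, then the "precomputed" pass-through.
def _demo_pairwise_distances():
    import numpy as np
    X = np.array([[0.0, 0.0], [3.0, 4.0]])
    D = pairwise_distances(X, metric="euclidean")
    assert np.allclose(D, [[0.0, 5.0], [5.0, 0.0]])
    assert np.allclose(pairwise_distances(D, metric="precomputed"), D)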
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": frozenset(["gamma"]),
    "cosine": (),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
        ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
         'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
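# A minimal sketch for pairwise_kernels (hypothetical helper, illustrative array
# only): extra keyword arguments reach the kernel, optionally filtered by KERNEL_PARAMS.
def _demo_pairwise_kernels():
    import numpy as np
    X = np.array([[1.0, 0.0], [0.0, 1.0]])
    K = pairwise_kernels(X, metric="rbf", filter_params=True, gamma=0.5)
    assert np.allclose(K, rbf_kernel(X, gamma=0.5))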
| bsd-3-clause | 6,698,324,735,841,373,000 | 2,179,523,422,804,926,500 | 32.467456 | 99 | 0.614834 | false |
jschultz/Gooey | gooey/python_bindings/config_generator.py | 1 | 2477 | import os
import sys
from gooey.gui.windows import layouts
from gooey.python_bindings import argparse_to_json
from gooey.gui.util.quoting import quote
def create_from_parser(parser, source_path, cmd_args, **kwargs):
auto_start = kwargs.get('auto_start', False)
if hasattr(sys, 'frozen'):
run_cmd = quote(source_path)
else:
run_cmd = '{} -u {}'.format(quote(sys.executable), quote(source_path))
build_spec = {
'language': kwargs.get('language', 'english'),
'target': run_cmd,
'program_name': kwargs.get('program_name') or os.path.basename(sys.argv[0]).replace('.py', ''),
'program_description': kwargs.get('program_description', ''),
        'auto_start': auto_start,
'show_advanced': kwargs.get('advanced', True),
'default_size': kwargs.get('default_size', (610, 530)),
'manual_start': False,
'layout_type': 'flat',
'monospace_display': kwargs.get('monospace_display', False),
'image_dir': kwargs.get('image_dir'),
'language_dir': kwargs.get('language_dir'),
'progress_regex': kwargs.get('progress_regex'),
'progress_expr': kwargs.get('progress_expr'),
'disable_progress_bar_animation': kwargs.get('disable_progress_bar_animation'),
'disable_stop_button': kwargs.get('disable_stop_button'),
'group_by_type': kwargs.get('group_by_type', True),
'ignore_command': kwargs.get('ignore_command', None),
'force_command': kwargs.get('force_command', None),
'use_argparse_groups': kwargs.get('use_argparse_groups', False),
'use_tabs': kwargs.get('use_tabs', False)
}
if build_spec['use_argparse_groups']:
build_spec['num_default_cols'] = kwargs.get('default_cols', 2)
build_spec['num_cols_dict'] = kwargs.get('cols_dict', {})
else:
build_spec['num_cols_dict'] = {"required arguments": kwargs.get('required_cols', 1),
"optional arguments": kwargs.get('optional_cols', 3)}
if not auto_start:
build_spec['program_description'] = parser.description or build_spec['program_description']
layout_data = argparse_to_json.convert(parser, build_spec['use_argparse_groups'], cmd_args=cmd_args) if build_spec['show_advanced'] else layouts.basic_config.items()
build_spec.update(layout_data)
return build_spec
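# A minimal, hedged sketch of how create_from_parser might be driven (hypothetical
# helper; the parser, keyword values and empty cmd_args below are assumptions, not
# part of Gooey's documented usage).
def _demo_build_spec():
    import argparse
    parser = argparse.ArgumentParser(description='Demo tool')
    parser.add_argument('--count', type=int, default=1)
    return create_from_parser(parser, __file__, cmd_args=[],
                              program_name='Demo tool', advanced=True)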
| mit | -1,536,991,423,036,219,400 | 1,813,958,840,289,309,200 | 44.735849 | 169 | 0.612434 | false |
timokoola/hslbot | docutils/parsers/rst/languages/fi.py | 128 | 3661 | # -*- coding: utf-8 -*-
# $Id: fi.py 7119 2011-09-02 13:00:23Z milde $
# Author: Asko Soukka <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Finnish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'huomio': u'attention',
u'varo': u'caution',
u'code (translation required)': 'code',
u'vaara': u'danger',
u'virhe': u'error',
u'vihje': u'hint',
u't\u00e4rke\u00e4\u00e4': u'important',
u'huomautus': u'note',
u'neuvo': u'tip',
u'varoitus': u'warning',
u'kehotus': u'admonition',
u'sivupalkki': u'sidebar',
u'aihe': u'topic',
u'rivi': u'line-block',
u'tasalevyinen': u'parsed-literal',
u'ohje': u'rubric',
u'epigraafi': u'epigraph',
u'kohokohdat': u'highlights',
u'lainaus': u'pull-quote',
u'taulukko': u'table',
u'csv-taulukko': u'csv-table',
u'list-table (translation required)': 'list-table',
u'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#u'kysymykset': u'questions',
u'meta': u'meta',
'math (translation required)': 'math',
#u'kuvakartta': u'imagemap',
u'kuva': u'image',
u'kaavio': u'figure',
u'sis\u00e4llyt\u00e4': u'include',
u'raaka': u'raw',
u'korvaa': u'replace',
u'unicode': u'unicode',
u'p\u00e4iv\u00e4ys': u'date',
u'luokka': u'class',
u'rooli': u'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
u'sis\u00e4llys': u'contents',
u'kappale': u'sectnum',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#u'alaviitteet': u'footnotes',
#u'viitaukset': u'citations',
u'target-notes (translation required)': u'target-notes'}
"""Finnish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
u'lyhennys': u'abbreviation',
u'akronyymi': u'acronym',
u'kirjainsana': u'acronym',
u'code (translation required)': 'code',
u'hakemisto': u'index',
u'luettelo': u'index',
u'alaindeksi': u'subscript',
u'indeksi': u'subscript',
u'yl\u00e4indeksi': u'superscript',
u'title-reference (translation required)': u'title-reference',
u'title (translation required)': u'title-reference',
u'pep-reference (translation required)': u'pep-reference',
u'rfc-reference (translation required)': u'rfc-reference',
u'korostus': u'emphasis',
u'vahvistus': u'strong',
u'tasalevyinen': u'literal',
'math (translation required)': 'math',
u'named-reference (translation required)': u'named-reference',
u'anonymous-reference (translation required)': u'anonymous-reference',
u'footnote-reference (translation required)': u'footnote-reference',
u'citation-reference (translation required)': u'citation-reference',
u'substitution-reference (translation required)': u'substitution-reference',
u'kohde': u'target',
u'uri-reference (translation required)': u'uri-reference',
u'raw (translation required)': 'raw',}
"""Mapping of Finnish role names to canonical role names for interpreted text.
"""
| apache-2.0 | 956,243,060,264,427,300 | -9,181,967,782,674,822,000 | 36.357143 | 80 | 0.643267 | false |
bgxavier/neutron | neutron/db/metering/metering_db.py | 17 | 11434 | # Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
from neutron.common import constants
from neutron.db import common_db_mixin as base_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import metering
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
class MeteringLabelRule(model_base.BASEV2, models_v2.HasId):
direction = sa.Column(sa.Enum('ingress', 'egress',
name='meteringlabels_direction'))
remote_ip_prefix = sa.Column(sa.String(64))
metering_label_id = sa.Column(sa.String(36),
sa.ForeignKey("meteringlabels.id",
ondelete="CASCADE"),
nullable=False)
excluded = sa.Column(sa.Boolean, default=False, server_default=sql.false())
class MeteringLabel(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(1024))
rules = orm.relationship(MeteringLabelRule, backref="label",
cascade="delete", lazy="joined")
routers = orm.relationship(
l3_db.Router,
primaryjoin="MeteringLabel.tenant_id==Router.tenant_id",
foreign_keys='MeteringLabel.tenant_id',
uselist=True)
shared = sa.Column(sa.Boolean, default=False, server_default=sql.false())
class MeteringDbMixin(metering.MeteringPluginBase,
base_db.CommonDbMixin):
def __init__(self):
self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI()
def _make_metering_label_dict(self, metering_label, fields=None):
res = {'id': metering_label['id'],
'name': metering_label['name'],
'description': metering_label['description'],
'shared': metering_label['shared'],
'tenant_id': metering_label['tenant_id']}
return self._fields(res, fields)
def create_metering_label(self, context, metering_label):
m = metering_label['metering_label']
tenant_id = self._get_tenant_id_for_create(context, m)
with context.session.begin(subtransactions=True):
metering_db = MeteringLabel(id=uuidutils.generate_uuid(),
description=m['description'],
tenant_id=tenant_id,
name=m['name'],
shared=m['shared'])
context.session.add(metering_db)
return self._make_metering_label_dict(metering_db)
def delete_metering_label(self, context, label_id):
with context.session.begin(subtransactions=True):
try:
label = self._get_by_id(context, MeteringLabel, label_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelNotFound(label_id=label_id)
context.session.delete(label)
def get_metering_label(self, context, label_id, fields=None):
try:
metering_label = self._get_by_id(context, MeteringLabel, label_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelNotFound(label_id=label_id)
return self._make_metering_label_dict(metering_label, fields)
def get_metering_labels(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'metering_labels', limit,
marker)
return self._get_collection(context, MeteringLabel,
self._make_metering_label_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def _make_metering_label_rule_dict(self, metering_label_rule, fields=None):
res = {'id': metering_label_rule['id'],
'metering_label_id': metering_label_rule['metering_label_id'],
'direction': metering_label_rule['direction'],
'remote_ip_prefix': metering_label_rule['remote_ip_prefix'],
'excluded': metering_label_rule['excluded']}
return self._fields(res, fields)
def get_metering_label_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'metering_label_rules',
limit, marker)
return self._get_collection(context, MeteringLabelRule,
self._make_metering_label_rule_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def get_metering_label_rule(self, context, rule_id, fields=None):
try:
metering_label_rule = self._get_by_id(context,
MeteringLabelRule, rule_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
return self._make_metering_label_rule_dict(metering_label_rule, fields)
def _validate_cidr(self, context, label_id, remote_ip_prefix,
direction, excluded):
r_ips = self.get_metering_label_rules(context,
filters={'metering_label_id':
[label_id],
'direction':
[direction],
'excluded':
[excluded]},
fields=['remote_ip_prefix'])
cidrs = [r['remote_ip_prefix'] for r in r_ips]
new_cidr_ipset = netaddr.IPSet([remote_ip_prefix])
if (netaddr.IPSet(cidrs) & new_cidr_ipset):
raise metering.MeteringLabelRuleOverlaps(
remote_ip_prefix=remote_ip_prefix)
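    # A minimal, hedged sketch of the overlap test used above (hypothetical helper;
    # the prefixes are made up). A non-empty IPSet intersection is what triggers
    # MeteringLabelRuleOverlaps.
    @staticmethod
    def _demo_cidr_overlap():
        existing = netaddr.IPSet(['10.0.0.0/24'])
        candidate = netaddr.IPSet(['10.0.0.128/25'])
        return bool(existing & candidate)  # True: the /25 falls inside the /24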
def create_metering_label_rule(self, context, metering_label_rule):
m = metering_label_rule['metering_label_rule']
with context.session.begin(subtransactions=True):
label_id = m['metering_label_id']
ip_prefix = m['remote_ip_prefix']
direction = m['direction']
excluded = m['excluded']
self._validate_cidr(context, label_id, ip_prefix, direction,
excluded)
metering_db = MeteringLabelRule(id=uuidutils.generate_uuid(),
metering_label_id=label_id,
direction=direction,
excluded=m['excluded'],
remote_ip_prefix=ip_prefix)
context.session.add(metering_db)
return self._make_metering_label_rule_dict(metering_db)
def delete_metering_label_rule(self, context, rule_id):
with context.session.begin(subtransactions=True):
try:
rule = self._get_by_id(context, MeteringLabelRule, rule_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
context.session.delete(rule)
return self._make_metering_label_rule_dict(rule)
def _get_metering_rules_dict(self, metering_label):
rules = []
for rule in metering_label.rules:
rule_dict = self._make_metering_label_rule_dict(rule)
rules.append(rule_dict)
return rules
def _make_router_dict(self, router):
res = {'id': router['id'],
'name': router['name'],
'tenant_id': router['tenant_id'],
'admin_state_up': router['admin_state_up'],
'status': router['status'],
'gw_port_id': router['gw_port_id'],
constants.METERING_LABEL_KEY: []}
return res
def _process_sync_metering_data(self, context, labels):
all_routers = None
routers_dict = {}
for label in labels:
if label.shared:
if not all_routers:
all_routers = self._get_collection_query(context,
l3_db.Router)
routers = all_routers
else:
routers = label.routers
for router in routers:
router_dict = routers_dict.get(
router['id'],
self._make_router_dict(router))
rules = self._get_metering_rules_dict(label)
data = {'id': label['id'], 'rules': rules}
router_dict[constants.METERING_LABEL_KEY].append(data)
routers_dict[router['id']] = router_dict
return routers_dict.values()
def get_sync_data_for_rule(self, context, rule):
label = context.session.query(MeteringLabel).get(
rule['metering_label_id'])
if label.shared:
routers = self._get_collection_query(context, l3_db.Router)
else:
routers = label.routers
routers_dict = {}
for router in routers:
router_dict = routers_dict.get(router['id'],
self._make_router_dict(router))
data = {'id': label['id'], 'rule': rule}
router_dict[constants.METERING_LABEL_KEY].append(data)
routers_dict[router['id']] = router_dict
return routers_dict.values()
def get_sync_data_metering(self, context, label_id=None, router_ids=None):
labels = context.session.query(MeteringLabel)
if label_id:
labels = labels.filter(MeteringLabel.id == label_id)
elif router_ids:
labels = (labels.join(MeteringLabel.routers).
filter(l3_db.Router.id.in_(router_ids)))
return self._process_sync_metering_data(context, labels)
| apache-2.0 | -1,085,358,422,212,625,000 | 41,225,657,076,095,864 | 41.82397 | 79 | 0.548977 | false |
abdulbaqi/quranf | venv/lib/python2.7/site-packages/pip/vcs/bazaar.py | 393 | 4943 | import os
import tempfile
import re
from pip.backwardcompat import urlparse
from pip.log import logger
from pip.util import rmtree, display_path, call_subprocess
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
class Bazaar(VersionControl):
name = 'bzr'
dirname = '.bzr'
repo_name = 'branch'
bundle_file = 'bzr-branch.txt'
schemes = ('bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', 'bzr+lp')
guide = ('# This was a Bazaar branch; to make it a branch again run:\n'
'bzr branch -r %(rev)s %(url)s .\n')
def __init__(self, url=None, *args, **kwargs):
super(Bazaar, self).__init__(url, *args, **kwargs)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urlparse, 'uses_fragment', None):
urlparse.uses_fragment.extend(['lp'])
urlparse.non_hierarchical.extend(['lp'])
def parse_vcs_bundle_file(self, content):
url = rev = None
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
match = re.search(r'^bzr\s*branch\s*-r\s*(\d*)', line)
if match:
rev = match.group(1).strip()
url = line[match.end():].strip().split(None, 1)[0]
if url and rev:
return url, rev
return None, None
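    # A minimal, hedged sketch of what parse_vcs_bundle_file extracts from one
    # bundle line (hypothetical helper; the URL and revision are made up).
    @staticmethod
    def _demo_parse_bundle_line():
        line = 'bzr branch -r 42 http://example.com/branch .'
        match = re.search(r'^bzr\s*branch\s*-r\s*(\d*)', line)
        rev = match.group(1).strip()  # '42'
        url = line[match.end():].strip().split(None, 1)[0]  # 'http://example.com/branch'
        return url, rev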
def export(self, location):
"""Export the Bazaar repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
rmtree(location)
try:
call_subprocess([self.cmd, 'export', location], cwd=temp_dir,
filter_stdout=self._filter, show_stdout=False)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
call_subprocess([self.cmd, 'switch', url], cwd=dest)
def update(self, dest, rev_options):
call_subprocess(
[self.cmd, 'pull', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Checking out %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess(
[self.cmd, 'branch', '-q'] + rev_options + [url, dest])
def get_url_rev(self):
        # hotfix the URL scheme after removing bzr+ from bzr+ssh://; re-add it
url, rev = super(Bazaar, self).get_url_rev()
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev
def get_url(self, location):
urls = call_subprocess(
[self.cmd, 'info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if self._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
def get_revision(self, location):
revision = call_subprocess(
[self.cmd, 'revno'], show_stdout=False, cwd=location)
return revision.splitlines()[-1]
def get_tag_revs(self, location):
tags = call_subprocess(
[self.cmd, 'tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('bzr:'):
repo = 'bzr+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
tag_revs = self.get_tag_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
else:
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
vcs.register(Bazaar)
| mit | 8,093,166,955,004,379,000 | 2,383,563,254,745,058,300 | 36.732824 | 90 | 0.544609 | false |
Juniper/neutron | neutron/tests/unit/ml2/test_type_vxlan.py | 5 | 9439 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Kyle Mestery, Cisco Systems, Inc.
from oslo.config import cfg
import testtools
from testtools import matchers
from neutron.common import exceptions as exc
from neutron.db import api as db
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_vxlan
from neutron.tests import base
TUNNEL_IP_ONE = "10.10.10.10"
TUNNEL_IP_TWO = "10.10.10.20"
TUN_MIN = 100
TUN_MAX = 109
TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
INVALID_VXLAN_VNI = 7337
MULTICAST_GROUP = "239.1.1.1"
VXLAN_UDP_PORT_ONE = 9999
VXLAN_UDP_PORT_TWO = 8888
class VxlanTypeTest(base.BaseTestCase):
def setUp(self):
super(VxlanTypeTest, self).setUp()
db.configure_db()
cfg.CONF.set_override('vni_ranges', [TUNNEL_RANGES],
group='ml2_type_vxlan')
cfg.CONF.set_override('vxlan_group', MULTICAST_GROUP,
group='ml2_type_vxlan')
self.driver = type_vxlan.VxlanTypeDriver()
self.driver.vxlan_vni_ranges = TUNNEL_RANGES
self.driver._sync_vxlan_allocations()
self.session = db.get_session()
self.addCleanup(cfg.CONF.reset)
self.addCleanup(db.clear_db)
def test_vxlan_tunnel_type(self):
self.assertEqual(self.driver.get_type(), p_const.TYPE_VXLAN)
def test_validate_provider_segment(self):
segment = {api.NETWORK_TYPE: 'vxlan',
api.PHYSICAL_NETWORK: 'phys_net',
api.SEGMENTATION_ID: None}
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
segment[api.PHYSICAL_NETWORK] = None
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
def test_sync_tunnel_allocations(self):
self.assertIsNone(
self.driver.get_vxlan_allocation(self.session,
(TUN_MIN - 1))
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MIN)).allocated
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MIN + 1)).allocated
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MAX - 1)).allocated
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MAX)).allocated
)
self.assertIsNone(
self.driver.get_vxlan_allocation(self.session,
(TUN_MAX + 1))
)
self.driver.vxlan_vni_ranges = UPDATED_TUNNEL_RANGES
self.driver._sync_vxlan_allocations()
self.assertIsNone(self.driver.
get_vxlan_allocation(self.session,
(TUN_MIN + 5 - 1)))
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MIN + 5)).
allocated)
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MIN + 5 + 1)).
allocated)
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MAX + 5 - 1)).
allocated)
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MAX + 5)).
allocated)
self.assertIsNone(self.driver.
get_vxlan_allocation(self.session,
(TUN_MAX + 5 + 1)))
def test_reserve_provider_segment(self):
segment = {api.NETWORK_TYPE: 'vxlan',
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: 101}
self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
with testtools.ExpectedException(exc.TunnelIdInUse):
self.driver.reserve_provider_segment(self.session, segment)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertFalse(alloc.allocated)
segment[api.SEGMENTATION_ID] = 1000
self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertIsNone(alloc)
def test_allocate_tenant_segment(self):
tunnel_ids = set()
for x in xrange(TUN_MIN, TUN_MAX + 1):
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
segment = self.driver.allocate_tenant_segment(self.session)
self.assertIsNone(segment)
segment = {api.NETWORK_TYPE: 'vxlan',
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: tunnel_ids.pop()}
self.driver.release_segment(self.session, segment)
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
for tunnel_id in tunnel_ids:
segment[api.SEGMENTATION_ID] = tunnel_id
self.driver.release_segment(self.session, segment)
def test_vxlan_endpoints(self):
"""Test VXLAN allocation/de-allocation."""
        # Add the first endpoint and verify its IP address and UDP port are stored
vxlan1_endpoint = self.driver.add_endpoint(TUNNEL_IP_ONE,
VXLAN_UDP_PORT_ONE)
self.assertEqual(TUNNEL_IP_ONE, vxlan1_endpoint.ip_address)
self.assertEqual(VXLAN_UDP_PORT_ONE, vxlan1_endpoint.udp_port)
        # Add the second endpoint and verify its IP address and UDP port are stored
vxlan2_endpoint = self.driver.add_endpoint(TUNNEL_IP_TWO,
VXLAN_UDP_PORT_TWO)
self.assertEqual(TUNNEL_IP_TWO, vxlan2_endpoint.ip_address)
self.assertEqual(VXLAN_UDP_PORT_TWO, vxlan2_endpoint.udp_port)
# Get all the endpoints
endpoints = self.driver.get_endpoints()
for endpoint in endpoints:
if endpoint['ip_address'] == TUNNEL_IP_ONE:
self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint['udp_port'])
elif endpoint['ip_address'] == TUNNEL_IP_TWO:
self.assertEqual(VXLAN_UDP_PORT_TWO, endpoint['udp_port'])
class VxlanTypeMultiRangeTest(base.BaseTestCase):
TUN_MIN0 = 100
TUN_MAX0 = 101
TUN_MIN1 = 200
TUN_MAX1 = 201
TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)]
def setUp(self):
super(VxlanTypeMultiRangeTest, self).setUp()
db.configure_db()
self.driver = type_vxlan.VxlanTypeDriver()
self.driver.vxlan_vni_ranges = self.TUNNEL_MULTI_RANGES
self.driver._sync_vxlan_allocations()
self.session = db.get_session()
self.addCleanup(db.clear_db)
def test_release_segment(self):
segments = [self.driver.allocate_tenant_segment(self.session)
for i in range(4)]
# Release them in random order. No special meaning.
for i in (0, 2, 1, 3):
self.driver.release_segment(self.session, segments[i])
for key in (self.TUN_MIN0, self.TUN_MAX0,
self.TUN_MIN1, self.TUN_MAX1):
alloc = self.driver.get_vxlan_allocation(self.session, key)
self.assertFalse(alloc.allocated)
| apache-2.0 | -3,119,630,981,356,196,000 | -2,350,153,190,051,213,300 | 40.581498 | 79 | 0.586291 | false |
dingocuster/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause | 8,872,120,025,412,605,000 | -3,795,045,631,561,138,700 | 34.740385 | 78 | 0.600753 | false |
mdj2/django | django/contrib/auth/tests/test_forms.py | 6 | 16393 | from __future__ import unicode_literals
import os
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.forms import (UserCreationForm, AuthenticationForm,
PasswordChangeForm, SetPasswordForm, UserChangeForm, PasswordResetForm,
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget)
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core import mail
from django.forms.fields import Field, CharField
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserCreationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(form.error_messages['duplicate_username'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(form.fields['username'].error_messages['invalid'])])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [force_text(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
def test_success(self):
# The success case.
data = {
'username': '[email protected]',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(repr(u), '<User: [email protected]>')
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AuthenticationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})])
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True):
with translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
UserModel = get_user_model()
username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SetPasswordFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class PasswordChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors,
[force_text(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields),
['old_password', 'new_password1', 'new_password2'])
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
self.assertEqual(form['username'].errors,
[force_text(form.fields['username'].error_messages['invalid'])])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
        # UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super(MyUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
form = MyUserForm({})
    def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class PasswordResetFormTest(TestCase):
fixtures = ['authtestdata.json']
def create_dummy_user(self):
"""creates a user and returns a tuple
(user_object, username, email)
"""
username = 'jsmith'
email = '[email protected]'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
    def test_nonexistent_email(self):
        # Test a nonexistent email address. This should not fail because it would
        # expose information about registered users.
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
@override_settings(
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
)
def test_cleaned_data(self):
# Regression test
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
@override_settings(
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
)
def test_custom_email_subject(self):
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_bug_5605(self):
# bug #5605, preserve the case of the user name (before the @ in the
# email address) when creating a user.
user = User.objects.create_user('forms_test2', '[email protected]', 'test')
self.assertEqual(user.email, '[email protected]')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
        # Tests that an inactive user cannot receive a password reset email.
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', '[email protected]', 'test')
data = {"email": "[email protected]"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
class ReadOnlyPasswordHashTest(TestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field._has_changed('aaa', 'bbb'))
| bsd-3-clause | -828,874,262,452,569,300 | -1,885,724,550,932,468,700 | 36.946759 | 110 | 0.620265 | false |
TemplateVoid/mapnik | scons/scons-local-2.3.1/SCons/Tool/suncc.py | 8 | 1998 | """SCons.Tool.suncc
Tool-specific initialization for Sun Solaris (Forte) CC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/suncc.py 2014/03/02 14:18:15 garyo"
import SCons.Util
import cc
def generate(env):
"""
Add Builders and construction variables for Forte C and C++ compilers
to an Environment.
"""
cc.generate(env)
env['CXX'] = 'CC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -KPIC')
env['SHOBJPREFIX'] = 'so_'
env['SHOBJSUFFIX'] = '.o'
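# A minimal usage sketch (illustrative only, assuming SCons and the Sun/Forte
# CC toolchain are available on the build host):
#
#   env = Environment(tools=['suncc'])
#   env.SharedObject('foo.c')   # built with the -KPIC flag configured above
#
# The tool only overrides the C++ command and the shared-object settings; all
# other construction variables come from the generic 'cc' tool it builds on.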
def exists(env):
return env.Detect('CC')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 | -867,489,240,292,520,400 | -2,671,817,040,111,077,400 | 33.448276 | 119 | 0.728228 | false |
motion2015/a3 | common/lib/xmodule/xmodule/library_tools.py | 5 | 6842 | """
XBlock runtime services for LibraryContentModule
"""
from django.core.exceptions import PermissionDenied
from opaque_keys.edx.locator import LibraryLocator
from xmodule.library_content_module import ANY_CAPA_TYPE_VALUE
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.capa_module import CapaDescriptor
class LibraryToolsService(object):
"""
Service that allows LibraryContentModule to interact with libraries in the
modulestore.
"""
def __init__(self, modulestore):
self.store = modulestore
def _get_library(self, library_key):
"""
Given a library key like "library-v1:ProblemX+PR0B", return the
'library' XBlock with meta-information about the library.
A specific version may be specified.
Returns None on error.
"""
if not isinstance(library_key, LibraryLocator):
library_key = LibraryLocator.from_string(library_key)
try:
return self.store.get_library(
library_key, remove_version=False, remove_branch=False, head_validation=False
)
except ItemNotFoundError:
return None
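    # A minimal usage sketch for the helper above (illustrative only; `store`
    # stands for a modulestore instance and the key is the example from the
    # docstring, not a real library):
    #
    #   tools = LibraryToolsService(store)
    #   library = tools._get_library("library-v1:ProblemX+PR0B")
    #   version = library.location.library_key.version_guid if library else None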
def get_library_version(self, lib_key):
"""
Get the version (an ObjectID) of the given library.
Returns None if the library does not exist.
"""
library = self._get_library(lib_key)
if library:
# We need to know the library's version so ensure it's set in library.location.library_key.version_guid
assert library.location.library_key.version_guid is not None
return library.location.library_key.version_guid
return None
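    # Illustrative staleness check built on get_library_version (hypothetical
    # names; `dest_block` stands for a LibraryContentModule instance):
    #
    #   current = tools.get_library_version(dest_block.source_library_id)
    #   needs_update = (current is not None and
    #                   unicode(current) != dest_block.source_library_version)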
def create_block_analytics_summary(self, course_key, block_keys):
"""
Given a CourseKey and a list of (block_type, block_id) pairs,
prepare the JSON-ready metadata needed for analytics logging.
This is [
{"usage_key": x, "original_usage_key": y, "original_usage_version": z, "descendants": [...]}
]
where the main list contains all top-level blocks, and descendants contains a *flat* list of all
descendants of the top level blocks, if any.
"""
def summarize_block(usage_key):
""" Basic information about the given block """
orig_key, orig_version = self.store.get_block_original_usage(usage_key)
return {
"usage_key": unicode(usage_key),
"original_usage_key": unicode(orig_key) if orig_key else None,
"original_usage_version": unicode(orig_version) if orig_version else None,
}
result_json = []
for block_key in block_keys:
key = course_key.make_usage_key(*block_key)
info = summarize_block(key)
info['descendants'] = []
try:
block = self.store.get_item(key, depth=None) # Load the item and all descendants
children = list(getattr(block, "children", []))
while children:
child_key = children.pop()
child = self.store.get_item(child_key)
info['descendants'].append(summarize_block(child_key))
children.extend(getattr(child, "children", []))
except ItemNotFoundError:
pass # The block has been deleted
result_json.append(info)
return result_json
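    # Illustrative shape of the summary produced above (keys and ids are
    # hypothetical):
    #
    #   [{"usage_key": "block-v1:Org+Course+Run+type@problem+block@abc",
    #     "original_usage_key": "lib-block-v1:Org+Lib+type@problem+block@def",
    #     "original_usage_version": "5f2c...",
    #     "descendants": []}]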
def _filter_child(self, usage_key, capa_type):
"""
Filters children by CAPA problem type, if configured
"""
if capa_type == ANY_CAPA_TYPE_VALUE:
return True
if usage_key.block_type != "problem":
return False
descriptor = self.store.get_item(usage_key, depth=0)
assert isinstance(descriptor, CapaDescriptor)
return capa_type in descriptor.problem_types
def can_use_library_content(self, block):
"""
Determines whether a modulestore holding a course_id supports libraries.
"""
return self.store.check_supports(block.location.course_key, 'copy_from_template')
def update_children(self, dest_block, user_id, user_perms=None, version=None):
"""
This method is to be used when the library that a LibraryContentModule
references has been updated. It will re-fetch all matching blocks from
the libraries, and copy them as children of dest_block. The children
will be given new block_ids, but the definition ID used should be the
exact same definition ID used in the library.
This method will update dest_block's 'source_library_version' field to
store the version number of the libraries used, so we easily determine
if dest_block is up to date or not.
"""
if user_perms and not user_perms.can_write(dest_block.location.course_key):
raise PermissionDenied()
if not dest_block.source_library_id:
dest_block.source_library_version = ""
return
source_blocks = []
library_key = dest_block.source_library_key
if version:
library_key = library_key.replace(branch=ModuleStoreEnum.BranchName.library, version_guid=version)
library = self._get_library(library_key)
if library is None:
raise ValueError("Requested library not found.")
if user_perms and not user_perms.can_read(library_key):
raise PermissionDenied()
filter_children = (dest_block.capa_type != ANY_CAPA_TYPE_VALUE)
if filter_children:
# Apply simple filtering based on CAPA problem types:
source_blocks.extend([key for key in library.children if self._filter_child(key, dest_block.capa_type)])
else:
source_blocks.extend(library.children)
with self.store.bulk_operations(dest_block.location.course_key):
dest_block.source_library_version = unicode(library.location.library_key.version_guid)
self.store.update_item(dest_block, user_id)
head_validation = not version
dest_block.children = self.store.copy_from_template(
source_blocks, dest_block.location, user_id, head_validation=head_validation
)
# ^-- copy_from_template updates the children in the DB
# but we must also set .children here to avoid overwriting the DB again
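    # A minimal sketch of how update_children is typically driven (illustrative
    # only; `block` stands for a LibraryContentModule instance and `user_id`
    # for the id of the acting user):
    #
    #   tools = LibraryToolsService(store)
    #   if tools.can_use_library_content(block):
    #       tools.update_children(block, user_id)
    #   # block.source_library_version now holds the version that was copied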
def list_available_libraries(self):
"""
List all known libraries.
Returns tuples of (LibraryLocator, display_name)
"""
return [
(lib.location.library_key.replace(version_guid=None, branch=None), lib.display_name)
for lib in self.store.get_libraries()
]
| agpl-3.0 | 4,278,801,390,790,347,000 | 1,213,512,859,724,571,100 | 41.234568 | 116 | 0.62891 | false |
andresailer/DIRAC | tests/Integration/DataManagementSystem/Test_FileCatalogDB.py | 1 | 56699 | """ This is a test of the FileCatalogDB
It supposes that the DB is present.
"""
# pylint: disable=invalid-name,wrong-import-position
import unittest
import itertools
import os
import sys
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.DataManagementSystem.DB.FileCatalogDB import FileCatalogDB
from DIRAC.Core.Security.Properties import FC_MANAGEMENT
seName = "mySE"
testUser = 'atsareg'
testGroup = 'dirac_user'
testDir = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir'
parentDir = '/vo.formation.idgrilles.fr/user/a/atsareg'
nonExistingDir = "/I/Dont/exist/dir"
testFile = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir/testfile'
nonExistingFile = "/I/Dont/exist"
x509Chain = "<X509Chain 3 certs [/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch]"
x509Chain += "[/DC=ch/DC=cern/CN=CERN Trusted Certification Authority][/DC=ch/DC=cern/CN=CERN Root CA]>"
credDict = {
'DN': '/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch',
'extraCredentials': 'hosts',
'group': 'visitor',
'CN': 'volhcb12.cern.ch',
'x509Chain': x509Chain,
'username': 'anonymous',
'isLimitedProxy': False,
'properties': [FC_MANAGEMENT],
'isProxy': False}
isAdmin = False
proxyUser = 'anonymous'
proxyGroup = 'visitor'
# TESTS WERE DESIGNED WITH THIS CONFIGURATION
# DATABASE_CONFIG = { 'UserGroupManager' : 'UserAndGroupManagerDB',
# 'SEManager' : 'SEManagerDB',
# 'SecurityManager' : 'NoSecurityManager',
# 'DirectoryManager' : 'DirectoryLevelTree',
# 'FileManager' : 'FileManager',
# 'DirectoryMetadata' : 'DirectoryMetadata',
# 'FileMetadata' : 'FileMetadata',
# 'DatasetManager' : 'DatasetManager',
# 'UniqueGUID' : False,
# 'GlobalReadAccess' : True,
# 'LFNPFNConvention' : 'Strong',
# 'ResolvePFN' : True,
# 'DefaultUmask' : 0775,
# 'ValidFileStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'ValidReplicaStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'VisibleFileStatus' : ['AprioriGood'],
# 'VisibleReplicaStatus': ['AprioriGood'] }
DATABASE_CONFIG = {
'UserGroupManager': 'UserAndGroupManagerDB', # UserAndGroupManagerDB, UserAndGroupManagerCS
'SEManager': 'SEManagerDB', # SEManagerDB, SEManagerCS
# NoSecurityManager, DirectorySecurityManager, FullSecurityManager
'SecurityManager': 'NoSecurityManager',
# DirectorySimpleTree, DirectoryFlatTree, DirectoryNodeTree, DirectoryLevelTree
'DirectoryManager': 'DirectoryLevelTree',
'FileManager': 'FileManager', # FileManagerFlat, FileManager
'DirectoryMetadata': 'DirectoryMetadata',
'FileMetadata': 'FileMetadata',
'DatasetManager': 'DatasetManager',
'UniqueGUID': True,
'GlobalReadAccess': True,
'LFNPFNConvention': 'Strong',
'ResolvePFN': True,
'DefaultUmask': 0o775,
'ValidFileStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
'ValidReplicaStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
'VisibleFileStatus': ['AprioriGood'],
'VisibleReplicaStatus': ['AprioriGood']}
ALL_MANAGERS = {
"UserGroupManager": [
"UserAndGroupManagerDB", "UserAndGroupManagerCS"], "SEManager": [
"SEManagerDB", "SEManagerCS"], "SecurityManager": [
"NoSecurityManager", "DirectorySecurityManager", "FullSecurityManager"], "DirectoryManager": [
"DirectorySimpleTree", "DirectoryFlatTree", "DirectoryNodeTree", "DirectoryLevelTree"], "FileManager": [
"FileManagerFlat", "FileManager"], }
ALL_MANAGERS_NO_CS = {
"UserGroupManager": ["UserAndGroupManagerDB"],
"SEManager": ["SEManagerDB"],
"SecurityManager": [
"NoSecurityManager",
"DirectorySecurityManager",
"FullSecurityManager"],
"DirectoryManager": [
"DirectorySimpleTree",
"DirectoryFlatTree",
"DirectoryNodeTree",
"DirectoryLevelTree"],
"FileManager": [
"FileManagerFlat",
"FileManager"],
}
DEFAULT_MANAGER = {"UserGroupManager": ["UserAndGroupManagerDB"],
"SEManager": ["SEManagerDB"],
"SecurityManager": ["DirectorySecurityManagerWithDelete"],
"DirectoryManager": ["DirectoryClosure"],
"FileManager": ["FileManagerPs"],
}
DEFAULT_MANAGER_2 = {"UserGroupManager": ["UserAndGroupManagerDB"],
"SEManager": ["SEManagerDB"],
"SecurityManager": ["NoSecurityManager"],
"DirectoryManager": ["DirectoryLevelTree"],
"FileManager": ["FileManager"],
}
MANAGER_TO_TEST = DEFAULT_MANAGER
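# A minimal sketch of how to exercise other back-end combinations (illustrative
# only; the __main__ block at the bottom iterates over the cartesian product of
# whatever dictionary is assigned here):
#
#   MANAGER_TO_TEST = DEFAULT_MANAGER_2    # alternative manager combination
#   MANAGER_TO_TEST = ALL_MANAGERS_NO_CS   # sweep every non-CS combination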
class FileCatalogDBTestCase(unittest.TestCase):
""" Base class for the FileCatalogDB test cases
"""
def setUp(self):
self.db = FileCatalogDB()
# for table in self.db._query( "Show tables;" )["Value"]:
# self.db.deleteEntries( table[0] )
self.db.setConfig(DATABASE_CONFIG)
def tearDown(self):
pass
# for table in self.db._query( "Show tables;" )["Value"]:
# self.db.deleteEntries( table[0] )
class SECase (FileCatalogDBTestCase):
def test_seOperations(self):
"""Testing SE related operation"""
# create SE
ret = self.db.addSE(seName, credDict)
if isAdmin:
self.assertTrue(ret["OK"], "addSE failed when adding new SE: %s" % ret)
seId = ret["Value"]
# create it again
ret = self.db.addSE(seName, credDict)
self.assertEqual(ret["Value"], seId, "addSE failed when adding existing SE: %s" % ret)
else:
self.assertEqual(
ret["OK"],
False,
"addSE should fail when adding new SE as non admin: %s" %
ret)
# remove it
ret = self.db.deleteSE(seName, credDict)
self.assertEqual(ret["OK"], True if isAdmin else False, "deleteE failed %s" % ret)
class UserGroupCase(FileCatalogDBTestCase):
def test_userOperations(self):
"""Testing the user related operations"""
expectedRes = None
if isAdmin:
print "Running UserTest in admin mode"
expectedRes = True
else:
print "Running UserTest in non admin mode"
expectedRes = False
# Add the user
result = self.db.addUser(testUser, credDict)
self.assertEqual(result['OK'], expectedRes, "AddUser failed when adding new user: %s" % result)
# Add an existing user
result = self.db.addUser(testUser, credDict)
self.assertEqual(
result['OK'],
expectedRes,
"AddUser failed when adding existing user: %s" %
result)
# Fetch the list of user
result = self.db.getUsers(credDict)
self.assertEqual(result['OK'], expectedRes, "getUsers failed: %s" % result)
if isAdmin:
# Check if our user is present
self.assertEqual(testUser in result['Value'], expectedRes, "getUsers failed: %s" % result)
# remove the user we created
result = self.db.deleteUser(testUser, credDict)
self.assertEqual(result['OK'], expectedRes, "deleteUser failed: %s" % result)
def test_groupOperations(self):
"""Testing the group related operations"""
expectedRes = None
if isAdmin:
print "Running UserTest in admin mode"
expectedRes = True
else:
print "Running UserTest in non admin mode"
expectedRes = False
# Create new group
result = self.db.addGroup(testGroup, credDict)
self.assertEqual(result['OK'], expectedRes, "AddGroup failed when adding new user: %s" % result)
result = self.db.addGroup(testGroup, credDict)
self.assertEqual(
result['OK'],
expectedRes,
"AddGroup failed when adding existing user: %s" %
result)
result = self.db.getGroups(credDict)
self.assertEqual(result['OK'], expectedRes, "getGroups failed: %s" % result)
if isAdmin:
self.assertEqual(testGroup in result['Value'], expectedRes)
result = self.db.deleteGroup(testGroup, credDict)
self.assertEqual(result['OK'], expectedRes, "deleteGroup failed: %s" % result)
class FileCase(FileCatalogDBTestCase):
def test_fileOperations(self):
"""
        Tests the file-related operations.
        Note: this test does not require the SE to be properly defined in the CS.
"""
# Adding a new file
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
result = self.db.exists(testFile, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile) should be the same lfn %s" % result)
result = self.db.exists({testFile: '1000'}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1000) should be the same lfn %s" % result)
result = self.db.exists({testFile: {'GUID': '1000', 'PFN': 'blabla'}}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1000) should be the same lfn %s" % result)
# In fact, we don't check if the GUID is correct...
result = self.db.exists({testFile: '1001'}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1001) should be the same lfn %s" % result)
result = self.db.exists({testFile + '2': '1000'}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile + '2'),
testFile, "exists( testFile2 : 1000) should return testFile %s" % result)
# Re-adding the same file
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(
result["OK"],
"addFile failed when adding existing file with same param %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"addFile failed: it should be possible to add an existing lfn with same param %s" %
result)
# Adding same file with different param
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '1'}}, credDict)
self.assertTrue(
result["OK"],
"addFile failed when adding existing file with different parem %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"addFile failed: it should not be possible to add an existing lfn with different param %s" %
result)
result = self.db.addFile({testFile + '2': {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result["OK"], "addFile failed when adding existing file %s" % result)
self.assertTrue(
testFile +
'2' in result["Value"]["Failed"],
"addFile failed: it should not be possible to add a new lfn with existing GUID %s" %
result)
##################################################################################
# Setting existing status of existing file
result = self.db.setFileStatus({testFile: "AprioriGood"}, credDict)
self.assertTrue(
result["OK"],
"setFileStatus failed when setting existing status of existing file %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"setFileStatus failed: %s should be in successful (%s)" %
(testFile,
result))
# Setting unexisting status of existing file
result = self.db.setFileStatus({testFile: "Happy"}, credDict)
self.assertTrue(
result["OK"],
"setFileStatus failed when setting un-existing status of existing file %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setFileStatus should have failed %s" %
result)
# Setting existing status of unexisting file
result = self.db.setFileStatus({nonExistingFile: "Trash"}, credDict)
self.assertTrue(
result["OK"],
"setFileStatus failed when setting existing status of non-existing file %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"setFileStatus failed: %s should be in failed (%s)" %
(nonExistingFile,
result))
##################################################################################
result = self.db.isFile([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "isFile failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"isFile : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
result["Value"]["Successful"][testFile],
"isFile : %s should be seen as a file %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Successful"],
"isFile : %s should be in Successful %s" %
(nonExistingFile,
result))
self.assertTrue(result["Value"]["Successful"][nonExistingFile] is False,
"isFile : %s should be seen as a file %s" % (nonExistingFile, result))
result = self.db.changePathOwner({testFile: "toto", nonExistingFile: "tata"}, credDict)
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathOwner : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.changePathGroup({testFile: "toto", nonExistingFile: "tata"}, credDict)
self.assertTrue(result["OK"], "changePathGroup failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathGroup : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.changePathMode({testFile: 0o44, nonExistingFile: 0o44}, credDict)
self.assertTrue(result["OK"], "changePathMode failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathMode : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathMode : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.getFileSize([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "getFileSize failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getFileSize : %s should be in Successful %s" %
(testFile,
result))
self.assertEqual(
result["Value"]["Successful"][testFile],
123,
"getFileSize got incorrect file size %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getFileSize : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.getFileMetadata([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "getFileMetadata failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getFileMetadata : %s should be in Successful %s" %
(testFile,
result))
self.assertEqual(
result["Value"]["Successful"][testFile]["Owner"],
"toto",
"getFileMetadata got incorrect Owner %s" %
result)
self.assertEqual(
result["Value"]["Successful"][testFile]["Status"],
"AprioriGood",
"getFileMetadata got incorrect status %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getFileMetadata : %s should be in Failed %s" %
(nonExistingFile,
result))
# DOES NOT FOLLOW THE SUCCESSFUL/FAILED CONVENTION
# result = self.db.getFileDetails([testFile, nonExistingFile], credDict)
# self.assertTrue(result["OK"], "getFileDetails failed: %s" % result)
# self.assertTrue(
# testFile in result["Value"]["Successful"],
# "getFileDetails : %s should be in Successful %s" %
# (testFile,
# result))
# self.assertEqual(
# result["Value"]["Successful"][testFile]["Owner"],
# "toto",
# "getFileDetails got incorrect Owner %s" %
# result)
# self.assertTrue(
# nonExistingFile in result["Value"]["Failed"],
# "getFileDetails : %s should be in Failed %s" %
# (nonExistingFile,
# result))
        # ADD SOMETHING ABOUT FILE ANCESTORS AND DESCENDANTS
result = self.db.getSEDump('testSE')
self.assertTrue(result['OK'], "Error when getting SE dump %s" % result)
self.assertEqual(result['Value'], ((testFile, '0', 123),),
"Did not get the expected SE Dump %s" % result['Value'])
result = self.db.removeFile([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"removeFile : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
result["Value"]["Successful"][testFile],
"removeFile : %s should be in True %s" %
(testFile,
result))
self.assertTrue(
result["Value"]["Successful"][nonExistingFile],
"removeFile : %s should be in True %s" %
(nonExistingFile,
result))
class ReplicaCase(FileCatalogDBTestCase):
def test_replicaOperations(self):
"""
        Tests the replica-related operations.
        Note: this test does not require the SE to be properly defined in the CS.
"""
# Adding a new file
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
# Adding new replica
result = self.db.addReplica({testFile: {"PFN": "testFile", "SE": "otherSE"}}, credDict)
self.assertTrue(result['OK'], "addReplica failed when adding new Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"addReplica failed when adding new Replica %s" %
result)
# Adding the same replica
result = self.db.addReplica({testFile: {"PFN": "testFile", "SE": "otherSE"}}, credDict)
self.assertTrue(result['OK'], "addReplica failed when adding new Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"addReplica failed when adding new Replica %s" %
result)
# Adding replica of a non existing file
result = self.db.addReplica({nonExistingFile: {"PFN": "Idontexist", "SE": "otherSE"}}, credDict)
self.assertTrue(
result['OK'],
"addReplica failed when adding Replica to non existing Replica %s" %
result)
self.assertTrue(
nonExistingFile in result['Value']["Failed"],
"addReplica for non existing file should go in Failed %s" %
result)
# Setting existing status of existing Replica
result = self.db.setReplicaStatus({testFile: {"Status": "Trash", "SE": "otherSE"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"setReplicaStatus failed: %s should be in successful (%s)" %
(testFile,
result))
# Setting non existing status of existing Replica
result = self.db.setReplicaStatus(
{testFile: {"Status": "randomStatus", "SE": "otherSE"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting non-existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(testFile,
result))
# Setting existing status of non-existing Replica
result = self.db.setReplicaStatus(
{testFile: {"Status": "Trash", "SE": "nonExistingSe"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of non-existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(testFile,
result))
# Setting existing status of non-existing File
result = self.db.setReplicaStatus(
{nonExistingFile: {"Status": "Trash", "SE": "nonExistingSe"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of non-existing File %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(nonExistingFile,
result))
        # Getting existing status of existing Replica
result = self.db.getReplicaStatus({testFile: "testSE"}, credDict)
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicaStatus failed: %s should be in Successful (%s)" %
(testFile,
result))
# Getting existing status of existing Replica but not visible
result = self.db.getReplicaStatus({testFile: "otherSE"}, credDict)
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting existing status of existing Replica but not visible %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicaStatus failed: %s should be in Successful (%s)" %
(testFile,
result))
        # Getting status of non-existing File
result = self.db.getReplicaStatus({nonExistingFile: "testSE"}, credDict)
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting status of non existing File %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getReplicaStatus failed: %s should be in failed (%s)" %
(nonExistingFile,
result))
# Getting replicas of existing File and non existing file, seeing all replicas
result = self.db.getReplicas([testFile, nonExistingFile], allStatus=True, credDict=credDict)
self.assertTrue(result["OK"], "getReplicas failed %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicas failed, %s should be in Successful %s" %
(testFile,
result))
self.assertEqual(
result["Value"]["Successful"][testFile], {
"otherSE": "", "testSE": ""}, "getReplicas failed, %s should be in Successful %s" %
(testFile, result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getReplicas failed, %s should be in Failed %s" %
(nonExistingFile,
result))
# removing master replica
result = self.db.removeReplica({testFile: {"SE": "testSE"}}, credDict)
self.assertTrue(result['OK'], "removeReplica failed when removing master Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing master Replica %s" %
result)
# removing non existing replica of existing File
result = self.db.removeReplica({testFile: {"SE": "nonExistingSe2"}}, credDict)
self.assertTrue(
result['OK'],
"removeReplica failed when removing non existing Replica %s" %
result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing new Replica %s" %
result)
# removing non existing replica of non existing file
result = self.db.removeReplica({nonExistingFile: {"SE": "nonExistingSe3"}}, credDict)
self.assertTrue(
result['OK'],
"removeReplica failed when removing replica of non existing File %s" %
result)
self.assertTrue(
nonExistingFile in result['Value']["Successful"],
"removeReplica of non existing file, %s should be in Successful %s" %
(nonExistingFile,
result))
# removing last replica
result = self.db.removeReplica({testFile: {"SE": "otherSE"}}, credDict)
self.assertTrue(result['OK'], "removeReplica failed when removing last Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing last Replica %s" %
result)
# Cleaning after us
result = self.db.removeFile(testFile, credDict)
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
class DirectoryCase(FileCatalogDBTestCase):
def test_directoryOperations(self):
"""
        Tests the directory-related operations.
        Note: this test does not require the SE to be properly defined in the CS.
"""
# Adding a new directory
result = self.db.createDirectory(testDir, credDict)
self.assertTrue(result['OK'], "addDirectory failed when adding new directory %s" % result)
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
# Re-adding the same directory (CAUTION, different from addFile)
result = self.db.createDirectory(testDir, credDict)
self.assertTrue(result["OK"], "addDirectory failed when adding existing directory %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"addDirectory failed: it should be possible to add an existing lfn %s" %
result)
result = self.db.isDirectory([testDir, nonExistingDir], credDict)
self.assertTrue(result["OK"], "isDirectory failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"isDirectory : %s should be in Successful %s" %
(testDir,
result))
self.assertTrue(
result["Value"]["Successful"][testDir],
"isDirectory : %s should be seen as a directory %s" %
(testDir,
result))
self.assertTrue(
nonExistingDir in result["Value"]["Successful"],
"isDirectory : %s should be in Successful %s" %
(nonExistingDir,
result))
self.assertTrue(
result["Value"]["Successful"][nonExistingDir] is False,
"isDirectory : %s should be seen as a directory %s" %
(nonExistingDir,
result))
result = self.db.getDirectorySize([testDir, nonExistingDir], False, False, credDict)
self.assertTrue(result["OK"], "getDirectorySize failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"getDirectorySize : %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(result["Value"]["Successful"][testDir],
{'LogicalFiles': 1,
'LogicalDirectories': 0,
'LogicalSize': 123},
"getDirectorySize got incorrect directory size %s" % result)
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"getDirectorySize : %s should be in Failed %s" %
(nonExistingDir,
result))
result = self.db.getDirectorySize([testDir, nonExistingDir], False, True, credDict)
self.assertTrue(result["OK"], "getDirectorySize (calc) failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"getDirectorySize (calc): %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(result["Value"]["Successful"][testDir],
{'LogicalFiles': 1,
'LogicalDirectories': 0,
'LogicalSize': 123},
"getDirectorySize got incorrect directory size %s" % result)
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"getDirectorySize (calc) : %s should be in Failed %s" %
(nonExistingDir,
result))
result = self.db.listDirectory([parentDir, testDir, nonExistingDir], credDict)
self.assertTrue(result["OK"], "listDirectory failed: %s" % result)
self.assertTrue(
parentDir in result["Value"]["Successful"],
"listDirectory : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(result["Value"]["Successful"][parentDir]["SubDirs"].keys(), [testDir],
"listDir : incorrect content for %s (%s)" % (parentDir, result))
self.assertTrue(
testDir in result["Value"]["Successful"],
"listDirectory : %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(result["Value"]["Successful"][testDir]["Files"].keys(), [testFile.split("/")[-1]],
"listDir : incorrect content for %s (%s)" % (testDir, result))
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"listDirectory : %s should be in Failed %s" %
(nonExistingDir,
result))
# We do it two times to make sure that
# when updating something to the same value
# returns a success if it is allowed
for attempt in xrange(2):
print "Attempt %s" % (attempt + 1)
            # Only admin can change path owner and group
resultM = self.db.changePathMode({parentDir: 0o777}, credDict)
result = self.db.changePathOwner({parentDir: "toto"}, credDict)
resultG = self.db.changePathGroup({parentDir: "toto"}, credDict)
result2 = self.db.getDirectoryMetadata([parentDir, testDir], credDict)
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)
self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)
# Since we were the owner we should have been able to do it in any case, admin or not
self.assertTrue(
parentDir in resultM["Value"]["Successful"],
"changePathMode : %s should be in Successful %s" %
(parentDir,
resultM))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Mode'),
0o777,
"parentDir should have mode %s %s" %
(0o777,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('Mode'),
0o775,
"testDir should not have changed %s" %
result2)
if isAdmin:
self.assertTrue(
parentDir in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Owner'),
'toto',
"parentDir should belong to %s %s" %
(proxyUser,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('Owner'),
proxyUser,
"testDir should not have changed %s" %
result2)
self.assertTrue(
parentDir in resultG["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(parentDir,
resultG))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('OwnerGroup'),
'toto',
"parentDir should belong to %s %s" %
(proxyUser,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('OwnerGroup'),
proxyGroup,
"testDir should not have changed %s" %
result2)
else:
                # depends on the policy manager, so these checks are commented out
# self.assertTrue( parentDir in result["Value"]["Failed"], "changePathOwner : \
# %s should be in Failed %s" % ( parentDir, result ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'Owner' ), \
# proxyUser, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'Owner' ), \
# proxyUser, "testDir should not have changed %s" % result2 )
# self.assertTrue( parentDir in resultG["Value"]["Failed"], \
# "changePathGroup : %s should be in Failed %s" % ( parentDir, resultG ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "testDir should not have changed %s" % result2 )
pass
            # Only admin can change path owner and group
resultM = self.db.changePathMode({parentDir: 0o777}, credDict, True)
result = self.db.changePathOwner({parentDir: "toto"}, credDict, True)
resultG = self.db.changePathGroup({parentDir: "toto"}, credDict, True)
result2 = self.db.getDirectoryMetadata([parentDir, testDir], credDict)
result3 = self.db.getFileMetadata(testFile, credDict)
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)
self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)
self.assertTrue(result3["OK"], "getFileMetadata failed: %s" % result)
# Since we were the owner we should have been able to do it in any case, admin or not
self.assertTrue(
parentDir in resultM["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(parentDir,
resultM))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Mode'),
0o777,
"parentDir should have mode %s %s" %
(0o777,
result2))
self.assertEqual(
result2['Value'].get(
'Successful', {}).get(
testDir, {}).get('Mode'), 0o777, "testDir should have mode %s %s" %
(0o777, result2))
self.assertEqual(
result3['Value'].get(
'Successful', {}).get(
testFile, {}).get('Mode'), 0o777, "testFile should have mode %s %s" %
(0o777, result3))
if isAdmin:
self.assertTrue(
parentDir in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Owner'),
'toto',
"parentDir should belong to %s %s" %
(proxyUser,
result2))
self.assertEqual(
result2['Value'].get(
'Successful', {}).get(
testDir, {}).get('Owner'), 'toto', "testDir should belong to %s %s" %
(proxyUser, result2))
self.assertEqual(
result3['Value'].get(
'Successful',
{}).get(
testFile,
{}).get('Owner'),
'toto',
"testFile should belong to %s %s" %
(proxyUser,
result3))
self.assertTrue(
parentDir in resultG["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(parentDir,
resultG))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('OwnerGroup'),
'toto',
"parentDir should belong to %s %s" %
(proxyGroup,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('OwnerGroup'),
'toto',
"testDir should belong to %s %s" %
(proxyGroup,
result2))
self.assertEqual(
result3['Value'].get(
'Successful',
{}).get(
testFile,
{}).get('OwnerGroup'),
'toto',
"testFile should belong to %s %s" %
(proxyGroup,
result3))
else:
                # depends on the policy manager, so these checks are commented out
# self.assertTrue( parentDir in result["Value"]["Failed"], \
# "changePathOwner : %s should be in Failed %s" % ( parentDir, result ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'Owner' ), \
# proxyUser, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'Owner' ), \
# proxyUser, "testDir should not have changed %s" % result2 )
# self.assertEqual( result3['Value'].get( 'Successful', {} ).get( testFile, {} ).get( 'Owner' ), \
# proxyUser, "testFile should not have changed %s" % result3 )
#
# self.assertTrue( parentDir in resultG["Value"]["Failed"], \
# "changePathGroup : %s should be in Failed %s" % ( parentDir, resultG ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "testDir should not have changed %s" % result2 )
# self.assertEqual( result3['Value'].get( 'Successful', {} ).get( testFile, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "testFile should not have changed %s" % result3 )
pass
# Cleaning after us
result = self.db.removeFile(testFile, credDict)
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
pathParts = testDir.split('/')[1:]
startDir = '/'
pathToRemove = []
for part in pathParts:
startDir = os.path.join(startDir, part)
pathToRemove.append(startDir)
pathToRemove.reverse()
for toRemove in pathToRemove:
result = self.db.removeDirectory(toRemove, credDict)
self.assertTrue(result["OK"], "removeDirectory failed: %s" % result)
class DirectoryUsageCase (FileCatalogDBTestCase):
def getPhysicalSize(self, sizeDict, dirName, seName):
""" Extract the information from a ret dictionary
and return the tuple (files, size) for a given
directory and a se
"""
val = sizeDict[dirName]['PhysicalSize'][seName]
files = val['Files']
size = val['Size']
return (files, size)
def getLogicalSize(self, sizeDict, dirName):
""" Extract the information from a ret dictionary
and return the tuple (files, size) for a given
directory and a se
"""
files = sizeDict[dirName]['LogicalFiles']
size = sizeDict[dirName]['LogicalSize']
return (files, size)
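    # Illustrative shape of the dictionary the two helpers above expect
    # (directory names and sizes are hypothetical):
    #
    #   {'/sizeTest/d1': {'LogicalFiles': 2,
    #                     'LogicalSize': 6000000001,
    #                     'PhysicalSize': {'se1': {'Files': 1, 'Size': 3000000000},
    #                                      'se2': {'Files': 1, 'Size': 3000000001}}}}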
def getAndCompareDirectorySize(self, dirList):
""" Fetch the directory size from the DirectoryUsage table
and calculate it, compare the results, and then return
the values
"""
retTable = self.db.getDirectorySize(dirList, True, False, credDict)
retCalc = self.db.getDirectorySize(dirList, True, True, credDict)
self.assertTrue(retTable["OK"])
self.assertTrue(retCalc["OK"])
succTable = retTable['Value']['Successful']
succCalc = retCalc['Value']['Successful']
# Since we have simple type, the == is recursive for dict :-)
retEquals = (succTable == succCalc)
self.assertTrue(retEquals, "Calc and table results different %s %s" % (succTable, succCalc))
return retTable
def test_directoryUsage(self):
"""Testing DirectoryUsage related operation"""
# create SE
# Only admin can run that
if not isAdmin:
return
d1 = '/sizeTest/d1'
d2 = '/sizeTest/d2'
f1 = d1 + '/f1'
f2 = d1 + '/f2'
f3 = d2 + '/f3'
f1Size = 3000000000
f2Size = 3000000001
f3Size = 3000000002
# f1Size = 1
# f2Size = 2
# f3Size = 5
for sen in ['se1', 'se2', 'se3']:
ret = self.db.addSE(sen, credDict)
self.assertTrue(ret["OK"])
for din in [d1, d2]:
ret = self.db.createDirectory(din, credDict)
self.assertTrue(ret["OK"])
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'},
f2: {'PFN': 'f2se2',
'SE': 'se2',
'Size': f2Size,
'GUID': '1001',
'Checksum': '2'}}, credDict)
self.assertTrue(ret["OK"])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize(val, d1, 'se1')
d1s2 = self.getPhysicalSize(val, d1, 'se2')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
self.assertEqual(d1s2, (1, f2Size), "Unexpected size %s, expected %s" % (d1s2, (1, f2Size)))
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
ret = self.db.addReplica({f1: {"PFN": "f1se2", "SE": "se2"},
f2: {"PFN": "f1se3", "SE": "se3"}},
credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize(val, d1, 'se1')
d1s2 = self.getPhysicalSize(val, d1, 'se2')
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
self.assertEqual(
d1s2, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1s2, (2, f1Size + f2Size)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
ret = self.db.removeFile([f1], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
# Here we should have the KeyError, since there are no files left on s1 in principle
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
d1s2 = self.getPhysicalSize(val, d1, 'se2')
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s2, (1, f2Size), "Unexpected size %s, expected %s" % (d1s2, (1, f2Size)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
ret = self.db.removeReplica({f2: {"SE": "se2"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
        # Here we should have the KeyError, since there are no files left on se2 in principle
        try:
            d1s2 = self.getPhysicalSize(val, d1, 'se2')
        except KeyError:
            d1s2 = (0, 0)
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s2, (0, 0), "Unexpected size %s, expected %s" % (d1s2, (0, 0)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'},
f3: {'PFN': 'f3se3',
'SE': 'se3',
'Size': f3Size,
'GUID': '1003',
'Checksum': '3'}}, credDict)
self.assertTrue(ret["OK"])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize(val, d1, 'se1')
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d2s3 = self.getPhysicalSize(val, d2, 'se3')
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeReplica({f1: {"SE": "se1"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d2s3 = self.getPhysicalSize(val, d2, 'se3')
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
# This one is silly... there are no replicas of f1, but since the file is still there,
# the logical size does not change
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeFile([f1], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d2s3 = self.getPhysicalSize(val, d2, 'se3')
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeReplica({f2: {"SE": "se3"},
f3: {"SE": "se3"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
try:
d1s3 = self.getPhysicalSize(val, d1, 'se3')
except KeyError:
d1s3 = (0, 0)
try:
d2s3 = self.getPhysicalSize(val, d2, 'se3')
except KeyError:
d2s3 = (0, 0)
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (0, 0), "Unexpected size %s, expected %s" % (d1s3, (0, 0)))
self.assertEqual(d2s3, (0, 0), "Unexpected size %s, expected %s" % (d2s3, (0, 0)))
    # There are no replicas of f2 and f3 left, but since the files themselves
    # are still there, the logical sizes do not change
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeFile([f2, f3], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
try:
d1s3 = self.getPhysicalSize(val, d1, 'se3')
except KeyError:
d1s3 = (0, 0)
try:
d2s3 = self.getPhysicalSize(val, d2, 'se3')
except KeyError:
d2s3 = (0, 0)
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (0, 0), "Unexpected size %s, expected %s" % (d1s3, (0, 0)))
self.assertEqual(d2s3, (0, 0), "Unexpected size %s, expected %s" % (d2s3, (0, 0)))
    # Now that the files themselves have been removed as well, the logical
    # sizes drop to zero
self.assertEqual(d1l, (0, 0), "Unexpected size %s, expected %s" % (d1l, (0, 0)))
self.assertEqual(d2l, (0, 0), "Unexpected size %s, expected %s" % (d2l, (0, 0)))
# Removing Replicas and Files from the same directory
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'},
f2: {'PFN': 'f2se2',
'SE': 'se1',
'Size': f2Size,
'GUID': '1001',
'Checksum': '2'}}, credDict)
ret = self.db.removeReplica({f1: {"SE": "se1"},
f2: {"SE": "se1"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
ret = self.db.removeFile([f1, f2], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1l, (0, 0), "Unexpected size %s, expected %s" % (d1l, (0, 0)))
    # Try removing a replica from a non-existing SE
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'}}, credDict)
ret = self.db.removeReplica({f1: {"SE": "se2"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s2 = self.getPhysicalSize(val, d1, 'se2')
except KeyError:
d1s2 = (0, 0)
self.assertEqual(d1s2, (0, 0), "Unexpected size %s, expected %s" % (d1s2, (0, 0)))
if __name__ == '__main__':
managerTypes = MANAGER_TO_TEST.keys()
all_combinations = list(itertools.product(*MANAGER_TO_TEST.values()))
numberOfManager = len(managerTypes)
for setup in all_combinations:
print "Running with:"
print ("".join(["\t %s : %s\n" % (managerTypes[i], setup[i]) for i in xrange(numberOfManager)]))
for i in xrange(numberOfManager):
DATABASE_CONFIG[managerTypes[i]] = setup[i]
suite = unittest.defaultTestLoader.loadTestsFromTestCase(SECase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(UserGroupCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FileCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ReplicaCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryUsageCase))
    # First run without admin privileges:
isAdmin = False
if FC_MANAGEMENT in credDict['properties']:
credDict['properties'].remove(FC_MANAGEMENT)
print "Running test without admin privileges"
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
    # Then run with admin privileges:
isAdmin = True
if FC_MANAGEMENT not in credDict['properties']:
credDict['properties'].append(FC_MANAGEMENT)
print "Running test with admin privileges"
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
| gpl-3.0 | -437,399,670,991,288,400 | -6,049,536,813,524,112,000 | 37.861549 | 118 | 0.580292 | false |
Eric89GXL/mne-python | mne/datasets/sleep_physionet/temazepam.py | 8 | 4230 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Joan Massich <[email protected]>
#
# License: BSD Style.
import numpy as np
from ...utils import verbose
from ._utils import _fetch_one, _data_path, TEMAZEPAM_SLEEP_RECORDS
from ._utils import _check_subjects
data_path = _data_path # expose _data_path(..) as data_path(..)
BASE_URL = 'https://physionet.org/physiobank/database/sleep-edfx/sleep-telemetry/' # noqa: E501
@verbose
def fetch_data(subjects, recording=[b'Placebo', 'temazepam'],
path=None, force_update=False,
update_path=None, base_url=BASE_URL, verbose=None):
"""Get paths to local copies of PhysioNet Polysomnography dataset files.
This will fetch data from the publicly available subjects from PhysioNet's
study of Temazepam effects on sleep [1]_. This corresponds to
a set of 22 subjects. Subjects had mild difficulty falling asleep
but were otherwise healthy.
See more details in the `physionet website
<https://physionet.org/physiobank/database/sleep-edfx/>`_.
Parameters
----------
subjects : list of int
The subjects to use. Can be in the range of 0-21 (inclusive).
path : None | str
Location of where to look for the PhysioNet data storing location.
If None, the environment variable or config parameter
``MNE_DATASETS_PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist,
the "~/mne_data" directory is used. If the Polysomnography dataset
is not found under the given path, the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
config to the given path. If None, the user is prompted.
%(verbose)s
Returns
-------
paths : list
List of local data paths of the given type.
Notes
-----
For example, one could do:
>>> from mne.datasets import sleep_physionet
>>> sleep_physionet.temazepam.fetch_data(subjects=[1]) # doctest: +SKIP
This would download data for subject 0 if it isn't there already.
References
----------
.. [1] B Kemp, AH Zwinderman, B Tuk, HAC Kamphuisen, JJL Oberyé. Analysis
of a sleep-dependent neuronal feedback loop: the slow-wave
microcontinuity of the EEG. IEEE-BME 47(9):1185-1194 (2000).
.. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)
PhysioBank, PhysioToolkit, and PhysioNet: Components of a New
Research Resource for Complex Physiologic Signals.
Circulation 101(23):e215-e220
See Also
--------
:func:`mne.datasets.sleep_physionet.age.fetch_data`
"""
records = np.loadtxt(TEMAZEPAM_SLEEP_RECORDS,
skiprows=1,
delimiter=',',
usecols=(0, 3, 6, 7, 8, 9),
dtype={'names': ('subject', 'record', 'hyp sha',
'psg sha', 'hyp fname', 'psg fname'),
'formats': ('<i2', '<S15', 'S40', 'S40',
'<S22', '<S16')}
)
_check_subjects(subjects, 22)
path = data_path(path=path, update_path=update_path)
params = [path, force_update, base_url]
fnames = []
for subject in subjects: # all the subjects are present at this point
for idx in np.where(records['subject'] == subject)[0]:
if records['record'][idx] == b'Placebo':
psg_fname = _fetch_one(records['psg fname'][idx].decode(),
records['psg sha'][idx].decode(),
*params)
hyp_fname = _fetch_one(records['hyp fname'][idx].decode(),
records['hyp sha'][idx].decode(),
*params)
fnames.append([psg_fname, hyp_fname])
return fnames
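# Illustrative usage sketch (an assumption, not part of the original module):
# fetch the placebo-night recordings of one subject and print the local file
# paths. Requires network access to PhysioNet on the first run.
if __name__ == '__main__':
    example_paths = fetch_data(subjects=[0])
    for psg_path, hyp_path in example_paths:
        print('PSG file: %s' % psg_path)
        print('Hypnogram file: %s' % hyp_path)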
| bsd-3-clause | 7,652,548,257,144,214,000 | 5,216,466,351,628,650,000 | 38.896226 | 96 | 0.584062 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/virtual_network_peering_py3.py | 1 | 4752 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualNetworkPeering(SubResource):
"""Peerings in a virtual network resource.
:param id: Resource ID.
:type id: str
:param allow_virtual_network_access: Whether the VMs in the linked virtual
network space would be able to access all the VMs in local Virtual network
space.
:type allow_virtual_network_access: bool
:param allow_forwarded_traffic: Whether the forwarded traffic from the VMs
in the remote virtual network will be allowed/disallowed.
:type allow_forwarded_traffic: bool
:param allow_gateway_transit: If gateway links can be used in remote
virtual networking to link to this virtual network.
:type allow_gateway_transit: bool
:param use_remote_gateways: If remote gateways can be used on this virtual
network. If the flag is set to true, and allowGatewayTransit on remote
peering is also true, virtual network will use gateways of remote virtual
network for transit. Only one peering can have this flag set to true. This
flag cannot be set if virtual network already has a gateway.
:type use_remote_gateways: bool
:param remote_virtual_network: The reference of the remote virtual
network. The remote virtual network can be in the same or different region
(preview). See here to register for the preview and learn more
(https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-create-peering).
:type remote_virtual_network:
~azure.mgmt.network.v2017_10_01.models.SubResource
:param remote_address_space: The reference of the remote virtual network
address space.
:type remote_address_space:
~azure.mgmt.network.v2017_10_01.models.AddressSpace
:param peering_state: The status of the virtual network peering. Possible
values are 'Initiated', 'Connected', and 'Disconnected'. Possible values
include: 'Initiated', 'Connected', 'Disconnected'
:type peering_state: str or
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkPeeringState
:param provisioning_state: The provisioning state of the resource.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
'remote_address_space': {'key': 'properties.remoteAddressSpace', 'type': 'AddressSpace'},
'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, allow_virtual_network_access: bool=None, allow_forwarded_traffic: bool=None, allow_gateway_transit: bool=None, use_remote_gateways: bool=None, remote_virtual_network=None, remote_address_space=None, peering_state=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(VirtualNetworkPeering, self).__init__(id=id, **kwargs)
self.allow_virtual_network_access = allow_virtual_network_access
self.allow_forwarded_traffic = allow_forwarded_traffic
self.allow_gateway_transit = allow_gateway_transit
self.use_remote_gateways = use_remote_gateways
self.remote_virtual_network = remote_virtual_network
self.remote_address_space = remote_address_space
self.peering_state = peering_state
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
| mit | -7,747,053,133,847,115,000 | 8,620,110,880,076,848,000 | 54.255814 | 339 | 0.681187 | false |
justnom/ansible-modules-core | cloud/openstack/os_image.py | 109 | 6129 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#TODO(mordred): we need to support "location"(v1) and "locations"(v2)
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_image
short_description: Add/Delete images from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove images from the OpenStack Image Repository
options:
name:
description:
- Name that has to be given to the image
required: true
default: None
disk_format:
description:
- The format of the disk that is getting uploaded
required: false
default: qcow2
container_format:
description:
- The format of the container
required: false
default: bare
owner:
description:
- The owner of the image
required: false
default: None
min_disk:
description:
- The minimum disk space required to deploy this image
required: false
default: None
min_ram:
description:
- The minimum ram required to deploy this image
required: false
default: None
is_public:
description:
- Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default.
required: false
default: 'yes'
filename:
description:
- The path to the file which has to be uploaded
required: false
default: None
ramdisk:
    description:
- The name of an existing ramdisk image that will be associated with this image
required: false
default: None
kernel:
    description:
- The name of an existing kernel image that will be associated with this image
required: false
default: None
properties:
description:
- Additional properties to be associated with this image
required: false
default: {}
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img
- os_image:
auth:
auth_url: http://localhost/auth/v2.0
username: admin
password: passme
project_name: admin
name: cirros
container_format: bare
disk_format: qcow2
state: present
filename: cirros-0.3.0-x86_64-disk.img
kernel: cirros-vmlinuz
ramdisk: cirros-initrd
properties:
cpu_arch: x86_64
distro: ubuntu
'''
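# A hedged additional example (not part of the original documentation):
# removing the image registered above. The auth values are placeholders.
#
# - os_image:
#     auth:
#       auth_url: http://localhost/auth/v2.0
#       username: admin
#       password: passme
#       project_name: admin
#     name: cirros
#     state: absent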
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']),
container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova']),
owner = dict(default=None),
min_disk = dict(default=None),
min_ram = dict(default=None),
is_public = dict(default=False),
filename = dict(default=None),
ramdisk = dict(default=None),
kernel = dict(default=None),
properties = dict(default={}),
state = dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
changed = False
image = cloud.get_image(name_or_id=module.params['name'])
if module.params['state'] == 'present':
if not image:
image = cloud.create_image(
name=module.params['name'],
filename=module.params['filename'],
disk_format=module.params['disk_format'],
container_format=module.params['container_format'],
wait=module.params['wait'],
timeout=module.params['timeout']
)
changed = True
if not module.params['wait']:
module.exit_json(changed=changed, image=image, id=image.id)
cloud.update_image_properties(
image=image,
kernel=module.params['kernel'],
ramdisk=module.params['ramdisk'],
**module.params['properties'])
image = cloud.get_image(name_or_id=image.id)
module.exit_json(changed=changed, image=image, id=image.id)
elif module.params['state'] == 'absent':
if not image:
changed = False
else:
cloud.delete_image(
name_or_id=module.params['name'],
wait=module.params['wait'],
timeout=module.params['timeout'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message, extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
| gpl-3.0 | -1,749,540,885,884,135,200 | 2,764,538,291,957,098,000 | 31.601064 | 126 | 0.611682 | false |
jarvys/django-1.7-jdb | django/contrib/gis/geos/prototypes/errcheck.py | 66 | 3563 | """
Error checking functions for GEOS ctypes prototype functions.
"""
import os
from ctypes import c_void_p, string_at, CDLL
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import GEOS_VERSION
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# Getting the `free` routine used to free the memory allocated for
# string pointers returned by GEOS.
if GEOS_VERSION >= (3, 1, 1):
# In versions 3.1.1 and above, `GEOSFree` was added to the C API
# because `free` isn't always available on all platforms.
free = GEOSFunc('GEOSFree')
free.argtypes = [c_void_p]
free.restype = None
else:
# Getting the `free` routine from the C library of the platform.
if os.name == 'nt':
# On NT, use the MS C library.
libc = CDLL('msvcrt')
else:
# On POSIX platforms C library is obtained by passing None into `CDLL`.
libc = CDLL(None)
free = libc.free
### ctypes error checking routines ###
def last_arg_byref(args):
"Returns the last C argument's value by reference."
return args[-1]._obj.value
def check_dbl(result, func, cargs):
"Checks the status code and returns the double value passed in by reference."
# Checking the status code
if result != 1:
return None
# Double passed in by reference, return its value.
return last_arg_byref(cargs)
def check_geom(result, func, cargs):
"Error checking on routines that return Geometries."
if not result:
raise GEOSException('Error encountered checking Geometry returned from GEOS C function "%s".' % func.__name__)
return result
def check_minus_one(result, func, cargs):
"Error checking on routines that should not return -1."
if result == -1:
raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
else:
return result
def check_predicate(result, func, cargs):
"Error checking for unary/binary predicate functions."
val = ord(result) # getting the ordinal from the character
if val == 1:
return True
elif val == 0:
return False
else:
raise GEOSException('Error encountered on GEOS C predicate function "%s".' % func.__name__)
def check_sized_string(result, func, cargs):
"""
Error checking for routines that return explicitly sized strings.
This frees the memory allocated by GEOS at the result pointer.
"""
if not result:
raise GEOSException('Invalid string pointer returned by GEOS C function "%s"' % func.__name__)
# A c_size_t object is passed in by reference for the second
# argument on these routines, and its needed to determine the
# correct size.
s = string_at(result, last_arg_byref(cargs))
# Freeing the memory allocated within GEOS
free(result)
return s
def check_string(result, func, cargs):
"""
Error checking for routines that return strings.
This frees the memory allocated by GEOS at the result pointer.
"""
if not result:
raise GEOSException('Error encountered checking string return value in GEOS C function "%s".' % func.__name__)
# Getting the string value at the pointer address.
s = string_at(result)
# Freeing the memory allocated within GEOS
free(result)
return s
def check_zero(result, func, cargs):
"Error checking on routines that should not return 0."
if result == 0:
raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
else:
return result
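def _example_attach_errcheck():
    """
    Hedged sketch (not part of the original module) showing how these
    checkers are used: a GEOS prototype gets one of them assigned to its
    ctypes ``errcheck`` attribute. ``GEOSGetCentroid`` is a real GEOS
    routine, but this exact wiring is an illustrative assumption.
    """
    geos_centroid = GEOSFunc('GEOSGetCentroid')
    geos_centroid.argtypes = [c_void_p]
    geos_centroid.restype = c_void_p
    geos_centroid.errcheck = check_geom
    return geos_centroid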
| bsd-3-clause | -3,885,777,574,380,108,300 | 8,333,320,185,307,203,000 | 32.299065 | 118 | 0.676958 | false |
cynngah/uofthacksIV | generate-jobs/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py | 395 | 25647 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
from . import tarfile
try:
import bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive", "ignore_patterns"]
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
class ReadError(EnvironmentError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
and unpacking registries fails"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
"""Copy data from src to dst"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
def copymode(src, dst):
"""Copy mode bits from src to dst"""
if hasattr(os, 'chmod'):
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
os.chmod(dst, mode)
def copystat(src, dst):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
os.utime(dst, (st.st_atime, st.st_mtime))
if hasattr(os, 'chmod'):
os.chmod(dst, mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
try:
os.chflags(dst, st.st_flags)
except OSError as why:
if (not hasattr(errno, 'EOPNOTSUPP') or
why.errno != errno.EOPNOTSUPP):
raise
def copy(src, dst):
"""Copy data and mode bits ("cp src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst)
def copy2(src, dst):
"""Copy data and all stat info ("cp -p src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copystat(src, dst)
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
os.symlink(linkto, dstname)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occurs. copy2 will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
            errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
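def _example_copytree_usage():
    # Hedged usage sketch (not part of the original module): copy a source
    # tree while skipping compiled files and editor backups. The paths and
    # patterns are illustrative assumptions.
    copytree('project/src', '/tmp/src_backup',
             ignore=ignore_patterns('*.pyc', '*~', 'tmp*'))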
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
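def _example_rmtree_onerror(func, path, exc_info):
    # Hedged sketch (not part of the original module) of an ``onerror``
    # handler for rmtree(): clear the read-only bit and retry the failed
    # call; a persistent failure will simply raise again inside ``func``.
    os.chmod(path, stat.S_IWUSR)
    func(path)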
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
        raise ExecError("unable to create zip file '%s': "
                        "could neither import the 'zipfile' module nor "
                        "find a standalone zip utility" % zip_filename)
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file"),
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not isinstance(function, collections.Callable):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) !=2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
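def _example_make_archive_usage():
    # Hedged usage sketch (not part of the original module): create
    # /tmp/release.tar.gz containing the 'project' directory found under
    # /home/user. All paths are illustrative assumptions.
    return make_archive('/tmp/release', 'gztar',
                        root_dir='/home/user', base_dir='project')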
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not isinstance(function, collections.Callable):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registry."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
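def _example_unpack_archive_usage():
    # Hedged usage sketch (not part of the original module): unpack an
    # archive into a scratch directory, letting the file extension select
    # the registered format. The paths are illustrative assumptions.
    unpack_archive('/tmp/release.tar.gz', extract_dir='/tmp/unpacked')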
| mit | 6,455,744,009,499,268,000 | -1,311,314,460,228,021,000 | 32.701708 | 90 | 0.614653 | false |
NelleV/pyconfr-test | symposion_project/settings.py | 1 | 6854 | # -*- coding: utf-8 -*-
# Django settings for account project
import os.path
import posixpath
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA = DEBUG
INTERNAL_IPS = [
"127.0.0.1",
]
ADMINS = [
# ("Your Name", "[email protected]"),
]
MANAGERS = ADMINS
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3", # Add "postgresql_psycopg2", "postgresql", "mysql", "sqlite3" or "oracle".
"NAME": "dev.db", # Or path to database file if using sqlite3.
"USER": "", # Not used with sqlite3.
"PASSWORD": "", # Not used with sqlite3.
"HOST": "", # Set to empty string for localhost. Not used with sqlite3.
"PORT": "", # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "US/Eastern"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory that holds static files like app media.
# Example: "/home/media/media.lawrence.com/apps/"
STATIC_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "static")
# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = "/site_media/static/"
# Additional directories which hold static files
STATICFILES_DIRS = [
os.path.join(PACKAGE_ROOT, "static"),
]
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# Make this unique, and don't share it with anybody.
SECRET_KEY = "8*br)9@fs!4nzg-imfrsst&oa2udy6z-fqtdk0*e5c1=wn)(t3"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.transaction.TransactionMiddleware",
"reversion.middleware.RevisionMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
ROOT_URLCONF = "symposion_project.urls"
TEMPLATE_DIRS = [
os.path.join(PACKAGE_ROOT, "templates"),
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"pinax_utils.context_processors.settings",
"account.context_processors.account",
"symposion.reviews.context_processors.reviews",
]
INSTALLED_APPS = [
# Django
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.humanize",
# theme
"pinax_theme_bootstrap_account",
"pinax_theme_bootstrap",
"django_forms_bootstrap",
# external
"debug_toolbar",
"mailer",
"timezones",
"metron",
"markitup",
"taggit",
"reversion",
"easy_thumbnails",
"sitetree",
"account",
# symposion
"symposion",
"symposion.sponsorship",
"symposion.conference",
"symposion.cms",
"symposion.boxes",
"symposion.proposals",
"symposion.speakers",
"symposion.teams",
"symposion.reviews",
"symposion.schedule",
# project
"symposion_project.proposals",
]
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
EMAIL_BACKEND = "mailer.backend.DbBackend"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_USE_OPENID = False
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_VERIFICATION = False
ACCOUNT_EMAIL_AUTHENTICATION = False
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = False
ACCOUNT_SIGNUP_REDIRECT_URL = "dashboard"
ACCOUNT_LOGIN_REDIRECT_URL = "dashboard"
ACCOUNT_LOGOUT_REDIRECT_URL = "home"
ACCOUNT_USER_DISPLAY = lambda user: user.email
AUTHENTICATION_BACKENDS = [
# Permissions Backends
"symposion.teams.backends.TeamPermissionsBackend",
# Auth backends
"account.auth_backends.EmailAuthenticationBackend",
]
LOGIN_URL = "/account/login/" # @@@ any way this can be a url name?
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
}
MARKITUP_FILTER = ("markdown.markdown", {"safe_mode": True})
MARKITUP_SET = "markitup/sets/markdown"
MARKITUP_SKIN = "markitup/skins/simple"
CONFERENCE_ID = 1
SYMPOSION_PAGE_REGEX = r"(([\w-]{1,})(/[\w-]{1,})*)/"
PROPOSAL_FORMS = {
"tutorial": "symposion_project.proposals.forms.TutorialProposalForm",
"talk": "symposion_project.proposals.forms.TalkProposalForm",
"poster": "symposion_project.proposals.forms.PosterProposalForm",
}
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from local_settings import *
except ImportError:
pass
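# Illustrative local_settings.py sketch (an assumption, not shipped with this
# project): production deployments typically override the database and email
# settings there, e.g.
#
#     DEBUG = False
#     DATABASES = {
#         "default": {
#             "ENGINE": "django.db.backends.postgresql_psycopg2",
#             "NAME": "symposion",
#         }
#     }
#     EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"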
| bsd-3-clause | 6,970,926,552,598,062,000 | -577,596,248,574,788,900 | 29.327434 | 122 | 0.695506 | false |
muffinresearch/olympia | apps/zadmin/tests/test_tasks.py | 10 | 6460 | # -*- coding: utf-8 -*-
from django.conf import settings
import mock
import urlparse
import amo
import amo.tests
from addons.models import Addon
from applications.models import AppVersion
from files.utils import make_xpi
from versions.compare import version_int
from zadmin import tasks
def RequestMock(response='', headers=None):
"""Mocks the request objects of urllib2 and requests modules."""
res = mock.Mock()
res.read.return_value = response
res.contents = response
res.text = response
res.iter_lines.side_effect = lambda chunk_size=1: (response.split('\n')
.__iter__())
res.iter_content.side_effect = lambda chunk_size=1: (response,).__iter__()
def lines():
return [l + '\n' for l in response.split('\n')[:-1]]
res.readlines.side_effect = lines
res.iter_lines.side_effect = lambda: lines().__iter__()
res.headers = headers or {}
res.headers['content-length'] = len(response)
return res
def make_langpack(version):
versions = (version, '%s.*' % version)
for version in versions:
AppVersion.objects.get_or_create(application=amo.FIREFOX.id,
version=version,
version_int=version_int(version))
return make_xpi({
'install.rdf': """<?xml version="1.0"?>
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest"
em:id="[email protected]"
em:name="Foo Language Pack"
em:version="{0}"
em:type="8"
em:creator="mozilla.org">
<em:targetApplication>
<Description>
<em:id>{{ec8030f7-c20a-464f-9b0e-13a3a9e97384}}</em:id>
<em:minVersion>{0}</em:minVersion>
<em:maxVersion>{1}</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>
""".format(*versions)
}).read()
class TestLangpackFetcher(amo.tests.TestCase):
fixtures = ['zadmin/users']
LISTING = 'pretend-this-is-a-sha256-sum win32/xpi/de-DE.xpi\n'
def setUp(self):
super(TestLangpackFetcher, self).setUp()
request_patch = mock.patch('zadmin.tasks.requests.get')
self.mock_request = request_patch.start()
self.addCleanup(request_patch.stop)
def get_langpacks(self):
return (Addon.objects.no_cache()
.filter(addonuser__user__email=settings.LANGPACK_OWNER_EMAIL,
type=amo.ADDON_LPAPP))
def fetch_langpacks(self, version):
path = settings.LANGPACK_PATH_DEFAULT % ('firefox', version)
base_url = urlparse.urljoin(settings.LANGPACK_DOWNLOAD_BASE, path)
list_url = urlparse.urljoin(base_url, settings.LANGPACK_MANIFEST_PATH)
langpack_url = urlparse.urljoin(base_url, 'de-DE.xpi')
responses = {list_url: RequestMock(self.LISTING),
langpack_url: RequestMock(make_langpack(version))}
self.mock_request.reset_mock()
self.mock_request.side_effect = lambda url, **kw: responses.get(url)
tasks.fetch_langpacks(path)
self.mock_request.assert_has_calls(
[mock.call(list_url, verify=settings.CA_CERT_BUNDLE_PATH),
mock.call(langpack_url, verify=settings.CA_CERT_BUNDLE_PATH)])
def test_fetch_new_langpack(self):
assert self.get_langpacks().count() == 0
self.fetch_langpacks(amo.FIREFOX.latest_version)
langpacks = self.get_langpacks()
assert langpacks.count() == 1
a = langpacks[0]
assert a.default_locale == 'de-DE'
assert a.target_locale == 'de-DE'
assert a._current_version
assert a.current_version.version == amo.FIREFOX.latest_version
assert a.status == amo.STATUS_PUBLIC
assert a.current_version.files.all()[0].status == amo.STATUS_PUBLIC
def test_fetch_updated_langpack(self):
versions = ('16.0', '17.0')
self.fetch_langpacks(versions[0])
assert self.get_langpacks().count() == 1
self.fetch_langpacks(versions[1])
langpacks = self.get_langpacks()
assert langpacks.count() == 1
a = langpacks[0]
assert a.versions.count() == 2
v = a.versions.get(version=versions[1])
assert v.files.all()[0].status == amo.STATUS_PUBLIC
def test_fetch_duplicate_langpack(self):
self.fetch_langpacks(amo.FIREFOX.latest_version)
langpacks = self.get_langpacks()
assert langpacks.count() == 1
assert langpacks[0].versions.count() == 1
assert (langpacks[0].versions.all()[0].version ==
amo.FIREFOX.latest_version)
self.fetch_langpacks(amo.FIREFOX.latest_version)
langpacks = self.get_langpacks()
assert langpacks.count() == 1
assert langpacks[0].versions.count() == 1
assert (langpacks[0].versions.all()[0].version ==
amo.FIREFOX.latest_version)
def test_fetch_updated_langpack_beta(self):
versions = ('16.0', '16.0a2')
self.fetch_langpacks(versions[0])
assert self.get_langpacks().count() == 1
self.fetch_langpacks(versions[1])
langpacks = self.get_langpacks()
assert langpacks.count() == 1
a = langpacks[0]
assert a.versions.count() == 2
v = a.versions.get(version=versions[1])
assert v.files.all()[0].status == amo.STATUS_BETA
def test_fetch_new_langpack_beta(self):
self.fetch_langpacks('16.0a2')
assert self.get_langpacks().count() == 0
def test_fetch_langpack_wrong_owner(self):
Addon.objects.create(guid='[email protected]',
type=amo.ADDON_LPAPP)
self.fetch_langpacks(amo.FIREFOX.latest_version)
assert self.get_langpacks().count() == 0
def test_fetch_langpack_invalid_path_fails(self):
self.mock_request.return_value = None
with self.assertRaises(ValueError) as exc:
tasks.fetch_langpacks('../foo/')
assert str(exc.exception) == 'Invalid path'
| bsd-3-clause | 6,212,566,302,557,772,000 | -3,166,669,616,601,644,500 | 32.298969 | 78 | 0.590712 | false |
javipalanca/Django-facebook | django_facebook/auth_urls.py | 24 | 2822 | """
URL patterns for the views included in ``django.contrib.auth``.
Including these URLs (via the ``include()`` directive) will set up the
following patterns based at whatever URL prefix they are included
under:
* User login at ``login/``.
* User logout at ``logout/``.
* The two-step password change at ``password/change/`` and
``password/change/done/``.
* The four-step password reset at ``password/reset/``,
``password/reset/confirm/``, ``password/reset/complete/`` and
``password/reset/done/``.
The default registration backend already has an ``include()`` for
these URLs, so under the default setup it is not necessary to manually
include these views. Other backends may or may not include them;
consult a specific backend's documentation for details.
"""
try:
from django.conf.urls import patterns, url
except ImportError:
from django.conf.urls.defaults import patterns, url
from django.contrib.auth import views as auth_views
from django_facebook import registration_views
from django_facebook.utils import replication_safe
urlpatterns = patterns('',
url(r'^login/$',
replication_safe(auth_views.login),
{'template_name': 'registration/login.html'},
name='auth_login'),
url(r'^logout/$',
replication_safe(auth_views.logout),
{'template_name': 'registration/logout.html'},
name='auth_logout'),
url(r'^password/change/$',
auth_views.password_change,
name='auth_password_change'),
url(r'^password/change/done/$',
auth_views.password_change_done,
name='auth_password_change_done'),
url(r'^password/reset/$',
auth_views.password_reset,
name='auth_password_reset'),
url(
r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
auth_views.password_reset_confirm,
name='auth_password_reset_confirm'),
url(r'^password/reset/complete/$',
auth_views.password_reset_complete,
name='auth_password_reset_complete'),
url(r'^password/reset/done/$',
auth_views.password_reset_done,
name='auth_password_reset_done'),
url(r'^register/$',
registration_views.register,
name='registration_register'),
)
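# Hedged include() sketch (not part of the original module): wiring these
# patterns into a project urls.py. The '^accounts/' prefix is an example,
# not something this module requires.
#
#     urlpatterns = patterns('',
#         url(r'^accounts/', include('django_facebook.auth_urls')),
#     )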
| bsd-3-clause | 8,337,565,977,467,578,000 | 9,081,918,729,588,967,000 | 41.757576 | 95 | 0.53012 | false |
atlashealth/ansible | lib/ansible/plugins/connections/jail.py | 131 | 7291 | # Based on local.py (c) 2012, Michael DeHaan <[email protected]>
# and chroot.py (c) 2013, Maykel Moya <[email protected]>
# (c) 2013, Michael Scherer <[email protected]>
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import traceback
import os
import shlex
import subprocess
from ansible import errors
from ansible.utils.unicode import to_bytes
from ansible.callbacks import vvv
import ansible.constants as C
BUFSIZE = 65536
class Connection(object):
''' Local BSD Jail based connections '''
def _search_executable(self, executable):
cmd = distutils.spawn.find_executable(executable)
if not cmd:
            raise errors.AnsibleError("%s command not found in PATH" % executable)
return cmd
def list_jails(self):
p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stdout.split()
def get_jail_path(self):
p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'],
cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
# remove \n
return stdout[:-1]
def __init__(self, runner, host, port, *args, **kwargs):
self.jail = host
self.runner = runner
self.host = host
self.has_pipelining = False
self.become_methods_supported=C.BECOME_METHODS
if os.geteuid() != 0:
raise errors.AnsibleError("jail connection requires running as root")
self.jls_cmd = self._search_executable('jls')
self.jexec_cmd = self._search_executable('jexec')
if not self.jail in self.list_jails():
raise errors.AnsibleError("incorrect jail name %s" % self.jail)
self.host = host
# port is unused, since this is local
self.port = port
def connect(self, port=None):
''' connect to the jail; nothing to do here '''
vvv("THIS IS A LOCAL JAIL DIR", host=self.jail)
return self
    # helper: build the command line that will be executed inside the jail
def _generate_cmd(self, executable, cmd):
if executable:
local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]
else:
            # Prior to Python 2.7.3, shlex couldn't handle unicode strings
cmd = to_bytes(cmd)
cmd = shlex.split(cmd)
local_cmd = [self.jexec_cmd, self.jail]
local_cmd += cmd
return local_cmd
def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE):
''' run a command on the jail. This is only needed for implementing
        put_file() and get_file() so that we don't have to read the whole file
        into memory.
        Compared to exec_command() it loses some niceties like being able to
return the process's exit code immediately.
'''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
        # We enter the jail as root, so privilege escalation is ignored here (may need fixing if we ever have to become a specific user, e.g. a postgres admin).
local_cmd = self._generate_cmd(executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.jail)
p = subprocess.Popen(local_cmd, shell=False,
cwd=self.runner.basedir,
stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the jail '''
p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data)
stdout, stderr = p.communicate()
return (p.returncode, '', stdout, stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to jail '''
vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
try:
with open(in_path, 'rb') as in_file:
try:
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), None, stdin=in_file)
except OSError:
raise errors.AnsibleError("jail connection requires dd command in the jail")
try:
stdout, stderr = p.communicate()
except:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
if p.returncode != 0:
raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
except IOError:
raise errors.AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from jail to local '''
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
try:
p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None)
except OSError:
raise errors.AnsibleError("jail connection requires dd command in the jail")
with open(out_path, 'wb+') as out_file:
try:
chunk = p.stdout.read(BUFSIZE)
while chunk:
out_file.write(chunk)
chunk = p.stdout.read(BUFSIZE)
except:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
pass
| gpl-3.0 | 5,874,018,582,023,953,000 | 1,567,497,502,552,637,700 | 38.625 | 152 | 0.602112 | false |
vinayvenu/orthanc | Resources/Samples/Python/ChangesLoop.py | 9 | 2347 | #!/usr/bin/python
# Orthanc - A Lightweight, RESTful DICOM Store
# Copyright (C) 2012-2015 Sebastien Jodogne, Medical Physics
# Department, University Hospital of Liege, Belgium
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import sys
import RestToolbox
##
## Print help message
##
if len(sys.argv) != 3:
print("""
Sample script that continuously monitors the arrival of new DICOM
images into Orthanc (through the Changes API).
Usage: %s [hostname] [HTTP port]
For instance: %s localhost 8042
""" % (sys.argv[0], sys.argv[0]))
exit(-1)
URL = 'http://%s:%d' % (sys.argv[1], int(sys.argv[2]))
##
## The following function is called each time a new instance is
## received.
##
def NewInstanceReceived(path):
global URL
patientName = RestToolbox.DoGet(URL + path + '/content/PatientName')
# Remove the possible trailing characters due to DICOM padding
patientName = patientName.strip()
print('New instance received for patient "%s": "%s"' % (patientName, path))
##
## Main loop that listens to the changes API.
##
current = 0
while True:
r = RestToolbox.DoGet(URL + '/changes', {
'since' : current,
'limit' : 4 # Retrieve at most 4 changes at once
})
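    # Illustrative response shape (inferred from the fields used below; the
    # exact payload may differ):
    #   { "Changes": [ { "ChangeType": "NewInstance", "Path": "/instances/..." }, ... ],
    #     "Last": 42, "Done": true }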
for change in r['Changes']:
# We are only interested interested in the arrival of new instances
if change['ChangeType'] == 'NewInstance':
# Call the callback function
path = change['Path']
NewInstanceReceived(path)
# Delete the instance once it has been discovered
RestToolbox.DoDelete(URL + path)
current = r['Last']
if r['Done']:
print('Everything has been processed: Waiting...')
time.sleep(1)
| gpl-3.0 | 4,523,240,395,406,744,600 | 3,507,609,663,967,238,000 | 26.290698 | 79 | 0.672348 | false |
akalipetis/django-rest-framework | rest_framework/permissions.py | 71 | 6444 | """
Provides a set of pluggable permission policies.
"""
from __future__ import unicode_literals
from django.http import Http404
from rest_framework.compat import get_model_name
SAFE_METHODS = ('GET', 'HEAD', 'OPTIONS')
class BasePermission(object):
"""
A base class from which all permission classes should inherit.
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
def has_object_permission(self, request, view, obj):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
class AllowAny(BasePermission):
"""
Allow any access.
This isn't strictly required, since you could use an empty
permission_classes list, but it's useful because it makes the intention
more explicit.
"""
def has_permission(self, request, view):
return True
class IsAuthenticated(BasePermission):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return request.user and request.user.is_authenticated()
class IsAdminUser(BasePermission):
"""
Allows access only to admin users.
"""
def has_permission(self, request, view):
return request.user and request.user.is_staff
class IsAuthenticatedOrReadOnly(BasePermission):
"""
The request is authenticated as a user, or is a read-only request.
"""
def has_permission(self, request, view):
return (
request.method in SAFE_METHODS or
request.user and
request.user.is_authenticated()
)
class DjangoModelPermissions(BasePermission):
"""
The request is authenticated using `django.contrib.auth` permissions.
See: https://docs.djangoproject.com/en/dev/topics/auth/#permissions
It ensures that the user is authenticated, and has the appropriate
`add`/`change`/`delete` permissions on the model.
This permission can only be applied against view classes that
provide a `.queryset` attribute.
"""
# Map methods into required permission codes.
# Override this if you need to also provide 'view' permissions,
# or if you want to provide custom permission codes.
perms_map = {
'GET': [],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
authenticated_users_only = True
def get_required_permissions(self, method, model_cls):
"""
Given a model and an HTTP method, return the list of permission
codes that the user is required to have.
"""
kwargs = {
'app_label': model_cls._meta.app_label,
'model_name': get_model_name(model_cls)
}
return [perm % kwargs for perm in self.perms_map[method]]
def has_permission(self, request, view):
# Workaround to ensure DjangoModelPermissions are not applied
# to the root view when using DefaultRouter.
if getattr(view, '_ignore_model_permissions', False):
return True
try:
queryset = view.get_queryset()
except AttributeError:
queryset = getattr(view, 'queryset', None)
assert queryset is not None, (
'Cannot apply DjangoModelPermissions on a view that '
'does not have `.queryset` property or overrides the '
'`.get_queryset()` method.')
perms = self.get_required_permissions(request.method, queryset.model)
return (
request.user and
(request.user.is_authenticated() or not self.authenticated_users_only) and
request.user.has_perms(perms)
)
class DjangoModelPermissionsOrAnonReadOnly(DjangoModelPermissions):
"""
Similar to DjangoModelPermissions, except that anonymous users are
allowed read-only access.
"""
authenticated_users_only = False
class DjangoObjectPermissions(DjangoModelPermissions):
"""
The request is authenticated using Django's object-level permissions.
It requires an object-permissions-enabled backend, such as Django Guardian.
It ensures that the user is authenticated, and has the appropriate
`add`/`change`/`delete` permissions on the object using .has_perms.
This permission can only be applied against view classes that
provide a `.queryset` attribute.
"""
perms_map = {
'GET': [],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
def get_required_object_permissions(self, method, model_cls):
kwargs = {
'app_label': model_cls._meta.app_label,
'model_name': get_model_name(model_cls)
}
return [perm % kwargs for perm in self.perms_map[method]]
def has_object_permission(self, request, view, obj):
try:
queryset = view.get_queryset()
except AttributeError:
queryset = getattr(view, 'queryset', None)
assert queryset is not None, (
'Cannot apply DjangoObjectPermissions on a view that '
'does not have `.queryset` property or overrides the '
'`.get_queryset()` method.')
model_cls = queryset.model
user = request.user
perms = self.get_required_object_permissions(request.method, model_cls)
if not user.has_perms(perms, obj):
# If the user does not have permissions we need to determine if
# they have read permissions to see 403, or not, and simply see
# a 404 response.
if request.method in SAFE_METHODS:
# Read permissions already checked and failed, no need
# to make another lookup.
raise Http404
read_perms = self.get_required_object_permissions('GET', model_cls)
if not user.has_perms(read_perms, obj):
raise Http404
# Has read permissions.
return False
return True
| bsd-2-clause | 8,006,884,108,197,648,000 | -5,523,236,822,141,919,000 | 30.281553 | 86 | 0.619957 | false |
itmanagerro/tresting | contrib/devtools/symbol-check.py | 52 | 6191 | #!/usr/bin/python2
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function, unicode_literals
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid&section=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid&section=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid&section=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
b'libgcc_s.so.1', # GCC base support
b'libc.so.6', # C library
b'libpthread.so.0', # threading
b'libanl.so.1', # DNS resolve
b'libm.so.6', # math library
b'librt.so.1', # real-time (clock)
b'ld-linux-x86-64.so.2', # 64-bit dynamic linker
b'ld-linux.so.2', # 32-bit dynamic linker
# bitcoin-qt only
b'libX11-xcb.so.1', # part of X11
b'libX11.so.6', # part of X11
b'libxcb.so.1', # part of X11
b'libfontconfig.so.1', # font support
b'libfreetype.so.6', # font parsing
b'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
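    # Example (assumed input/output, for illustration): CPPFilt()(b'_ZN3fooC1Ev')
    # would typically return b'foo::foo()'.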
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + b'\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
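    # Each entry is a (symbol, version) pair of byte strings; e.g. an import of
    # memcpy versioned GLIBC_2.14 would come back as (b'memcpy', b'GLIBC_2.14')
    # (illustrative values).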
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>7 and re.match(b'[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition(b'@')
is_import = line[6] == b'UND'
if version.startswith(b'@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
def check_version(max_versions, version):
if b'_' in version:
(lib, _, ver) = version.rpartition(b'_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split(b'.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
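# Worked example (illustrative): for a symbol versioned 'GLIBC_2.11',
# check_version splits off lib='GLIBC' and ver=(2, 11); since
# (2, 11) <= MAX_VERSIONS['GLIBC'] == (2, 11) the symbol is allowed,
# whereas 'GLIBC_2.14' would be rejected.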
def read_libraries(filename):
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>2 and tokens[1] == b'(NEEDED)':
match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8')))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8')))
retval = 1
# Check dependency libraries
for library_name in read_libraries(filename):
if library_name not in ALLOWED_LIBRARIES:
print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8')))
retval = 1
exit(retval)
| mit | -7,513,871,933,820,243,000 | 5,487,489,218,973,526,000 | 36.75 | 142 | 0.641415 | false |
mathdd/numpy | numpy/core/tests/test_umath.py | 29 | 69197 | from __future__ import division, absolute_import, print_function
import sys
import platform
import warnings
from numpy.testing.utils import _gen_alignment_data
import numpy.core.umath as ncu
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
dec, assert_allclose, assert_no_warnings
)
def on_powerpc():
""" True if we are running on a Power PC platform."""
return platform.processor() == 'powerpc' or \
platform.machine().startswith('ppc')
class _FilterInvalids(object):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
def tearDown(self):
np.seterr(**self.olderr)
class TestConstants(TestCase):
def test_pi(self):
assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
def test_e(self):
assert_allclose(ncu.e, 2.718281828459045, 1e-15)
def test_euler_gamma(self):
assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
class TestOut(TestCase):
def test_out_subok(self):
for subok in (True, False):
a = np.array(0.5)
o = np.empty(())
r = np.add(a, 2, o, subok=subok)
assert_(r is o)
r = np.add(a, 2, out=o, subok=subok)
assert_(r is o)
r = np.add(a, 2, out=(o,), subok=subok)
assert_(r is o)
d = np.array(5.7)
o1 = np.empty(())
o2 = np.empty((), dtype=np.int32)
r1, r2 = np.frexp(d, o1, None, subok=subok)
assert_(r1 is o1)
r1, r2 = np.frexp(d, None, o2, subok=subok)
assert_(r2 is o2)
r1, r2 = np.frexp(d, o1, o2, subok=subok)
assert_(r1 is o1)
assert_(r2 is o2)
r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
assert_(r1 is o1)
r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
assert_(r2 is o2)
r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)
assert_(r1 is o1)
assert_(r2 is o2)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
r1, r2 = np.frexp(d, out=o1, subok=subok)
assert_(r1 is o1)
assert_(w[0].category is DeprecationWarning)
assert_raises(ValueError, np.add, a, 2, o, o, subok=subok)
assert_raises(ValueError, np.add, a, 2, o, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, None, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)
assert_raises(TypeError, np.add, a, 2, [], subok=subok)
assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)
assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)
o.flags.writeable = False
assert_raises(ValueError, np.add, a, 2, o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok)
def test_out_wrap_subok(self):
class ArrayWrap(np.ndarray):
__array_priority__ = 10
def __new__(cls, arr):
return np.asarray(arr).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
for subok in (True, False):
a = ArrayWrap([0.5])
r = np.add(a, 2, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, None, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, out=None, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, out=(None,), subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
d = ArrayWrap([5.7])
o1 = np.empty((1,))
o2 = np.empty((1,), dtype=np.int32)
r1, r2 = np.frexp(d, o1, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, o1, None, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, None, o2, subok=subok)
if subok:
assert_(isinstance(r1, ArrayWrap))
else:
assert_(type(r1) == np.ndarray)
r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
if subok:
assert_(isinstance(r1, ArrayWrap))
else:
assert_(type(r1) == np.ndarray)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
r1, r2 = np.frexp(d, out=o1, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
assert_(w[0].category is DeprecationWarning)
class TestDivision(TestCase):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
if 5 / 10 == 0.5:
assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
-0.05, -0.1, -0.9, -1, -1.2])
else:
assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
assert_almost_equal(x**2/x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2/x
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_zero_division_complex(self):
with np.errstate(invalid="ignore", divide="ignore"):
x = np.array([0.0], dtype=np.complex128)
y = 1.0/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.nan)/x
assert_(np.isinf(y)[0])
y = complex(np.nan, np.inf)/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.inf)/x
assert_(np.isinf(y)[0])
y = 0.0/x
assert_(np.isnan(y)[0])
def test_floor_division_complex(self):
# check that implementation is correct
msg = "Complex floor division implementation check"
x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
y = np.array([0., -1., 0., 0.], dtype=np.complex128)
assert_equal(np.floor_divide(x**2, x), y, err_msg=msg)
# check overflow, underflow
msg = "Complex floor division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = np.floor_divide(x**2, x)
assert_equal(y, [1.e+110, 0], err_msg=msg)
class TestCbrt(TestCase):
def test_cbrt_scalar(self):
assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
def test_cbrt(self):
x = np.array([1., 2., -3., np.inf, -np.inf])
assert_almost_equal(np.cbrt(x**3), x)
assert_(np.isnan(np.cbrt(np.nan)))
assert_equal(np.cbrt(np.inf), np.inf)
assert_equal(np.cbrt(-np.inf), -np.inf)
class TestPower(TestCase):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
for out, inp, msg in _gen_alignment_data(dtype=np.float32,
type='unary',
max_size=11):
exp = [ncu.sqrt(i) for i in inp]
assert_almost_equal(inp**(0.5), exp, err_msg=msg)
np.sqrt(inp, out=out)
assert_equal(out, exp, err_msg=msg)
for out, inp, msg in _gen_alignment_data(dtype=np.float64,
type='unary',
max_size=7):
exp = [ncu.sqrt(i) for i in inp]
assert_almost_equal(inp**(0.5), exp, err_msg=msg)
np.sqrt(inp, out=out)
assert_equal(out, exp, err_msg=msg)
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
ncu.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
z = np.array([z], dtype=np.complex_)
with np.errstate(invalid="ignore"):
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
def test_power_zero(self):
# ticket #1271
zero = np.array([0j])
one = np.array([1+0j])
cnan = np.array([complex(np.nan, np.nan)])
# FIXME cinf not tested.
#cinf = np.array([complex(np.inf, 0)])
def assert_complex_equal(x, y):
x, y = np.asarray(x), np.asarray(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, p), zero)
# zero power
assert_complex_equal(np.power(zero, 0), one)
with np.errstate(invalid="ignore"):
assert_complex_equal(np.power(zero, 0+1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1+0.2j), cnan)
def test_fast_power(self):
x = np.array([1, 2, 3], np.int16)
assert_((x**2.00001).dtype is (x**2.0).dtype)
# Check that the fast path ignores 1-element not 0-d arrays
res = x ** np.array([[[2]]])
assert_equal(res.shape, (1, 1, 3))
class TestLog2(TestCase):
def test_log2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
def test_log2_ints(self):
# a good log2 implementation should provide this,
# might fail on OS with bad libm
for i in range(1, 65):
v = np.log2(2.**i)
assert_equal(v, float(i), err_msg='at exponent %d' % i)
def test_log2_special(self):
assert_equal(np.log2(1.), 0.)
assert_equal(np.log2(np.inf), np.inf)
assert_(np.isnan(np.log2(np.nan)))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.log2(-1.)))
assert_(np.isnan(np.log2(-np.inf)))
assert_equal(np.log2(0.), -np.inf)
assert_(w[0].category is RuntimeWarning)
assert_(w[1].category is RuntimeWarning)
assert_(w[2].category is RuntimeWarning)
class TestExp2(TestCase):
def test_exp2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.exp2(yf), xf)
class TestLogAddExp2(_FilterInvalids):
# Need test for intermediate precisions
def test_logaddexp2_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_)
def test_logaddexp2_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self):
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
def test_nan(self):
assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, 0)))
assert_(np.isnan(np.logaddexp2(0, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
class TestLog(TestCase):
def test_log_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.log(xf), yf)
class TestExp(TestCase):
def test_exp_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
def test_logaddexp_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
def test_inf(self):
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
def test_nan(self):
assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, 0)))
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
class TestLog1p(TestCase):
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
def test_special(self):
with np.errstate(invalid="ignore", divide="ignore"):
assert_equal(ncu.log1p(np.nan), np.nan)
assert_equal(ncu.log1p(np.inf), np.inf)
assert_equal(ncu.log1p(-1.), -np.inf)
assert_equal(ncu.log1p(-2.), np.nan)
assert_equal(ncu.log1p(-np.inf), np.nan)
class TestExpm1(TestCase):
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
def test_special(self):
assert_equal(ncu.expm1(np.inf), np.inf)
assert_equal(ncu.expm1(0.), 0.)
assert_equal(ncu.expm1(-0.), -0.)
assert_equal(ncu.expm1(np.inf), np.inf)
assert_equal(ncu.expm1(-np.inf), -1.)
class TestHypot(TestCase, object):
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
def assert_hypot_isnan(x, y):
with np.errstate(invalid='ignore'):
assert_(np.isnan(ncu.hypot(x, y)),
"hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
def assert_hypot_isinf(x, y):
with np.errstate(invalid='ignore'):
assert_(np.isinf(ncu.hypot(x, y)),
"hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
class TestHypotSpecialValues(TestCase):
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
def test_nan_outputs2(self):
assert_hypot_isinf(np.nan, np.inf)
assert_hypot_isinf(np.inf, np.nan)
assert_hypot_isinf(np.inf, 0)
assert_hypot_isinf(0, np.inf)
assert_hypot_isinf(np.inf, np.inf)
assert_hypot_isinf(np.inf, 23.0)
def test_no_fpe(self):
assert_no_warnings(ncu.hypot, np.inf, 0)
def assert_arctan2_isnan(x, y):
    assert_(np.isnan(ncu.arctan2(x, y)), "arctan2(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispinf(x, y):
    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan2(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isninf(x, y):
    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan2(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispzero(x, y):
    assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan2(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isnzero(x, y):
    assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan2(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
class TestArctan2SpecialValues(TestCase):
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
def test_zero_nzero(self):
# atan2(+-0, -0) returns +-pi.
assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
def test_zero_pzero(self):
# atan2(+-0, +0) returns +-0.
assert_arctan2_ispzero(np.PZERO, np.PZERO)
assert_arctan2_isnzero(np.NZERO, np.PZERO)
def test_zero_negative(self):
# atan2(+-0, x) returns +-pi for x < 0.
assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
def test_zero_positive(self):
# atan2(+-0, x) returns +-0 for x > 0.
assert_arctan2_ispzero(np.PZERO, 1)
assert_arctan2_isnzero(np.NZERO, 1)
def test_positive_zero(self):
# atan2(y, +-0) returns +pi/2 for y > 0.
assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
def test_negative_zero(self):
# atan2(y, +-0) returns -pi/2 for y < 0.
assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
def test_any_ninf(self):
# atan2(+-y, -infinity) returns +-pi for finite y > 0.
assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
def test_any_pinf(self):
# atan2(+-y, +infinity) returns +-0 for finite y > 0.
assert_arctan2_ispzero(1, np.inf)
assert_arctan2_isnzero(-1, np.inf)
def test_inf_any(self):
# atan2(+-infinity, x) returns +-pi/2 for finite x.
assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
def test_inf_ninf(self):
# atan2(+-infinity, -infinity) returns +-3*pi/4.
assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
def test_inf_pinf(self):
# atan2(+-infinity, +infinity) returns +-pi/4.
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
def test_nan_any(self):
# atan2(nan, x) returns nan for any x, including inf
assert_arctan2_isnan(np.nan, np.inf)
assert_arctan2_isnan(np.inf, np.nan)
assert_arctan2_isnan(np.nan, np.nan)
class TestLdexp(TestCase):
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
np.array(3, tp)), 16.)
def test_ldexp(self):
# The default Python int type should work
assert_almost_equal(ncu.ldexp(2., 3), 16.)
# The following int types should all be accepted
self._check_ldexp(np.int8)
self._check_ldexp(np.int16)
self._check_ldexp(np.int32)
self._check_ldexp('i')
self._check_ldexp('l')
def test_ldexp_overflow(self):
# silence warning emitted on overflow
with np.errstate(over="ignore"):
imax = np.iinfo(np.dtype('l')).max
imin = np.iinfo(np.dtype('l')).min
assert_equal(ncu.ldexp(2., imax), np.inf)
assert_equal(ncu.ldexp(2., imin), 0)
class TestMaximum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.maximum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.maximum.reduce([1, 2j]), 1)
assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.maximum(arg1, arg2), out)
def test_object_nans(self):
# Multiple checks to give this a chance to
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
x = np.array(float('nan'), np.object)
y = 1.0
z = np.array(float('nan'), np.object)
assert_(np.maximum(x, y) == 1.0)
assert_(np.maximum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.maximum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=np.object)
arg2 = arg1 + 1
assert_equal(np.maximum(arg1, arg2), arg2)
class TestMinimum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.minimum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.minimum.reduce([1, 2j]), 2j)
assert_equal(np.minimum.reduce([1+3j, 2j]), 2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.minimum(arg1, arg2), out)
def test_object_nans(self):
# Multiple checks to give this a chance to
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
x = np.array(float('nan'), np.object)
y = 1.0
z = np.array(float('nan'), np.object)
assert_(np.minimum(x, y) == 1.0)
assert_(np.minimum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.minimum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=np.object)
arg2 = arg1 + 1
assert_equal(np.minimum(arg1, arg2), arg1)
class TestFmax(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmax.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 9)
assert_equal(func(tmp2), 9)
def test_reduce_complex(self):
assert_equal(np.fmax.reduce([1, 2j]), 1)
assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmax(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmax(arg1, arg2), out)
class TestFmin(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmin.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 1)
assert_equal(func(tmp2), 1)
def test_reduce_complex(self):
assert_equal(np.fmin.reduce([1, 2j]), 2j)
assert_equal(np.fmin.reduce([1+3j, 2j]), 2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmin(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmin(arg1, arg2), out)
class TestBool(TestCase):
def test_truth_table_logical(self):
# 2, 3 and 4 serves as true values
input1 = [0, 0, 3, 2]
input2 = [0, 4, 0, 2]
typecodes = (np.typecodes['AllFloat']
+ np.typecodes['AllInteger']
+ '?') # boolean
for dtype in map(np.dtype, typecodes):
arg1 = np.asarray(input1, dtype=dtype)
arg2 = np.asarray(input2, dtype=dtype)
# OR
out = [False, True, True, True]
for func in (np.logical_or, np.maximum):
assert_equal(func(arg1, arg2).astype(bool), out)
# AND
out = [False, False, False, True]
for func in (np.logical_and, np.minimum):
assert_equal(func(arg1, arg2).astype(bool), out)
# XOR
out = [False, True, True, False]
for func in (np.logical_xor, np.not_equal):
assert_equal(func(arg1, arg2).astype(bool), out)
def test_truth_table_bitwise(self):
arg1 = [False, False, True, True]
arg2 = [False, True, False, True]
out = [False, True, True, True]
assert_equal(np.bitwise_or(arg1, arg2), out)
out = [False, False, False, True]
assert_equal(np.bitwise_and(arg1, arg2), out)
out = [False, True, True, False]
assert_equal(np.bitwise_xor(arg1, arg2), out)
class TestInt(TestCase):
def test_logical_not(self):
x = np.ones(10, dtype=np.int16)
o = np.ones(10 * 2, dtype=np.bool)
tgt = o.copy()
tgt[::2] = False
os = o[::2]
assert_array_equal(np.logical_not(x, out=os), False)
assert_array_equal(o, tgt)
class TestFloatingPoint(TestCase):
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
class TestDegrees(TestCase):
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
class TestRadians(TestCase):
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
class TestSign(TestCase):
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
with np.errstate(invalid='ignore'):
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
class TestMinMax(TestCase):
def test_minmax_blocked(self):
# simd tests on max/min, test all alignments, slow but important
# for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
for dt, sz in [(np.float32, 15), (np.float64, 7)]:
for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
max_size=sz):
for i in range(inp.size):
inp[:] = np.arange(inp.size, dtype=dt)
inp[i] = np.nan
emsg = lambda: '%r\n%s' % (inp, msg)
assert_(np.isnan(inp.max()), msg=emsg)
assert_(np.isnan(inp.min()), msg=emsg)
inp[i] = 1e10
assert_equal(inp.max(), 1e10, err_msg=msg)
inp[i] = -1e10
assert_equal(inp.min(), -1e10, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
        # i.e. doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
class TestAbsoluteNegative(TestCase):
def test_abs_neg_blocked(self):
# simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 5)]:
for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
max_size=sz):
tgt = [ncu.absolute(i) for i in inp]
np.absolute(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
self.assertTrue((out >= 0).all())
tgt = [-1*(i) for i in inp]
np.negative(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
                # may raise the invalid FPE flag depending on compiler optimizations
with np.errstate(invalid='ignore'):
for v in [np.nan, -np.inf, np.inf]:
for i in range(inp.size):
d = np.arange(inp.size, dtype=dt)
inp[:] = -d
inp[i] = v
d[i] = -v if v == -np.inf else v
assert_array_equal(np.abs(inp), d, err_msg=msg)
np.abs(inp, out=out)
assert_array_equal(out, d, err_msg=msg)
assert_array_equal(-inp, -1*inp, err_msg=msg)
np.negative(inp, out=out)
assert_array_equal(out, -1*inp, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
        # i.e. doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_equal(np.abs(d), d)
assert_equal(np.negative(d), -d)
np.negative(d, out=d)
np.negative(np.ones_like(d), out=d)
np.abs(d, out=d)
np.abs(np.ones_like(d), out=d)
class TestSpecialMethods(TestCase):
def test_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = with_wrap()
r.arr = arr
r.context = context
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
self.assertTrue(func is ncu.minimum)
self.assertEqual(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
self.assertEqual(i, 0)
def test_wrap_with_iterable(self):
# test fix for bug #1026:
class with_wrap(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
self.assertTrue(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
# test fix for bug #826:
class A(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1.0, 'float64').view(cls).copy()
a = A()
x = np.float64(1)*a
self.assertTrue(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr):
r = with_wrap()
r.arr = arr
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
def test_priority(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = type(self)()
r.arr = arr
r.context = context
return r
class B(A):
__array_priority__ = 20.
class C(A):
__array_priority__ = 40.
x = np.zeros(1)
a = A()
b = B()
c = C()
f = ncu.minimum
self.assertTrue(type(f(x, x)) is np.ndarray)
self.assertTrue(type(f(x, a)) is A)
self.assertTrue(type(f(x, b)) is B)
self.assertTrue(type(f(x, c)) is C)
self.assertTrue(type(f(a, x)) is A)
self.assertTrue(type(f(b, x)) is B)
self.assertTrue(type(f(c, x)) is C)
self.assertTrue(type(f(a, a)) is A)
self.assertTrue(type(f(a, b)) is B)
self.assertTrue(type(f(b, a)) is B)
self.assertTrue(type(f(b, b)) is B)
self.assertTrue(type(f(b, c)) is C)
self.assertTrue(type(f(c, b)) is C)
self.assertTrue(type(f(c, c)) is C)
        self.assertTrue(type(ncu.exp(a)) is A)
        self.assertTrue(type(ncu.exp(b)) is B)
        self.assertTrue(type(ncu.exp(c)) is C)
def test_failing_wrap(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_default_prepare(self):
class with_wrap(object):
__array_priority__ = 10
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
return arr
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
def test_prepare(self):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
                # make sure we can return a new array
return np.array(arr).view(type=with_prepare)
a = np.array(1).view(type=with_prepare)
x = np.add(a, a)
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
def test_failing_prepare(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_prepare__(self, arr, context=None):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_array_with_context(self):
class A(object):
def __array__(self, dtype=None, context=None):
func, args, i = context
self.func = func
self.args = args
self.i = i
return np.zeros(1)
class B(object):
def __array__(self, dtype=None):
return np.zeros(1, dtype)
class C(object):
def __array__(self):
return np.zeros(1)
a = A()
ncu.maximum(np.zeros(1), a)
self.assertTrue(a.func is ncu.maximum)
assert_equal(a.args[0], 0)
self.assertTrue(a.args[1] is a)
self.assertTrue(a.i == 1)
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
def test_ufunc_override(self):
class A(object):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return self, func, method, pos, inputs, kwargs
a = A()
b = np.matrix([1])
res0 = np.multiply(a, b)
res1 = np.dot(a, b)
# self
assert_equal(res0[0], a)
assert_equal(res1[0], a)
assert_equal(res0[1], np.multiply)
assert_equal(res1[1], np.dot)
assert_equal(res0[2], '__call__')
assert_equal(res1[2], '__call__')
assert_equal(res0[3], 0)
assert_equal(res1[3], 0)
assert_equal(res0[4], (a, b))
assert_equal(res1[4], (a, b))
assert_equal(res0[5], {})
assert_equal(res1[5], {})
def test_ufunc_override_mro(self):
# Some multi arg functions for testing.
def tres_mul(a, b, c):
return a * b * c
def quatro_mul(a, b, c, d):
return a * b * c * d
# Make these into ufuncs.
three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1)
four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)
class A(object):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return "A"
class ASub(A):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return "ASub"
class B(object):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return "B"
class C(object):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return NotImplemented
class CSub(object):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
a_sub = ASub()
b = B()
c = C()
c_sub = CSub()
# Standard
res = np.multiply(a, a_sub)
assert_equal(res, "ASub")
res = np.multiply(a_sub, b)
assert_equal(res, "ASub")
# With 1 NotImplemented
res = np.multiply(c, a)
assert_equal(res, "A")
# Both NotImplemented.
assert_raises(TypeError, np.multiply, c, c_sub)
assert_raises(TypeError, np.multiply, c_sub, c)
assert_raises(TypeError, np.multiply, 2, c)
# Ternary testing.
assert_equal(three_mul_ufunc(a, 1, 2), "A")
assert_equal(three_mul_ufunc(1, a, 2), "A")
assert_equal(three_mul_ufunc(1, 2, a), "A")
assert_equal(three_mul_ufunc(a, a, 6), "A")
assert_equal(three_mul_ufunc(a, 2, a), "A")
assert_equal(three_mul_ufunc(a, 2, b), "A")
assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub")
assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub")
assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub")
assert_equal(three_mul_ufunc(1, a_sub, c), "ASub")
assert_equal(three_mul_ufunc(a, b, c), "A")
assert_equal(three_mul_ufunc(a, b, c_sub), "A")
assert_equal(three_mul_ufunc(1, 2, b), "B")
assert_raises(TypeError, three_mul_ufunc, 1, 2, c)
assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c)
assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3)
# Quaternary testing.
assert_equal(four_mul_ufunc(a, 1, 2, 3), "A")
assert_equal(four_mul_ufunc(1, a, 2, 3), "A")
assert_equal(four_mul_ufunc(1, 1, a, 3), "A")
assert_equal(four_mul_ufunc(1, 1, 2, a), "A")
assert_equal(four_mul_ufunc(a, b, 2, 3), "A")
assert_equal(four_mul_ufunc(1, a, 2, b), "A")
assert_equal(four_mul_ufunc(b, 1, a, 3), "B")
assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub")
assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub")
assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c)
assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c)
assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c)
def test_ufunc_override_methods(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return self, ufunc, method, pos, inputs, kwargs
# __call__
a = A()
res = np.multiply.__call__(1, a, foo='bar', answer=42)
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], '__call__')
assert_equal(res[3], 1)
assert_equal(res[4], (1, a))
assert_equal(res[5], {'foo': 'bar', 'answer': 42})
# reduce, positional args
res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
assert_equal(res[3], 0)
assert_equal(res[4], (a,))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'keepdims': 'keep0',
'axis': 'axis0'})
# reduce, kwargs
res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
keepdims='keep0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
assert_equal(res[3], 0)
assert_equal(res[4], (a,))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'keepdims': 'keep0',
'axis': 'axis0'})
# accumulate, pos args
res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
assert_equal(res[3], 0)
assert_equal(res[4], (a,))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'axis': 'axis0'})
# accumulate, kwargs
res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0',
out='out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
assert_equal(res[3], 0)
assert_equal(res[4], (a,))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'axis': 'axis0'})
# reduceat, pos args
res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
assert_equal(res[3], 0)
assert_equal(res[4], (a, [4, 2]))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'axis': 'axis0'})
# reduceat, kwargs
res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0',
out='out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
assert_equal(res[3], 0)
assert_equal(res[4], (a, [4, 2]))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'axis': 'axis0'})
# outer
res = np.multiply.outer(a, 42)
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'outer')
assert_equal(res[3], 0)
assert_equal(res[4], (a, 42))
assert_equal(res[5], {})
# at
res = np.multiply.at(a, [4, 2], 'b0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'at')
assert_equal(res[3], 0)
assert_equal(res[4], (a, [4, 2], 'b0'))
def test_ufunc_override_out(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return kwargs
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return kwargs
a = A()
b = B()
res0 = np.multiply(a, b, 'out_arg')
res1 = np.multiply(a, b, out='out_arg')
res2 = np.multiply(2, b, 'out_arg')
res3 = np.multiply(3, b, out='out_arg')
res4 = np.multiply(a, 4, 'out_arg')
res5 = np.multiply(a, 5, out='out_arg')
assert_equal(res0['out'], 'out_arg')
assert_equal(res1['out'], 'out_arg')
assert_equal(res2['out'], 'out_arg')
assert_equal(res3['out'], 'out_arg')
assert_equal(res4['out'], 'out_arg')
assert_equal(res5['out'], 'out_arg')
        # ufuncs with multiple outputs: modf and frexp.
res6 = np.modf(a, 'out0', 'out1')
res7 = np.frexp(a, 'out0', 'out1')
assert_equal(res6['out'][0], 'out0')
assert_equal(res6['out'][1], 'out1')
assert_equal(res7['out'][0], 'out0')
assert_equal(res7['out'][1], 'out1')
def test_ufunc_override_exception(self):
class A(object):
def __numpy_ufunc__(self, *a, **kwargs):
raise ValueError("oops")
a = A()
for func in [np.divide, np.dot]:
assert_raises(ValueError, func, a, a)
class TestChoose(TestCase):
def test_mixed(self):
c = np.array([True, True])
a = np.array([True, True])
assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
def is_longdouble_finfo_bogus():
info = np.finfo(np.longcomplex)
return not np.isfinite(np.log10(info.tiny/info.eps))
class TestComplexFunctions(object):
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
np.log1p]
def test_it(self):
for f in self.funcs:
if f is np.arccosh:
x = 1.5
else:
x = .5
fr = f(x)
fz = f(np.complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)
def test_precisions_consistent(self):
z = 1 + 1j
for f in self.funcs:
fcf = f(np.csingle(z))
fcd = f(np.cdouble(z))
fcl = f(np.clongdouble(z))
            assert_almost_equal(fcf, fcd, decimal=6, err_msg='fcf-fcd %s' % f)
            assert_almost_equal(fcl, fcd, decimal=15, err_msg='fcl-fcd %s' % f)
def test_branch_cuts(self):
# check branch cuts and continuity on them
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True
yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True
yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True
# check against bogus branch cuts: assert continuity between quadrants
yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1
yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1
yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1
def test_branch_cuts_complex64(self):
# check branch cuts and continuity on them
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True, np.complex64
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True, np.complex64
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True, np.complex64
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True, np.complex64
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True, np.complex64
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64
yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
# check against bogus branch cuts: assert continuity between quadrants
yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64
yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64
yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64
yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64
def test_against_cmath(self):
import cmath
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
atol = 4*np.finfo(np.complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
try:
cfunc = getattr(cmath, cname)
except AttributeError:
continue
for p in points:
a = complex(func(np.complex_(p)))
b = cfunc(p)
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b))
def check_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
info = np.finfo(dtype)
real_dtype = dtype(0.).real.dtype
eps = info.eps
def check(x, rtol):
x = x.astype(real_dtype)
z = x.astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsinh'))
z = (1j*x).astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsin'))
z = x.astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctanh'))
z = (1j*x).astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctan'))
# The switchover was chosen as 1e-3; hence there can be up to
# ~eps/1e-3 of relative cancellation error before it
x_series = np.logspace(-20, -3.001, 200)
x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
if dtype is np.longcomplex:
# It's not guaranteed that the system-provided arc functions
# are accurate down to a few epsilons. (Eg. on Linux 64-bit)
# So, give more leeway for long complex tests here:
check(x_series, 50*eps)
else:
check(x_series, 2.1*eps)
check(x_basic, 2*eps/1e-3)
# Check a few points
z = np.array([1e-5*(1+1j)], dtype=dtype)
p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
d = np.absolute(1-np.arctanh(z)/p)
assert_(np.all(d < 1e-15))
p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
d = np.absolute(1-np.arcsinh(z)/p)
assert_(np.all(d < 1e-15))
p = 9.999999999333333333e-6j + 1.000000000066666666e-5
d = np.absolute(1-np.arctan(z)/p)
assert_(np.all(d < 1e-15))
p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
d = np.absolute(1-np.arcsin(z)/p)
assert_(np.all(d < 1e-15))
# Check continuity across switchover points
def check(func, z0, d=1):
z0 = np.asarray(z0, dtype=dtype)
zp = z0 + abs(z0) * d * eps * 2
zm = z0 - abs(z0) * d * eps * 2
assert_(np.all(zp != zm), (zp, zm))
# NB: the cancellation error at the switchover is at least eps
good = (abs(func(zp) - func(zm)) < 2*eps)
assert_(np.all(good), (func, z0[~good]))
for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan):
pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3)
if rp != 0 or ip != 0]
check(func, pts, 1)
check(func, pts, 1j)
check(func, pts, 1+1j)
def test_loss_of_precision(self):
for dtype in [np.complex64, np.complex_]:
yield self.check_loss_of_precision, dtype
@dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo")
def test_loss_of_precision_longcomplex(self):
self.check_loss_of_precision(np.longcomplex)
class TestAttributes(TestCase):
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
assert_(add.__doc__.startswith('add(x1, x2[, out])\n\n'))
self.assertTrue(add.ntypes >= 18) # don't fail if types added
self.assertTrue('ii->i' in add.types)
assert_equal(add.nin, 2)
assert_equal(add.nout, 1)
assert_equal(add.identity, 0)
class TestSubclass(TestCase):
def test_subclass_op(self):
class simple(np.ndarray):
def __new__(subtype, shape):
self = np.ndarray.__new__(subtype, shape, dtype=object)
self.fill(0)
return self
a = simple((3, 4))
assert_equal(a+a, a)
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
dtype=np.complex):
"""
Check for a branch cut in a function.
Assert that `x0` lies on a branch cut of function `f` and `f` is
continuous from the direction `dx`.
Parameters
----------
f : func
Function to check
x0 : array-like
Point on branch cut
dx : array-like
Direction to check continuity in
re_sign, im_sign : {1, -1}
Change of sign of the real or imaginary part expected
sig_zero_ok : bool
Whether to check if the branch cut respects signed zero (if applicable)
dtype : dtype
Dtype to check (should be complex)
"""
x0 = np.atleast_1d(x0).astype(dtype)
dx = np.atleast_1d(dx).astype(dtype)
if np.dtype(dtype).char == 'F':
scale = np.finfo(dtype).eps * 1e2
atol = np.float32(1e-2)
else:
scale = np.finfo(dtype).eps * 1e3
atol = 1e-4
y0 = f(x0)
yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))
assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
if sig_zero_ok:
# check that signed zeros also work as a displacement
jr = (x0.real == 0) & (dx.real != 0)
ji = (x0.imag == 0) & (dx.imag != 0)
if np.any(jr):
x = x0[jr]
x.real = np.NZERO
ym = f(x)
assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym))
assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym))
if np.any(ji):
x = x0[ji]
x.imag = np.NZERO
ym = f(x)
assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym))
assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym))
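# Illustrative note (not part of the original suite): _check_branch_cut can be
# called directly, mirroring the yields in TestComplexFunctions above, e.g.
#     _check_branch_cut(np.log, -0.5, 1j, 1, -1, True)
# which asserts that the imaginary part of np.log flips sign across the
# negative real axis (re_sign=1, im_sign=-1), that the function is continuous
# when approached from the +1j side, and that signed zeros are honoured.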
def test_copysign():
assert_(np.copysign(1, -1) == -1)
with np.errstate(divide="ignore"):
assert_(1 / np.copysign(0, -1) < 0)
assert_(1 / np.copysign(0, 1) > 0)
assert_(np.signbit(np.copysign(np.nan, -1)))
assert_(not np.signbit(np.copysign(np.nan, 1)))
def _test_nextafter(t):
one = t(1)
two = t(2)
zero = t(0)
eps = np.finfo(t).eps
assert_(np.nextafter(one, two) - one == eps)
assert_(np.nextafter(one, zero) - one < 0)
assert_(np.isnan(np.nextafter(np.nan, one)))
assert_(np.isnan(np.nextafter(one, np.nan)))
assert_(np.nextafter(one, one) == one)
def test_nextafter():
return _test_nextafter(np.float64)
def test_nextafterf():
return _test_nextafter(np.float32)
@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(),
"Long double support buggy on win32 and PPC, ticket 1664.")
def test_nextafterl():
return _test_nextafter(np.longdouble)
def _test_spacing(t):
one = t(1)
eps = np.finfo(t).eps
nan = t(np.nan)
inf = t(np.inf)
with np.errstate(invalid='ignore'):
assert_(np.spacing(one) == eps)
assert_(np.isnan(np.spacing(nan)))
assert_(np.isnan(np.spacing(inf)))
assert_(np.isnan(np.spacing(-inf)))
assert_(np.spacing(t(1e30)) != 0)
def test_spacing():
return _test_spacing(np.float64)
def test_spacingf():
return _test_spacing(np.float32)
@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(),
"Long double support buggy on win32 and PPC, ticket 1664.")
def test_spacingl():
return _test_spacing(np.longdouble)
def test_spacing_gfortran():
# Reference from this fortran file, built with gfortran 4.3.3 on linux
# 32bits:
# PROGRAM test_spacing
# INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
# INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)
#
# WRITE(*,*) spacing(0.00001_DBL)
# WRITE(*,*) spacing(1.0_DBL)
# WRITE(*,*) spacing(1000._DBL)
# WRITE(*,*) spacing(10500._DBL)
#
# WRITE(*,*) spacing(0.00001_SGL)
# WRITE(*,*) spacing(1.0_SGL)
# WRITE(*,*) spacing(1000._SGL)
# WRITE(*,*) spacing(10500._SGL)
# END PROGRAM
ref = {}
ref[np.float64] = [1.69406589450860068E-021,
2.22044604925031308E-016,
1.13686837721616030E-013,
1.81898940354585648E-012]
ref[np.float32] = [
9.09494702E-13,
1.19209290E-07,
6.10351563E-05,
9.76562500E-04]
for dt, dec_ in zip([np.float32, np.float64], (10, 20)):
x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_)
def test_nextafter_vs_spacing():
# XXX: spacing does not handle long double yet
for t in [np.float32, np.float64]:
for _f in [1, 1e-5, 1000]:
f = t(_f)
f1 = t(_f + 1)
assert_(np.nextafter(f, f1) - f == np.spacing(f))
def test_pos_nan():
"""Check np.nan is a positive nan."""
assert_(np.signbit(np.nan) == 0)
def test_reduceat():
"""Test bug in reduceat when structured arrays are not copied."""
db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)])
a = np.empty([100], dtype=db)
a['name'] = 'Simple'
a['time'] = 10
a['value'] = 100
indx = [0, 7, 15, 25]
h2 = []
val1 = indx[0]
for val2 in indx[1:]:
h2.append(np.add.reduce(a['value'][val1:val2]))
val1 = val2
h2.append(np.add.reduce(a['value'][val1:]))
h2 = np.array(h2)
# test buffered -- this should work
h1 = np.add.reduceat(a['value'], indx)
assert_array_almost_equal(h1, h2)
# This is when the error occurs.
# test no buffer
np.setbufsize(32)
h1 = np.add.reduceat(a['value'], indx)
np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)
assert_array_almost_equal(h1, h2)
def test_reduceat_empty():
"""Reduceat should work with empty arrays"""
indices = np.array([], 'i4')
x = np.array([], 'f8')
result = np.add.reduceat(x, indices)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (0,))
# Another case with a slightly different zero-sized shape
x = np.ones((5, 2))
result = np.add.reduceat(x, [], axis=0)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (0, 2))
result = np.add.reduceat(x, [], axis=1)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (5, 0))
def test_complex_nan_comparisons():
nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
complex(1, 1), complex(-1, -1), complex(0, 0)]
with np.errstate(invalid='ignore'):
for x in nans + fins:
x = np.array([x])
for y in nans + fins:
y = np.array([y])
if np.isfinite(x) and np.isfinite(y):
continue
assert_equal(x < y, False, err_msg="%r < %r" % (x, y))
assert_equal(x > y, False, err_msg="%r > %r" % (x, y))
assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y))
assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y))
assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -1,231,780,312,066,796,500 | -7,649,921,463,389,148,000 | 35.115344 | 135 | 0.519734 | false |
denisff/python-for-android | python3-alpha/extra_modules/gdata/Crypto/Cipher/__init__.py | 271 | 1145 | """Secret-key encryption algorithms.
Secret-key encryption algorithms transform plaintext in some way that
is dependent on a key, producing ciphertext. This transformation can
easily be reversed, if (and, hopefully, only if) one knows the key.
The encryption modules here all support the interface described in PEP
272, "API for Block Encryption Algorithms".
If you don't know which algorithm to choose, use AES because it's
standard and has undergone a fair bit of examination.
Crypto.Cipher.AES Advanced Encryption Standard
Crypto.Cipher.ARC2 Alleged RC2
Crypto.Cipher.ARC4 Alleged RC4
Crypto.Cipher.Blowfish
Crypto.Cipher.CAST
Crypto.Cipher.DES The Data Encryption Standard. Very commonly used
in the past, but today its 56-bit keys are too small.
Crypto.Cipher.DES3 Triple DES.
Crypto.Cipher.IDEA
Crypto.Cipher.RC5
Crypto.Cipher.XOR The simple XOR cipher.
"""
__all__ = ['AES', 'ARC2', 'ARC4',
'Blowfish', 'CAST', 'DES', 'DES3', 'IDEA', 'RC5',
'XOR'
]
__revision__ = "$Id: __init__.py,v 1.7 2003/02/28 15:28:35 akuchling Exp $"
| apache-2.0 | 4,942,411,220,717,497,000 | -5,200,196,151,822,464,000 | 33.69697 | 79 | 0.687336 | false |
leeseuljeong/leeseulstack_neutron | neutron/tests/unit/test_l3_dvr.py | 34 | 3931 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
from neutron.agent.l3 import link_local_allocator as lla
from neutron.tests import base
class TestLinkLocalAddrAllocator(base.BaseTestCase):
def setUp(self):
super(TestLinkLocalAddrAllocator, self).setUp()
self.subnet = netaddr.IPNetwork('169.254.31.0/24')
def test__init__(self):
a = lla.LinkLocalAllocator('/file', self.subnet.cidr)
self.assertEqual('/file', a.state_file)
self.assertEqual({}, a.allocations)
def test__init__readfile(self):
with mock.patch.object(lla.LinkLocalAllocator, '_read') as read:
read.return_value = ["da873ca2,169.254.31.28/31\n"]
a = lla.LinkLocalAllocator('/file', self.subnet.cidr)
self.assertTrue('da873ca2' in a.remembered)
self.assertEqual({}, a.allocations)
def test_allocate(self):
a = lla.LinkLocalAllocator('/file', self.subnet.cidr)
with mock.patch.object(lla.LinkLocalAllocator, '_write') as write:
subnet = a.allocate('deadbeef')
self.assertTrue('deadbeef' in a.allocations)
self.assertTrue(subnet not in a.pool)
self._check_allocations(a.allocations)
write.assert_called_once_with(['deadbeef,%s\n' % subnet.cidr])
def test_allocate_from_file(self):
with mock.patch.object(lla.LinkLocalAllocator, '_read') as read:
read.return_value = ["deadbeef,169.254.31.88/31\n"]
a = lla.LinkLocalAllocator('/file', self.subnet.cidr)
with mock.patch.object(lla.LinkLocalAllocator, '_write') as write:
subnet = a.allocate('deadbeef')
self.assertEqual(netaddr.IPNetwork('169.254.31.88/31'), subnet)
self.assertTrue(subnet not in a.pool)
self._check_allocations(a.allocations)
self.assertFalse(write.called)
def test_allocate_exhausted_pool(self):
subnet = netaddr.IPNetwork('169.254.31.0/31')
with mock.patch.object(lla.LinkLocalAllocator, '_read') as read:
read.return_value = ["deadbeef,169.254.31.0/31\n"]
a = lla.LinkLocalAllocator('/file', subnet.cidr)
with mock.patch.object(lla.LinkLocalAllocator, '_write') as write:
allocation = a.allocate('abcdef12')
self.assertEqual(subnet, allocation)
self.assertFalse('deadbeef' in a.allocations)
self.assertTrue('abcdef12' in a.allocations)
self.assertTrue(allocation not in a.pool)
self._check_allocations(a.allocations)
write.assert_called_once_with(['abcdef12,%s\n' % allocation.cidr])
self.assertRaises(RuntimeError, a.allocate, 'deadbeef')
def test_release(self):
with mock.patch.object(lla.LinkLocalAllocator, '_write') as write:
a = lla.LinkLocalAllocator('/file', self.subnet.cidr)
subnet = a.allocate('deadbeef')
write.reset_mock()
a.release('deadbeef')
self.assertTrue('deadbeef' not in a.allocations)
self.assertTrue(subnet in a.pool)
self.assertEqual({}, a.allocations)
write.assert_called_once_with([])
def _check_allocations(self, allocations):
for key, subnet in allocations.items():
self.assertTrue(subnet in self.subnet)
self.assertEqual(subnet.prefixlen, 31)
| apache-2.0 | -4,273,848,278,257,516,500 | 7,480,348,043,143,850,000 | 39.947917 | 78 | 0.660646 | false |
wilvk/ansible | lib/ansible/modules/notification/flowdock.py | 27 | 5713 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Coddington <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: flowdock
version_added: "1.2"
author: "Matt Coddington (@mcodd)"
short_description: Send a message to a flowdock
description:
- Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
options:
token:
description:
- API token.
required: true
type:
description:
- Whether to post to 'inbox' or 'chat'
required: true
choices: [ "inbox", "chat" ]
msg:
description:
- Content of the message
required: true
tags:
description:
- tags of the message, separated by commas
required: false
external_user_name:
description:
- (chat only - required) Name of the "user" sending the message
required: false
from_address:
description:
- (inbox only - required) Email address of the message sender
required: false
source:
description:
- (inbox only - required) Human readable identifier of the application that uses the Flowdock API
required: false
subject:
description:
- (inbox only - required) Subject line of the message
required: false
from_name:
description:
- (inbox only) Name of the message sender
required: false
reply_to:
description:
- (inbox only) Email address for replies
required: false
project:
description:
- (inbox only) Human readable identifier for more detailed message categorization
required: false
link:
description:
- (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
requirements: [ ]
'''
EXAMPLES = '''
- flowdock:
type: inbox
token: AAAAAA
from_address: [email protected]
source: my cool app
msg: test from ansible
subject: test subject
- flowdock:
type: chat
token: AAAAAA
external_user_name: testuser
msg: test from ansible
tags: tag1,tag2,tag3
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True, no_log=True),
msg=dict(required=True),
type=dict(required=True, choices=["inbox", "chat"]),
external_user_name=dict(required=False),
from_address=dict(required=False),
source=dict(required=False),
subject=dict(required=False),
from_name=dict(required=False),
reply_to=dict(required=False),
project=dict(required=False),
tags=dict(required=False),
link=dict(required=False),
validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
type = module.params["type"]
token = module.params["token"]
if type == 'inbox':
url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
else:
url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
params = {}
# required params
params['content'] = module.params["msg"]
# required params for the 'chat' type
if module.params['external_user_name']:
if type == 'inbox':
module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
else:
params['external_user_name'] = module.params["external_user_name"]
elif type == 'chat':
module.fail_json(msg="external_user_name is required for the 'chat' type")
# required params for the 'inbox' type
for item in ['from_address', 'source', 'subject']:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
elif type == 'inbox':
module.fail_json(msg="%s is required for the 'inbox' type" % item)
# optional params
if module.params["tags"]:
params['tags'] = module.params["tags"]
# optional params for the 'inbox' type
for item in ['from_name', 'reply_to', 'project', 'link']:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=False)
# Send the data to Flowdock
data = urlencode(params)
response, info = fetch_url(module, url, data=data)
if info['status'] != 200:
module.fail_json(msg="unable to send msg: %s" % info['msg'])
module.exit_json(changed=True, msg=module.params["msg"])
if __name__ == '__main__':
main()
| gpl-3.0 | 5,143,989,396,115,211,000 | 7,379,542,024,367,699,000 | 29.068421 | 155 | 0.618589 | false |
Ahmad31/Web_Flask_Cassandra | flask/lib/python2.7/site-packages/jinja2/exceptions.py | 222 | 4428 | # -*- coding: utf-8 -*-
"""
jinja2.exceptions
~~~~~~~~~~~~~~~~~
Jinja exceptions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import imap, text_type, PY2, implements_to_string
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
def __init__(self, message=None):
if message is not None:
message = text_type(message).encode('utf-8')
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message.decode('utf-8', 'replace')
def __unicode__(self):
return self.message or u''
else:
def __init__(self, message=None):
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist."""
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
IOError.__init__(self)
if message is None:
message = name
self.message = message
self.name = name
self.templates = [name]
def __str__(self):
return self.message
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
message = u'none of the templates given were found: ' + \
u', '.join(imap(text_type, names))
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
@implements_to_string
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(self, message, lineno, name=None, filename=None):
TemplateError.__init__(self, message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self):
# for translated errors we only return the message
if self.translated:
return self.message
# otherwise attach some stuff
location = 'line %d' % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
lines = [self.message, ' ' + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
line = None
if line:
lines.append(' ' + line.strip())
return u'\n'.join(lines)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
"""
| apache-2.0 | 3,527,683,960,801,364,500 | -7,214,033,712,527,584,000 | 29.328767 | 77 | 0.607949 | false |
rrrrrr8/vnpy | vnpy/api/fcoin/vnfcoin.py | 1 | 8705 | # encoding: UTF-8
from __future__ import print_function
import hashlib
import hmac
import json
import ssl
import traceback
import base64
from queue import Queue, Empty
from multiprocessing.dummy import Pool
from time import time
from urlparse import urlparse
from copy import copy
from urllib import urlencode
from threading import Thread
import requests
import websocket
from six.moves import input
REST_HOST = 'https://api.fcoin.com/v2'
WEBSOCKET_HOST = 'wss://api.fcoin.com/v2/ws'
########################################################################
class FcoinRestApi(object):
"""REST API"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.apiKey = ''
self.apiSecret = ''
self.active = False
self.reqid = 0
self.queue = Queue()
self.pool = None
        self.sessionDict = {}              # session objects, keyed by worker index
#----------------------------------------------------------------------
def init(self, apiKey, apiSecret):
"""初始化"""
self.apiKey = str(apiKey)
self.apiSecret = str(apiSecret)
#----------------------------------------------------------------------
def start(self, n=10):
"""启动"""
if self.active:
return
self.active = True
self.pool = Pool(n)
self.pool.map_async(self.run, range(n))
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.active = False
if self.pool:
self.pool.close()
self.pool.join()
#----------------------------------------------------------------------
def addReq(self, method, path, callback, params=None, postdict=None):
"""添加请求"""
self.reqid += 1
req = (method, path, callback, params, postdict, self.reqid)
self.queue.put(req)
return self.reqid
#----------------------------------------------------------------------
def processReq(self, req, i):
"""处理请求"""
method, path, callback, params, postdict, reqid = req
url = REST_HOST + path
timestamp = str(int(time()) * 1000)
header = {}
header['FC-ACCESS-TIMESTAMP'] = timestamp
header['FC-ACCESS-KEY'] = self.apiKey
header['FC-ACCESS-SIGNATURE'] = self.generateSignature(method, url, timestamp, params, postdict)
try:
            # Use a persistent (keep-alive) session; roughly 80% faster than opening a new connection per request
session = self.sessionDict[i]
resp = session.request(method, url, headers=header, params=params, json=postdict)
#resp = requests.request(method, url, headers=header, params=params, data=postdict)
#if method != 'GET':
#print '-' * 30
#print 'method', method
#print 'url', url
#print 'header', header
#print 'params', params
#print 'postdict', postdict
code = resp.status_code
d = resp.json()
if code == 200:
callback(d, reqid)
else:
self.onError(code, d)
except Exception as e:
self.onError(type(e), e.message)
#----------------------------------------------------------------------
def run(self, i):
"""连续运行"""
self.sessionDict[i] = requests.Session()
while self.active:
try:
req = self.queue.get(timeout=1)
self.processReq(req, i)
except Empty:
pass
#----------------------------------------------------------------------
def generateSignature(self, method, path, timestamp, params=None, postdict=None):
"""生成签名"""
        # params are serialized into the request path as a query string
if params:
query = urlencode(sorted(params.items()))
path = path + '?' + query
if postdict:
post = urlencode(sorted(postdict.items()))
else:
post = ''
msg = method + path + timestamp + post
msg = base64.b64encode(msg)
signature = hmac.new(self.apiSecret, msg, digestmod=hashlib.sha1).digest()
signature = base64.b64encode(signature)
return signature
#----------------------------------------------------------------------
def onError(self, code, error):
"""错误回调"""
print('on error')
print(code, error)
#----------------------------------------------------------------------
def onData(self, data, reqid):
"""通用回调"""
print('on data')
print(data, reqid)
########################################################################
class FcoinWebsocketApi(object):
"""Websocket API"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.ws = None
self.thread = None
self.active = False
#----------------------------------------------------------------------
def start(self):
"""启动"""
self.ws = websocket.create_connection(WEBSOCKET_HOST,
sslopt={'cert_reqs': ssl.CERT_NONE})
self.active = True
self.thread = Thread(target=self.run)
self.thread.start()
self.onConnect()
#----------------------------------------------------------------------
def reconnect(self):
"""重连"""
self.ws = websocket.create_connection(WEBSOCKET_HOST,
sslopt={'cert_reqs': ssl.CERT_NONE})
self.onConnect()
#----------------------------------------------------------------------
def run(self):
"""运行"""
while self.active:
try:
stream = self.ws.recv()
data = json.loads(stream)
self.onData(data)
except:
msg = traceback.format_exc()
self.onError(msg)
self.reconnect()
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.active = False
if self.thread:
self.thread.join()
#----------------------------------------------------------------------
def onConnect(self):
"""连接回调"""
print('connected')
#----------------------------------------------------------------------
def onData(self, data):
"""数据回调"""
print('-' * 30)
l = data.keys()
l.sort()
for k in l:
print(k, data[k])
#----------------------------------------------------------------------
def onError(self, msg):
"""错误回调"""
print(msg)
#----------------------------------------------------------------------
def sendReq(self, req):
"""发出请求"""
self.ws.send(json.dumps(req))
if __name__ == '__main__':
from datetime import datetime
from time import sleep
API_KEY = '88893f839fbd49f4b5fcb03e7c15c015'
API_SECRET = 'ef383295cf4e4c128e6d18d7e9564b12'
    # REST API test
rest = FcoinRestApi()
rest.init(API_KEY, API_SECRET)
rest.start(3)
#rest.addReq('GET', '/accounts/balance', rest.onData)
    # query orders
#states = ['submitted', 'partial_filled', 'partial_canceled',
#'filled', 'canceled', 'pending_cancel']
#req = {
#'symbol': 'ethusdt',
#'start': datetime.now().strftime('%Y%m%d'),
#'states': 'submitted',
#'limit': 500
#}
#for i in range(10):
#rest.addReq('GET', '/orders', rest.onData, params=req)
#sleep(2)
req = {
'symbol': 'ethusdt',
'side': 'buy',
'type': 'limit',
'price': 300,
'amount': 0.01
}
rest.addReq('POST', '/orders', rest.onData, postdict=req)
#sleep(1)
#rest.addReq('POST', '/orders', rest.onData, params=req)
    ## Websocket API test
#ws = FcoinWebsocketApi()
#ws.start()
#req = {
#'cmd': 'sub',
#'args': ['depth.L20.btcusdt'],
#'id': 1
#}
#ws.sendReq(req)
input()
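# Hedged worked example of the signing scheme implemented above (illustrative
# values only, not real credentials): for a GET of /accounts/balance with no
# query params and no body, the pre-sign string is
#     'GET' + 'https://api.fcoin.com/v2/accounts/balance' + timestamp + ''
# generateSignature() base64-encodes that string, signs it with HMAC-SHA1
# using the API secret, base64-encodes the digest again, and the result is
# sent in the FC-ACCESS-SIGNATURE header, e.g.
#     sig = rest.generateSignature('GET', REST_HOST + '/accounts/balance', timestamp)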
| mit | 6,284,238,859,746,349,000 | -4,423,215,896,958,488,600 | 27.914966 | 104 | 0.407246 | false |
Metronote/metronotesd-alpha | lib/blockchain/sochain.py | 6 | 3142 | '''
chain.so
'''
import logging
from lib import config, util
def get_host():
if config.BLOCKCHAIN_SERVICE_CONNECT:
return config.BLOCKCHAIN_SERVICE_CONNECT
else:
return 'https://chain.so'
def sochain_network():
network = config.BTC
if config.TESTNET:
network += 'TEST'
return network
def check():
pass
def getinfo():
result = util.get_url(get_host() + '/api/v2/get_info/{}'.format(sochain_network()), abort_on_error=True)
if 'status' in result and result['status'] == 'success':
return {
"info": {
"blocks": result['data']['blocks']
}
}
else:
return None
def listunspent(address):
result = util.get_url(get_host() + '/api/v2/get_tx_unspent/{}/{}'.format(sochain_network(), address), abort_on_error=True)
if 'status' in result and result['status'] == 'success':
utxo = []
for txo in result['data']['txs']:
newtxo = {
'address': address,
'txid': txo['txid'],
'vout': txo['output_no'],
'ts': txo['time'],
'scriptPubKey': txo['script_hex'],
'amount': float(txo['value']),
'confirmations': txo['confirmations'],
'confirmationsFromCache': False
}
utxo.append(newtxo)
return utxo
else:
return None
def getaddressinfo(address):
infos = util.get_url(get_host() + '/api/v2/address/{}/{}'.format(sochain_network(), address), abort_on_error=True)
if 'status' in infos and infos['status'] == 'success':
transactions = []
for tx in infos['data']['txs']:
transactions.append(tx['txid'])
return {
'addrStr': address,
'balance': float(infos['data']['balance']),
'balanceSat': float(infos['data']['balance']) * config.UNIT,
'totalReceived': float(infos['data']['received_value']),
'totalReceivedSat': float(infos['data']['received_value']) * config.UNIT,
'unconfirmedBalance': 0,
'unconfirmedBalanceSat': 0,
'unconfirmedTxApperances': 0,
'txApperances': infos['data']['total_txs'],
'transactions': transactions
}
return None
def gettransaction(tx_hash):
    tx = util.get_url(get_host() + '/api/v2/get_tx/{}/{}'.format(sochain_network(), tx_hash), abort_on_error=True)
if 'status' in tx and tx['status'] == 'success':
valueOut = 0
for vout in tx['data']['tx']['vout']:
valueOut += float(vout['value'])
return {
'txid': tx_hash,
'version': tx['data']['tx']['version'],
'locktime': tx['data']['tx']['locktime'],
'blockhash': tx['data']['tx']['blockhash'],
'confirmations': tx['data']['tx']['confirmations'],
'time': tx['data']['tx']['time'],
'blocktime': tx['data']['tx']['blocktime'],
'valueOut': valueOut,
'vin': tx['data']['tx']['vin'],
'vout': tx['data']['tx']['vout']
}
return None | mit | -9,156,771,837,113,923,000 | -7,044,143,279,470,322,000 | 32.43617 | 126 | 0.52578 | false |
knossos-project/PythonQt | examples/NicePyConsole/pygments/lexers/inferno.py | 52 | 3110 | # -*- coding: utf-8 -*-
"""
pygments.lexers.inferno
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Inferno os and all the related stuff.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String, Number
__all__ = ['LimboLexer']
class LimboLexer(RegexLexer):
"""
Lexer for `Limbo programming language <http://www.vitanuova.com/inferno/limbo.html>`_
TODO:
- maybe implement better var declaration highlighting
- some simple syntax error highlighting
.. versionadded:: 2.0
"""
name = 'Limbo'
aliases = ['limbo']
filenames = ['*.b']
mimetypes = ['text/limbo']
tokens = {
'whitespace': [
(r'^(\s*)([a-zA-Z_]\w*:(\s*)\n)',
bygroups(Text, Name.Label)),
(r'\n', Text),
(r'\s+', Text),
(r'#(\n|(.|\n)*?[^\\]\n)', Comment.Single),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\', String), # stray backslash
],
'statements': [
(r'"', String, 'string'),
(r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])', Number.Float),
(r'16r[0-9a-fA-F]+', Number.Hex),
(r'8r[0-7]+', Number.Oct),
(r'((([1-3]\d)|([2-9]))r)?(\d+)', Number.Integer),
(r'[()\[\],.]', Punctuation),
(r'[~!%^&*+=|?:<>/-]|(->)|(<-)|(=>)|(::)', Operator),
            (r'(alt|break|case|continue|cyclic|do|else|exit'
             r'|for|hd|if|implement|import|include|len|load|or'
             r'|pick|return|spawn|tagof|tl|to|while)\b', Keyword),
(r'(byte|int|big|real|string|array|chan|list|adt'
r'|fn|ref|of|module|self|type)\b', Keyword.Type),
(r'(con|iota|nil)\b', Keyword.Constant),
('[a-zA-Z_]\w*', Name),
],
'statement' : [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'root': [
include('whitespace'),
('', Text, 'statement'),
],
}
def analyse_text(text):
# Any limbo module implements something
if re.search(r'^implement \w+;', text, re.MULTILINE):
return 0.7
# TODO:
# - Make lexers for:
# - asm sources
# - man pages
# - mkfiles
# - module definitions
# - namespace definitions
# - shell scripts
# - maybe keyfiles and fonts
# they all seem to be quite similar to their equivalents
# from unix world, so there should not be a lot of problems
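# Hedged usage sketch (not part of this module): highlighting a Limbo snippet
# with this lexer through the standard Pygments entry points; the snippet is a
# made-up example.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     code = 'implement Hello;\ninclude "sys.m";\n'
#     print(highlight(code, LimboLexer(), TerminalFormatter()))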
| lgpl-2.1 | -2,130,091,659,456,033,800 | 6,963,353,930,287,593,000 | 31.395833 | 89 | 0.483601 | false |
tzewangdorje/SIPserv | Twisted-13.1.0/twisted/internet/test/reactormixins.py | 7 | 12552 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utilities for unit testing reactor implementations.
The main feature of this module is L{ReactorBuilder}, a base class for use when
writing interface/blackbox tests for reactor implementations. Test case classes
for reactor features should subclass L{ReactorBuilder} instead of
L{SynchronousTestCase}. All of the features of L{SynchronousTestCase} will be
available. Additionally, the tests will automatically be applied to all
available reactor implementations.
"""
from __future__ import division, absolute_import
__metaclass__ = type
__all__ = ['TestTimeoutError', 'ReactorBuilder', 'needsRunningReactor']
import os, signal, time
from twisted.python.compat import _PY3
from twisted.trial.unittest import SynchronousTestCase, SkipTest
from twisted.trial.util import DEFAULT_TIMEOUT_DURATION, acquireAttribute
from twisted.python.runtime import platform
from twisted.python._reflectpy3 import namedAny
from twisted.python.deprecate import _fullyQualifiedName as fullyQualifiedName
from twisted.python import log
from twisted.python.failure import Failure
# Access private APIs.
if platform.isWindows():
process = None
elif _PY3:
# Enable this on Python 3 when twisted.internet.process is ported.
# See #5968.
process = None
else:
from twisted.internet import process
class TestTimeoutError(Exception):
"""
The reactor was still running after the timeout period elapsed in
L{ReactorBuilder.runReactor}.
"""
def needsRunningReactor(reactor, thunk):
"""
Various functions within these tests need an already-running reactor at
some point. They need to stop the reactor when the test has completed, and
that means calling reactor.stop(). However, reactor.stop() raises an
exception if the reactor isn't already running, so if the L{Deferred} that
a particular API under test returns fires synchronously (as especially an
endpoint's C{connect()} method may do, if the connect is to a local
interface address) then the test won't be able to stop the reactor being
tested and finish. So this calls C{thunk} only once C{reactor} is running.
(This is just an alias for
L{twisted.internet.interfaces.IReactorCore.callWhenRunning} on the given
reactor parameter, in order to centrally reference the above paragraph and
repeating it everywhere as a comment.)
@param reactor: the L{twisted.internet.interfaces.IReactorCore} under test
@param thunk: a 0-argument callable, which eventually finishes the test in
question, probably in a L{Deferred} callback.
"""
reactor.callWhenRunning(thunk)
class ReactorBuilder:
"""
L{SynchronousTestCase} mixin which provides a reactor-creation API. This
mixin defines C{setUp} and C{tearDown}, so mix it in before
L{SynchronousTestCase} or call its methods from the overridden ones in the
subclass.
@cvar skippedReactors: A dict mapping FQPN strings of reactors for
which the tests defined by this class will be skipped to strings
giving the skip message.
@cvar requiredInterfaces: A C{list} of interfaces which the reactor must
provide or these tests will be skipped. The default, C{None}, means
that no interfaces are required.
@ivar reactorFactory: A no-argument callable which returns the reactor to
use for testing.
@ivar originalHandler: The SIGCHLD handler which was installed when setUp
ran and which will be re-installed when tearDown runs.
@ivar _reactors: A list of FQPN strings giving the reactors for which
L{SynchronousTestCase}s will be created.
"""
_reactors = [
# Select works everywhere
"twisted.internet.selectreactor.SelectReactor",
]
if platform.isWindows():
# PortableGtkReactor is only really interesting on Windows,
# but not really Windows specific; if you want you can
# temporarily move this up to the all-platforms list to test
# it on other platforms. It's not there in general because
# it's not _really_ worth it to support on other platforms,
# since no one really wants to use it on other platforms.
_reactors.extend([
"twisted.internet.gtk2reactor.PortableGtkReactor",
"twisted.internet.gireactor.PortableGIReactor",
"twisted.internet.gtk3reactor.PortableGtk3Reactor",
"twisted.internet.win32eventreactor.Win32Reactor",
"twisted.internet.iocpreactor.reactor.IOCPReactor"])
else:
_reactors.extend([
"twisted.internet.glib2reactor.Glib2Reactor",
"twisted.internet.gtk2reactor.Gtk2Reactor",
"twisted.internet.gireactor.GIReactor",
"twisted.internet.gtk3reactor.Gtk3Reactor"])
if platform.isMacOSX():
_reactors.append("twisted.internet.cfreactor.CFReactor")
else:
_reactors.extend([
"twisted.internet.pollreactor.PollReactor",
"twisted.internet.epollreactor.EPollReactor"])
if not platform.isLinux():
# Presumably Linux is not going to start supporting kqueue, so
# skip even trying this configuration.
_reactors.extend([
# Support KQueue on non-OS-X POSIX platforms for now.
"twisted.internet.kqreactor.KQueueReactor",
])
reactorFactory = None
originalHandler = None
requiredInterfaces = None
skippedReactors = {}
def setUp(self):
"""
Clear the SIGCHLD handler, if there is one, to ensure an environment
like the one which exists prior to a call to L{reactor.run}.
"""
if not platform.isWindows():
self.originalHandler = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def tearDown(self):
"""
Restore the original SIGCHLD handler and reap processes as long as
there seem to be any remaining.
"""
if self.originalHandler is not None:
signal.signal(signal.SIGCHLD, self.originalHandler)
if process is not None:
begin = time.time()
while process.reapProcessHandlers:
log.msg(
"ReactorBuilder.tearDown reaping some processes %r" % (
process.reapProcessHandlers,))
process.reapAllProcesses()
# The process should exit on its own. However, if it
# doesn't, we're stuck in this loop forever. To avoid
# hanging the test suite, eventually give the process some
# help exiting and move on.
time.sleep(0.001)
if time.time() - begin > 60:
for pid in process.reapProcessHandlers:
os.kill(pid, signal.SIGKILL)
raise Exception(
"Timeout waiting for child processes to exit: %r" % (
process.reapProcessHandlers,))
def unbuildReactor(self, reactor):
"""
Clean up any resources which may have been allocated for the given
reactor by its creation or by a test which used it.
"""
# Chris says:
#
# XXX These explicit calls to clean up the waker (and any other
# internal readers) should become obsolete when bug #3063 is
# fixed. -radix, 2008-02-29. Fortunately it should probably cause an
# error when bug #3063 is fixed, so it should be removed in the same
# branch that fixes it.
#
# -exarkun
reactor._uninstallHandler()
if getattr(reactor, '_internalReaders', None) is not None:
for reader in reactor._internalReaders:
reactor.removeReader(reader)
reader.connectionLost(None)
reactor._internalReaders.clear()
# Here's an extra thing unrelated to wakers but necessary for
# cleaning up after the reactors we make. -exarkun
reactor.disconnectAll()
# It would also be bad if any timed calls left over were allowed to
# run.
calls = reactor.getDelayedCalls()
for c in calls:
c.cancel()
def buildReactor(self):
"""
Create and return a reactor using C{self.reactorFactory}.
"""
try:
from twisted.internet.cfreactor import CFReactor
from twisted.internet import reactor as globalReactor
except ImportError:
pass
else:
if (isinstance(globalReactor, CFReactor)
and self.reactorFactory is CFReactor):
raise SkipTest(
"CFReactor uses APIs which manipulate global state, "
"so it's not safe to run its own reactor-builder tests "
"under itself")
try:
reactor = self.reactorFactory()
except:
# Unfortunately, not all errors which result in a reactor
# being unusable are detectable without actually
# instantiating the reactor. So we catch some more here
# and skip the test if necessary. We also log it to aid
# with debugging, but flush the logged error so the test
# doesn't fail.
log.err(None, "Failed to install reactor")
self.flushLoggedErrors()
raise SkipTest(Failure().getErrorMessage())
else:
if self.requiredInterfaces is not None:
missing = [
required for required in self.requiredInterfaces
if not required.providedBy(reactor)]
if missing:
self.unbuildReactor(reactor)
raise SkipTest("%s does not provide %s" % (
fullyQualifiedName(reactor.__class__),
",".join([fullyQualifiedName(x) for x in missing])))
self.addCleanup(self.unbuildReactor, reactor)
return reactor
def getTimeout(self):
"""
Determine how long to run the test before considering it failed.
@return: A C{int} or C{float} giving a number of seconds.
"""
return acquireAttribute(self._parents, 'timeout', DEFAULT_TIMEOUT_DURATION)
def runReactor(self, reactor, timeout=None):
"""
Run the reactor for at most the given amount of time.
@param reactor: The reactor to run.
@type timeout: C{int} or C{float}
@param timeout: The maximum amount of time, specified in seconds, to
allow the reactor to run. If the reactor is still running after
this much time has elapsed, it will be stopped and an exception
raised. If C{None}, the default test method timeout imposed by
Trial will be used. This depends on the L{IReactorTime}
implementation of C{reactor} for correct operation.
@raise TestTimeoutError: If the reactor is still running after
C{timeout} seconds.
"""
if timeout is None:
timeout = self.getTimeout()
timedOut = []
def stop():
timedOut.append(None)
reactor.stop()
reactor.callLater(timeout, stop)
reactor.run()
if timedOut:
raise TestTimeoutError(
"reactor still running after %s seconds" % (timeout,))
def makeTestCaseClasses(cls):
"""
Create a L{SynchronousTestCase} subclass which mixes in C{cls} for each
known reactor and return a dict mapping their names to them.
"""
classes = {}
for reactor in cls._reactors:
shortReactorName = reactor.split(".")[-1]
name = (cls.__name__ + "." + shortReactorName).replace(".", "_")
class testcase(cls, SynchronousTestCase):
__module__ = cls.__module__
if reactor in cls.skippedReactors:
skip = cls.skippedReactors[reactor]
try:
reactorFactory = namedAny(reactor)
except:
skip = Failure().getErrorMessage()
testcase.__name__ = name
classes[testcase.__name__] = testcase
return classes
makeTestCaseClasses = classmethod(makeTestCaseClasses)
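# Hedged usage sketch (not part of this module): a reactor test module
# typically subclasses ReactorBuilder and expands it into one TestCase per
# known reactor; the class and test names below are hypothetical.
#
#     class StopTestsBuilder(ReactorBuilder):
#         def test_stop(self):
#             reactor = self.buildReactor()
#             needsRunningReactor(reactor, reactor.stop)
#             self.runReactor(reactor)
#
#     globals().update(StopTestsBuilder.makeTestCaseClasses())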
| gpl-3.0 | -5,900,226,535,452,846,000 | -6,472,838,620,085,062,000 | 38.847619 | 83 | 0.628744 | false |
mbareta/edx-platform-ft | common/djangoapps/student/tests/test_user_profile_properties.py | 11 | 3560 | """Unit tests for custom UserProfile properties."""
import datetime
import ddt
from django.test import TestCase
from student.models import UserProfile
from student.tests.factories import UserFactory
from django.core.cache import cache
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
@ddt.ddt
class UserProfilePropertiesTest(CacheIsolationTestCase):
"""Unit tests for age, gender_display, and level_of_education_display properties ."""
password = "test"
ENABLED_CACHES = ['default']
def setUp(self):
super(UserProfilePropertiesTest, self).setUp()
self.user = UserFactory.create(password=self.password)
self.profile = self.user.profile
def _set_year_of_birth(self, year_of_birth):
"""
Helper method that sets a birth year for the specified user.
"""
self.profile.year_of_birth = year_of_birth
self.profile.save()
def _set_level_of_education(self, level_of_education):
"""
Helper method that sets a level of education for the specified user.
"""
self.profile.level_of_education = level_of_education
self.profile.save()
def _set_gender(self, gender):
"""
Helper method that sets a gender for the specified user.
"""
self.profile.gender = gender
self.profile.save()
@ddt.data(0, 1, 13, 20, 100)
def test_age(self, years_ago):
"""Verify the age calculated correctly."""
current_year = datetime.datetime.now().year
self._set_year_of_birth(current_year - years_ago)
        # In the year that you turn a certain age you will also have been a
        # year younger than that earlier in the same year. We calculate the age
        # based on the youngest you could be that year.
age = years_ago - 1
self.assertEqual(self.profile.age, age)
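        # Worked example (illustrative numbers): with years_ago=13 a user born
        # in current_year - 13 is reported as age 12, the youngest someone born
        # that year could still be during the current year.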
def test_age_no_birth_year(self):
"""Verify nothing is returned."""
self.assertIsNone(self.profile.age)
@ddt.data(*UserProfile.LEVEL_OF_EDUCATION_CHOICES)
@ddt.unpack
def test_display_level_of_education(self, level_enum, display_level):
"""Verify the level of education is displayed correctly."""
self._set_level_of_education(level_enum)
self.assertEqual(self.profile.level_of_education_display, display_level)
def test_display_level_of_education_none_set(self):
"""Verify nothing is returned."""
self.assertIsNone(self.profile.level_of_education_display)
@ddt.data(*UserProfile.GENDER_CHOICES)
@ddt.unpack
def test_display_gender(self, gender_enum, display_gender):
"""Verify the gender displayed correctly."""
self._set_gender(gender_enum)
self.assertEqual(self.profile.gender_display, display_gender)
def test_display_gender_none_set(self):
"""Verify nothing is returned."""
self._set_gender(None)
self.assertIsNone(self.profile.gender_display)
def test_invalidate_cache_user_profile_country_updated(self):
country = 'us'
self.profile.country = country
self.profile.save()
cache_key = UserProfile.country_cache_key_name(self.user.id)
self.assertIsNone(cache.get(cache_key))
cache.set(cache_key, self.profile.country)
self.assertEqual(cache.get(cache_key), country)
country = 'bd'
self.profile.country = country
self.profile.save()
self.assertNotEqual(cache.get(cache_key), country)
self.assertIsNone(cache.get(cache_key))
| agpl-3.0 | 7,023,402,984,374,456,000 | -2,096,797,181,781,600,800 | 32.584906 | 89 | 0.664045 | false |
kkintaro/termite-data-server | web2py/gluon/contrib/memdb.py | 9 | 28472 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro <[email protected]> and
Robin B <[email protected]>.
License: GPL v2
"""
__all__ = ['MEMDB', 'Field']
import re
import sys
import os
import types
import datetime
import thread
import cStringIO
import csv
import copy
import gluon.validators as validators
from gluon.utils import web2py_uuid
from gluon.storage import Storage
from gluon import SQLTABLE
import random
SQL_DIALECTS = {'memcache': {
'boolean': bool,
'string': unicode,
'text': unicode,
'password': unicode,
'blob': unicode,
'upload': unicode,
'integer': long,
'double': float,
'date': datetime.date,
'time': datetime.time,
'datetime': datetime.datetime,
'id': int,
'reference': int,
'lower': None,
'upper': None,
'is null': 'IS NULL',
'is not null': 'IS NOT NULL',
'extract': None,
'left join': None,
}}
def cleanup(text):
if re.compile('[^0-9a-zA-Z_]').findall(text):
raise SyntaxError('Can\'t cleanup \'%s\': only [0-9a-zA-Z_] allowed in table and field names' % text)
return text
def assert_filter_fields(*fields):
for field in fields:
if isinstance(field, (Field, Expression)) and field.type\
in ['text', 'blob']:
raise SyntaxError('AppEngine does not index by: %s'
% field.type)
def dateobj_to_datetime(object):
# convert dates,times to datetimes for AppEngine
if isinstance(object, datetime.date):
object = datetime.datetime(object.year, object.month,
object.day)
if isinstance(object, datetime.time):
object = datetime.datetime(
1970,
1,
1,
object.hour,
object.minute,
object.second,
object.microsecond,
)
return object
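# Illustrative examples (not part of the original module):
#   dateobj_to_datetime(datetime.date(2009, 1, 2))
#       -> datetime.datetime(2009, 1, 2, 0, 0)
#   dateobj_to_datetime(datetime.time(12, 30, 15))
#       -> datetime.datetime(1970, 1, 1, 12, 30, 15)  (times are anchored to the epoch date)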
def sqlhtml_validators(field_type, length):
v = {
'boolean': [],
'string': validators.IS_LENGTH(length),
'text': [],
'password': validators.IS_LENGTH(length),
'blob': [],
'upload': [],
'double': validators.IS_FLOAT_IN_RANGE(-1e100, 1e100),
'integer': validators.IS_INT_IN_RANGE(-1e100, 1e100),
'date': validators.IS_DATE(),
'time': validators.IS_TIME(),
'datetime': validators.IS_DATETIME(),
'reference': validators.IS_INT_IN_RANGE(0, 1e100),
}
try:
return v[field_type[:9]]
except KeyError:
return []
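# Illustrative examples (not part of the original module):
#   sqlhtml_validators('string', 32)             -> IS_LENGTH(32)
#   sqlhtml_validators('reference users', 512)   -> IS_INT_IN_RANGE(0, 1e100)
#   sqlhtml_validators('unknown', 512)           -> []  (unknown types get no validators)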
class DALStorage(dict):
"""
    a dictionary that lets you do d['a'] as well as d.a
"""
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
if key in self:
raise SyntaxError(
                'Object \'%s\' exists and cannot be redefined' % key)
self[key] = value
def __repr__(self):
return '<DALStorage ' + dict.__repr__(self) + '>'
class SQLCallableList(list):
def __call__(self):
return copy.copy(self)
class MEMDB(DALStorage):
"""
an instance of this class represents a database connection
Example::
db=MEMDB(Client())
db.define_table('tablename',Field('fieldname1'),
Field('fieldname2'))
"""
def __init__(self, client):
self._dbname = 'memdb'
self['_lastsql'] = ''
self.tables = SQLCallableList()
self._translator = SQL_DIALECTS['memcache']
self.client = client
def define_table(
self,
tablename,
*fields,
**args
):
tablename = cleanup(tablename)
if tablename in dir(self) or tablename[0] == '_':
raise SyntaxError('invalid table name: %s' % tablename)
if not tablename in self.tables:
self.tables.append(tablename)
else:
raise SyntaxError('table already defined: %s' % tablename)
t = self[tablename] = Table(self, tablename, *fields)
t._create()
return t
def __call__(self, where=''):
return Set(self, where)
class SQLALL(object):
def __init__(self, table):
self.table = table
class Table(DALStorage):
"""
an instance of this class represents a database table
Example::
db=MEMDB(Client())
db.define_table('users',Field('name'))
db.users.insert(name='me')
"""
def __init__(
self,
db,
tablename,
*fields
):
self._db = db
self._tablename = tablename
self.fields = SQLCallableList()
self._referenced_by = []
fields = list(fields)
fields.insert(0, Field('id', 'id'))
for field in fields:
self.fields.append(field.name)
self[field.name] = field
field._tablename = self._tablename
field._table = self
field._db = self._db
self.ALL = SQLALL(self)
def _create(self):
fields = []
myfields = {}
for k in self.fields:
field = self[k]
attr = {}
if not field.type[:9] in ['id', 'reference']:
if field.notnull:
attr = dict(required=True)
if field.type[:2] == 'id':
continue
if field.type[:9] == 'reference':
referenced = field.type[10:].strip()
if not referenced:
raise SyntaxError('Table %s: reference \'%s\' to nothing!' % (
self._tablename, k))
if not referenced in self._db:
raise SyntaxError(
'Table: table %s does not exist' % referenced)
referee = self._db[referenced]
ftype = \
self._db._translator[field.type[:9]](
self._db[referenced]._tableobj)
if self._tablename in referee.fields: # ## THIS IS OK
raise SyntaxError('Field: table \'%s\' has same name as a field '
'in referenced table \'%s\'' % (
self._tablename, referenced))
self._db[referenced]._referenced_by.append((self._tablename,
field.name))
elif not field.type in self._db._translator\
or not self._db._translator[field.type]:
raise SyntaxError('Field: unknown field type %s' % field.type)
self._tableobj = self._db.client
return None
def create(self):
        # nothing to do, here for backward compatibility
pass
def drop(self):
# nothing to do, here for backward compatibility
self._db(self.id > 0).delete()
def insert(self, **fields):
# Checks 3 times that the id is new. 3 times is enough!
for i in range(3):
id = self._create_id()
if self.get(id) is None and self.update(id, **fields):
return long(id)
else:
raise RuntimeError("Too many ID conflicts")
def get(self, id):
val = self._tableobj.get(self._id_to_key(id))
if val:
return Storage(val)
else:
return None
def update(self, id, **fields):
        # iterate over the table's declared fields (not only the values passed
        # in) so that defaults are applied before values are converted to
        # their storage representation
        for field in self.fields:
            if not field in fields and self[field].default\
                    is not None:
                fields[field] = self[field].default
            if field in fields:
                fields[field] = obj_represent(fields[field],
                                              self[field].type, self._db)
return self._tableobj.set(self._id_to_key(id), fields)
def delete(self, id):
return self._tableobj.delete(self._id_to_key(id))
def _id_to_key(self, id):
return '__memdb__/t/%s/k/%s' % (self._tablename, str(id))
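    # Illustrative example (not part of the original module): for a table
    # named 'users', self._id_to_key(42) -> '__memdb__/t/users/k/42'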
def _create_id(self):
return long(web2py_uuid().replace('-',''),16)
def __str__(self):
return self._tablename
def __call__(self, id, **kwargs):
record = self.get(id)
if kwargs and any(record[key]!=kwargs[key] for key in kwargs):
return None
return record
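    # Illustrative examples (not part of the original module):
    #   db.users(user_id)             -> the stored record, or None if absent
    #   db.users(user_id, name='me')  -> the record only if its name is 'me'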
class Expression(object):
def __init__(
self,
name,
type='string',
db=None,
):
(self.name, self.type, self._db) = (name, type, db)
def __str__(self):
return self.name
def __or__(self, other): # for use in sortby
assert_filter_fields(self, other)
return Expression(self.name + '|' + other.name, None, None)
def __invert__(self):
assert_filter_fields(self)
return Expression('-' + self.name, self.type, None)
# for use in Query
def __eq__(self, value):
return Query(self, '=', value)
def __ne__(self, value):
return Query(self, '!=', value)
def __lt__(self, value):
return Query(self, '<', value)
def __le__(self, value):
return Query(self, '<=', value)
def __gt__(self, value):
return Query(self, '>', value)
def __ge__(self, value):
return Query(self, '>=', value)
# def like(self,value): return Query(self,' LIKE ',value)
# def belongs(self,value): return Query(self,' IN ',value)
# for use in both Query and sortby
def __add__(self, other):
return Expression('%s+%s' % (self, other), 'float', None)
def __sub__(self, other):
return Expression('%s-%s' % (self, other), 'float', None)
def __mul__(self, other):
return Expression('%s*%s' % (self, other), 'float', None)
def __div__(self, other):
return Expression('%s/%s' % (self, other), 'float', None)
class Field(Expression):
"""
an instance of this class represents a database field
example::
a = Field(name, 'string', length=32, required=False,
default=None, requires=IS_NOT_EMPTY(), notnull=False,
unique=False, uploadfield=True)
    to be used as argument of MEMDB.define_table
allowed field types:
string, boolean, integer, double, text, blob,
date, time, datetime, upload, password
    strings must have a length (512 by default).
    fields should have a default or they will be required in SQLFORMs
    the requires argument is used to validate the field input in SQLFORMs
"""
def __init__(
self,
fieldname,
type='string',
length=None,
default=None,
required=False,
requires=sqlhtml_validators,
ondelete='CASCADE',
notnull=False,
unique=False,
uploadfield=True,
):
self.name = cleanup(fieldname)
if fieldname in dir(Table) or fieldname[0] == '_':
raise SyntaxError('Field: invalid field name: %s' % fieldname)
if isinstance(type, Table):
type = 'reference ' + type._tablename
if not length:
length = 512
self.type = type # 'string', 'integer'
self.length = length # the length of the string
self.default = default # default value for field
self.required = required # is this field required
self.ondelete = ondelete.upper() # this is for reference fields only
self.notnull = notnull
self.unique = unique
self.uploadfield = uploadfield
if requires == sqlhtml_validators:
requires = sqlhtml_validators(type, length)
elif requires is None:
requires = []
self.requires = requires # list of validators
def formatter(self, value):
if value is None or not self.requires:
return value
if not isinstance(self.requires, (list, tuple)):
requires = [self.requires]
else:
requires = copy.copy(self.requires)
requires.reverse()
for item in requires:
if hasattr(item, 'formatter'):
value = item.formatter(value)
return value
def __str__(self):
return '%s.%s' % (self._tablename, self.name)
MEMDB.Field = Field # ## required by gluon/globals.py session.connect
def obj_represent(object, fieldtype, db):
if object is not None:
if fieldtype == 'date' and not isinstance(object,
datetime.date):
(y, m, d) = [int(x) for x in str(object).strip().split('-')]
object = datetime.date(y, m, d)
elif fieldtype == 'time' and not isinstance(object, datetime.time):
time_items = [int(x) for x in str(object).strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
object = datetime.time(h, mi, s)
elif fieldtype == 'datetime' and not isinstance(object,
datetime.datetime):
(y, m, d) = [int(x) for x in
str(object)[:10].strip().split('-')]
time_items = [int(x) for x in
str(object)[11:].strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
object = datetime.datetime(
y,
m,
d,
h,
mi,
s,
)
elif fieldtype == 'integer' and not isinstance(object, long):
object = long(object)
return object
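# Illustrative examples (not part of the original module):
#   obj_represent('1971-12-21', 'date', db)   -> datetime.date(1971, 12, 21)
#   obj_represent('12:30', 'time', db)        -> datetime.time(12, 30, 0)
#   obj_represent('7', 'integer', db)         -> 7L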
class QueryException:
def __init__(self, **a):
self.__dict__ = a
class Query(object):
"""
A query object necessary to define a set.
    It can be stored or can be passed to MEMDB.__call__() to obtain a Set
Example:
query=db.users.name=='Max'
set=db(query)
records=set.select()
"""
def __init__(
self,
left,
op=None,
right=None,
):
if isinstance(right, (Field, Expression)):
raise SyntaxError(
'Query: right side of filter must be a value or entity')
if isinstance(left, Field) and left.name == 'id':
if op == '=':
self.get_one = QueryException(
tablename=left._tablename, id=long(right or 0))
return
else:
raise SyntaxError('only equality by id is supported')
raise SyntaxError('not supported')
def __str__(self):
return str(self.left)
class Set(object):
"""
    A Set represents a set of records in the database;
    the records are identified by the where=Query(...) object.
    Normally the Set is generated by MEMDB.__call__(Query(...))
given a set, for example
set=db(db.users.name=='Max')
you can:
set.update(db.users.name='Massimo')
set.delete() # all elements in the set
set.select(orderby=db.users.id,groupby=db.users.name,limitby=(0,10))
and take subsets:
subset=set(db.users.id<5)
"""
def __init__(self, db, where=None):
self._db = db
self._tables = []
self.filters = []
if hasattr(where, 'get_all'):
self.where = where
self._tables.insert(0, where.get_all)
elif hasattr(where, 'get_one') and isinstance(where.get_one,
QueryException):
self.where = where.get_one
else:
# find out which tables are involved
if isinstance(where, Query):
self.filters = where.left
self.where = where
self._tables = [field._tablename for (field, op, val) in
self.filters]
def __call__(self, where):
if isinstance(self.where, QueryException) or isinstance(where,
QueryException):
raise SyntaxError('neither self.where nor where can be a QueryException instance')
if self.where:
return Set(self._db, self.where & where)
else:
return Set(self._db, where)
def _get_table_or_raise(self):
tablenames = list(set(self._tables)) # unique
if len(tablenames) < 1:
raise SyntaxError('Set: no tables selected')
if len(tablenames) > 1:
raise SyntaxError('Set: no join in appengine')
return self._db[tablenames[0]]._tableobj
def _getitem_exception(self):
(tablename, id) = (self.where.tablename, self.where.id)
fields = self._db[tablename].fields
self.colnames = ['%s.%s' % (tablename, t) for t in fields]
item = self._db[tablename].get(id)
return (item, fields, tablename, id)
def _select_except(self):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return []
new_item = []
for t in fields:
if t == 'id':
new_item.append(long(id))
else:
new_item.append(getattr(item, t))
r = [new_item]
return Rows(self._db, r, *self.colnames)
def select(self, *fields, **attributes):
"""
Always returns a Rows object, even if it may be empty
"""
if isinstance(self.where, QueryException):
return self._select_except()
else:
raise SyntaxError('select arguments not supported')
def count(self):
return len(self.select())
def delete(self):
if isinstance(self.where, QueryException):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return
self._db[tablename].delete(id)
else:
raise Exception('deletion not implemented')
def update(self, **update_fields):
if isinstance(self.where, QueryException):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return
for (key, value) in update_fields.items():
setattr(item, key, value)
self._db[tablename].update(id, **item)
else:
raise Exception('update not implemented')
def update_record(
t,
s,
id,
a,
):
item = s.get(id)
for (key, value) in a.items():
t[key] = value
setattr(item, key, value)
s.update(id, **item)
class Rows(object):
"""
A wrapper for the return value of a select. It basically represents a table.
It has an iterator and each row is represented as a dictionary.
"""
# ## this class still needs some work to care for ID/OID
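    # Illustrative usage (not part of the original module), assuming a single
    # table select as in the doctests below:
    #   rows = db(db.person.id == person_id).select()
    #   len(rows)        # number of records
    #   rows[0].name     # fields are exposed attribute-style per record
    #   str(rows)        # CSV serialization, header row first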
def __init__(
self,
db,
response,
*colnames
):
self._db = db
self.colnames = colnames
self.response = response
def __len__(self):
return len(self.response)
def __getitem__(self, i):
if i >= len(self.response) or i < 0:
raise SyntaxError('Rows: no such row: %i' % i)
if len(self.response[0]) != len(self.colnames):
raise SyntaxError('Rows: internal error')
row = DALStorage()
for j in xrange(len(self.colnames)):
value = self.response[i][j]
if isinstance(value, unicode):
value = value.encode('utf-8')
packed = self.colnames[j].split('.')
try:
(tablename, fieldname) = packed
except:
if not '_extra' in row:
row['_extra'] = DALStorage()
row['_extra'][self.colnames[j]] = value
continue
table = self._db[tablename]
field = table[fieldname]
if not tablename in row:
row[tablename] = DALStorage()
if field.type[:9] == 'reference':
referee = field.type[10:].strip()
rid = value
row[tablename][fieldname] = rid
elif field.type == 'boolean' and value is not None:
# row[tablename][fieldname]=Set(self._db[referee].id==rid)
if value == True or value == 'T':
row[tablename][fieldname] = True
else:
row[tablename][fieldname] = False
elif field.type == 'date' and value is not None\
and not isinstance(value, datetime.date):
(y, m, d) = [int(x) for x in
str(value).strip().split('-')]
row[tablename][fieldname] = datetime.date(y, m, d)
elif field.type == 'time' and value is not None\
and not isinstance(value, datetime.time):
time_items = [int(x) for x in
str(value).strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
row[tablename][fieldname] = datetime.time(h, mi, s)
elif field.type == 'datetime' and value is not None\
and not isinstance(value, datetime.datetime):
(y, m, d) = [int(x) for x in
str(value)[:10].strip().split('-')]
time_items = [int(x) for x in
str(value)[11:].strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
row[tablename][fieldname] = datetime.datetime(
y,
m,
d,
h,
mi,
s,
)
else:
row[tablename][fieldname] = value
if fieldname == 'id':
id = row[tablename].id
row[tablename].update_record = lambda t = row[tablename], \
s = self._db[tablename], id = id, **a: update_record(t,
s, id, a)
for (referee_table, referee_name) in \
table._referenced_by:
s = self._db[referee_table][referee_name]
row[tablename][referee_table] = Set(self._db, s
== id)
if len(row.keys()) == 1:
return row[row.keys()[0]]
return row
def __iter__(self):
"""
iterator over records
"""
for i in xrange(len(self)):
yield self[i]
def __str__(self):
"""
serializes the table into a csv file
"""
s = cStringIO.StringIO()
writer = csv.writer(s)
writer.writerow(self.colnames)
c = len(self.colnames)
for i in xrange(len(self)):
row = [self.response[i][j] for j in xrange(c)]
for k in xrange(c):
if isinstance(row[k], unicode):
row[k] = row[k].encode('utf-8')
writer.writerow(row)
return s.getvalue()
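    # Illustrative example (not part of the original module): the first CSV
    # line is the column names, e.g. 'person.id,person.name,person.birth',
    # followed by one line per record.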
def xml(self):
"""
serializes the table using SQLTABLE (if present)
"""
return SQLTABLE(self).xml()
def test_all():
"""
How to run from web2py dir:
export PYTHONPATH=.:YOUR_PLATFORMS_APPENGINE_PATH
python gluon/contrib/memdb.py
Setup the UTC timezone and database stubs
>>> import os
>>> os.environ['TZ'] = 'UTC'
>>> import time
>>> if hasattr(time, 'tzset'):
... time.tzset()
>>>
>>> from google.appengine.api import apiproxy_stub_map
>>> from google.appengine.api.memcache import memcache_stub
>>> apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
>>> apiproxy_stub_map.apiproxy.RegisterStub('memcache', memcache_stub.MemcacheServiceStub())
Create a table with all possible field types
>>> from google.appengine.api.memcache import Client
>>> db=MEMDB(Client())
>>> tmp=db.define_table('users', Field('stringf','string',length=32,required=True), Field('booleanf','boolean',default=False), Field('passwordf','password',notnull=True), Field('blobf','blob'), Field('uploadf','upload'), Field('integerf','integer',unique=True), Field('doublef','double',unique=True,notnull=True), Field('datef','date',default=datetime.date.today()), Field('timef','time'), Field('datetimef','datetime'), migrate='test_user.table')
Insert a field
>>> user_id = db.users.insert(stringf='a',booleanf=True,passwordf='p',blobf='0A', uploadf=None, integerf=5,doublef=3.14, datef=datetime.date(2001,1,1), timef=datetime.time(12,30,15), datetimef=datetime.datetime(2002,2,2,12,30,15))
>>> user_id != None
True
Select all
# >>> all = db().select(db.users.ALL)
Drop the table
# >>> db.users.drop()
Select many entities
>>> tmp = db.define_table(\"posts\", Field('body','text'), Field('total','integer'), Field('created_at','datetime'))
>>> many = 20 #2010 # more than 1000 single fetch limit (it can be slow)
>>> few = 5
>>> most = many - few
>>> 0 < few < most < many
True
>>> for i in range(many):
... f=db.posts.insert(body='', total=i,created_at=datetime.datetime(2008, 7, 6, 14, 15, 42, i))
>>>
# test timezones
>>> class TZOffset(datetime.tzinfo):
... def __init__(self,offset=0):
... self.offset = offset
... def utcoffset(self, dt): return datetime.timedelta(hours=self.offset)
... def dst(self, dt): return datetime.timedelta(0)
... def tzname(self, dt): return 'UTC' + str(self.offset)
...
>>> SERVER_OFFSET = -8
>>>
>>> stamp = datetime.datetime(2008, 7, 6, 14, 15, 42, 828201)
>>> post_id = db.posts.insert(created_at=stamp,body='body1')
>>> naive_stamp = db(db.posts.id==post_id).select()[0].created_at
>>> utc_stamp=naive_stamp.replace(tzinfo=TZOffset())
>>> server_stamp = utc_stamp.astimezone(TZOffset(SERVER_OFFSET))
>>> stamp == naive_stamp
True
>>> utc_stamp == server_stamp
True
>>> rows = db(db.posts.id==post_id).select()
>>> len(rows) == 1
True
>>> rows[0].body == 'body1'
True
>>> db(db.posts.id==post_id).delete()
>>> rows = db(db.posts.id==post_id).select()
>>> len(rows) == 0
True
>>> id = db.posts.insert(total='0') # coerce str to integer
>>> rows = db(db.posts.id==id).select()
>>> len(rows) == 1
True
>>> rows[0].total == 0
True
Examples of insert, select, update, delete
>>> tmp=db.define_table('person', Field('name'), Field('birth','date'), migrate='test_person.table')
>>> marco_id=db.person.insert(name=\"Marco\",birth='2005-06-22')
>>> person_id=db.person.insert(name=\"Massimo\",birth='1971-12-21')
>>> me=db(db.person.id==person_id).select()[0] # test select
>>> me.name
'Massimo'
>>> db(db.person.id==person_id).update(name='massimo') # test update
>>> me = db(db.person.id==person_id).select()[0]
>>> me.name
'massimo'
>>> str(me.birth)
'1971-12-21'
# resave date to ensure it comes back the same
>>> me=db(db.person.id==person_id).update(birth=me.birth) # test update
>>> me = db(db.person.id==person_id).select()[0]
>>> me.birth
datetime.date(1971, 12, 21)
>>> db(db.person.id==marco_id).delete() # test delete
>>> len(db(db.person.id==marco_id).select())
0
Update a single record
>>> me.update_record(name=\"Max\")
>>> me.name
'Max'
>>> me = db(db.person.id == person_id).select()[0]
>>> me.name
'Max'
"""
SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = DALStorage
if __name__ == '__main__':
import doctest
doctest.testmod()
| bsd-3-clause | 2,785,572,435,043,453,400 | -3,084,685,843,707,248,000 | 30.530454 | 594 | 0.522303 | false |
cfg2015/EPT-2015-2 | addons/sale_journal/sale_journal.py | 276 | 4290 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_journal_invoice_type(osv.osv):
_name = 'sale_journal.invoice.type'
_description = 'Invoice Types'
_columns = {
'name': fields.char('Invoice Type', required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the invoice type without removing it."),
'note': fields.text('Note'),
'invoicing_method': fields.selection([('simple', 'Non grouped'), ('grouped', 'Grouped')], 'Invoicing method', required=True),
}
_defaults = {
'active': True,
'invoicing_method': 'simple'
}
#==============================================
# sale journal inherit
#==============================================
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'property_invoice_type': fields.property(
type = 'many2one',
relation = 'sale_journal.invoice.type',
string = "Invoicing Type",
group_name = "Accounting Properties",
help = "This invoicing type will be used, by default, to invoice the current partner."),
}
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['property_invoice_type']
class picking(osv.osv):
_inherit = "stock.picking"
_columns = {
'invoice_type_id': fields.many2one('sale_journal.invoice.type', 'Invoice Type', readonly=True)
}
class stock_move(osv.osv):
_inherit = "stock.move"
def action_confirm(self, cr, uid, ids, context=None):
"""
Pass the invoice type to the picking from the sales order
(Should also work in case of Phantom BoMs when on explosion the original move is deleted, similar to carrier_id on delivery)
"""
procs_to_check = []
for move in self.browse(cr, uid, ids, context=context):
if move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.invoice_type_id:
procs_to_check += [move.procurement_id]
res = super(stock_move, self).action_confirm(cr, uid, ids, context=context)
pick_obj = self.pool.get("stock.picking")
for proc in procs_to_check:
pickings = list(set([x.picking_id.id for x in proc.move_ids if x.picking_id and not x.picking_id.invoice_type_id]))
if pickings:
pick_obj.write(cr, uid, pickings, {'invoice_type_id': proc.sale_line_id.order_id.invoice_type_id.id}, context=context)
return res
class sale(osv.osv):
_inherit = "sale.order"
_columns = {
'invoice_type_id': fields.many2one('sale_journal.invoice.type', 'Invoice Type', help="Generate invoice based on the selected option.")
}
def onchange_partner_id(self, cr, uid, ids, part, context=None):
result = super(sale, self).onchange_partner_id(cr, uid, ids, part, context=context)
if part:
itype = self.pool.get('res.partner').browse(cr, uid, part, context=context).property_invoice_type
if itype:
result['value']['invoice_type_id'] = itype.id
return result
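    # Illustrative note (not part of the original module): when the selected
    # partner has property_invoice_type set, the onchange result pre-fills
    # invoice_type_id on the sale order, i.e.
    #   result['value']['invoice_type_id'] == <id of the partner's invoice type>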
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,612,940,390,103,039,500 | 8,082,081,765,396,736,000 | 41.9 | 152 | 0.609324 | false |