repo_name | ref | path | copies | content
---|---|---|---|---|
zhjunlang/kbengine | refs/heads/master | kbe/src/lib/python/Lib/multiprocessing/managers.py | 85 | #
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import sys
import threading
import array
import queue
from time import time as _time
from traceback import format_exc
from . import connection
from . import context
from . import pool
from . import process
from . import reduction
from . import util
from . import get_context
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
    Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return 'Token(typeid=%r, address=%r, id=%r)' % \
(self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind == '#TRACEBACK':
assert type(result) is str
return RemoteError(result)
elif kind == '#UNSERIALIZABLE':
assert type(result) is str
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
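# Illustrative sketch (not part of the original module) of the wire protocol
# that dispatch() and convert_to_error() implement: each request is an
# (ident, methodname, args, kwds) tuple and each response is a (kind, result)
# pair, where kind is one of '#RETURN', '#PROXY', '#ERROR', '#TRACEBACK' or
# '#UNSERIALIZABLE'.  For example:
#
#     conn.send((None, 'dummy', (), {}))   # as done by dispatch(conn, None, 'dummy')
#     kind, result = conn.recv()           # -> ('#RETURN', None) on success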
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.mutex = threading.RLock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__:
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
self.mutex.acquire()
try:
result = []
keys = list(self.id_to_obj.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
finally:
self.mutex.release()
def number_of_objects(self, c):
'''
Number of shared objects
'''
return len(self.id_to_obj) - 1 # don't count ident='0'
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
self.mutex.acquire()
try:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
# increment the reference count immediately, to avoid
# this object being garbage collected before a Proxy
# object for it can be created. The caller of create()
# is responsible for doing a decref once the Proxy object
# has been created.
self.incref(c, ident)
return ident, tuple(exposed)
finally:
self.mutex.release()
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
self.mutex.acquire()
try:
self.id_to_refcount[ident] += 1
finally:
self.mutex.release()
def decref(self, c, ident):
self.mutex.acquire()
try:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
util.debug('disposing of obj with id %r', ident)
finally:
self.mutex.release()
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle',
ctx=None):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
assert self._state.value == State.INITIAL
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
assert self._state.value == State.INITIAL
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
Return some info about the servers shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
assert self._state.value == State.STARTED
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()):
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
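# Illustrative sketch (not part of the original module): registering a custom
# typeid on a BaseManager subclass.  The generated creation method is named
# after the typeid, and AutoProxy is used because no proxytype is given.
#
#     class MyManager(BaseManager):       # hypothetical subclass
#         pass
#     MyManager.register('Counter', callable=lambda: {'n': 0})
#     m = MyManager()
#     m.start()
#     counter = m.Counter()               # returns an AutoProxy for the dict
#     m.shutdown()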
#
# Subclass of set which gets cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
BaseProxy._mutex.acquire()
try:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
finally:
BaseProxy._mutex.release()
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
        Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if context.get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %s>' % \
(type(self).__name__, self._token.typeid, '0x%x' % id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
    Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
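# Illustrative sketch: MakeProxyType generates a BaseProxy subclass whose
# methods simply forward to _callmethod, so
#
#     FooProxy = MakeProxyType('FooProxy', ('bar',))
#
# produces a class equivalent to
#
#     class FooProxy(BaseProxy):
#         _exposed_ = ('bar',)
#         def bar(self, *args, **kwds):
#             return self._callmethod('bar', args, kwds)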
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking,) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self):
return self._callmethod('notify')
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties',))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting',))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__'
))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
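# Illustrative usage sketch (not part of the original module); in application
# code one normally obtains a started instance via multiprocessing.Manager():
#
#     manager = SyncManager()
#     manager.start()
#     d = manager.dict()        # DictProxy
#     q = manager.Queue()       # AutoProxy for queue.Queue
#     d['answer'] = 42
#     manager.shutdown()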
|
fxtentacle/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/QueueStatusServer/model/queuepropertymixin_unittest.py | 125 | # Copyright (C) 2010 Google, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from model.queuepropertymixin import QueuePropertyMixin
from model.queues import Queue
class ObjectWithQueueName(QueuePropertyMixin):
def __init__(self):
self.queue_name = None
class QueuePropertyMixinTest(unittest.TestCase):
def test_queue_property(self):
test_object = ObjectWithQueueName()
mac_ews = Queue("mac-ews")
test_object.queue = mac_ews
self.assertEqual(test_object.queue.name(), "mac-ews")
self.assertEqual(test_object.queue_name, "mac-ews")
test_object.queue = None
self.assertEqual(test_object.queue_name, None)
if __name__ == '__main__':
unittest.main()
|
kcolford/txt2boil | refs/heads/develop | txt2boil/cmi.py | 1 | # Copyright (C) 2014 Kieran Colford
#
# This file is part of txt2boil.
#
# txt2boil is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# txt2boil is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with txt2boil. If not, see <http://www.gnu.org/licenses/>.
"""Cooprative Multiple Inheritance (CMI) made easy.
This module provides a function descriptor that transforms an ordinary
overloaded function into a function that is compatible with CMI. This
allows us to mimic the Chain of Resposibility design pattern entirely
using CMI.
There are two API's. One is the generic descripter that takes a
binary operation and uses it to merge two answers together. The other
is two functions: one which locates the first non-None object and
returns it, and the other which finds the minimum element of all the
posibilities.
Note that all the cls arguments to the descriptors must be passed
wrapped in a lambda like so:
class Foo:
@nonNoneCMI(lambda: Foo)
def bar(self):
pass
Otherwise the name Foo won't be defined yet.
"""
from functools import wraps
class AbstractCMI:
"""A descriptor that enables easy CMI according to a function.
The final result function will be achieved by using merge as a
binary operation on the results of the current function and the
super-class' function of the same name.
"""
def __init__(self, cls, merge):
"""Initialize the abstract descriptor.
Note that cls has to be wrapped in a lambda because otherwise
there will be a name resolution error (since that class hasn't
been defined yet).
cls - the current class wrapped in a lambda
merge - the binary operator that will determine the final
result
"""
self.cls = cls
self.merge = merge
def _getSuperFunc(self, s, func):
"""Return the the super function."""
return getattr(super(self.cls(), s), func.__name__)
def __call__(self, func):
"""Apply this abstract descriptor to func."""
@wraps(func)
def wrapper(s, *args, **kwargs):
a = func(s, *args, **kwargs)
b = self._getSuperFunc(s, func)(*args, **kwargs)
return self.merge(a, b)
return wrapper
def minCMI(cls):
"""Return an AbstractCMI that locates the minimum element."""
return AbstractCMI(cls, min)
def nonNoneCMI(cls):
"""Return an AbstractCMI that locates the first non-None element."""
return AbstractCMI(cls, lambda x, y: x if x is not None else y)
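# Illustrative sketch (not part of the original module): chaining lookups
# with nonNoneCMI so that a miss in Child falls through to Base.
#
#     class Base(object):
#         def lookup(self, key):
#             return {'a': 1}.get(key)
#
#     class Child(Base):
#         @nonNoneCMI(lambda: Child)
#         def lookup(self, key):
#             return {'b': 2}.get(key)
#
#     Child().lookup('b')   # -> 2, answered by Child itself
#     Child().lookup('a')   # -> 1, falls through to Base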
__all__ = ['AbstractCMI', 'minCMI', 'nonNoneCMI']
|
theoryno3/scikit-learn | refs/heads/master | sklearn/feature_extraction/setup.py | 314 | import os
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('feature_extraction', parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension('_hashing',
sources=['_hashing.c'],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
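# Hedged addition: the dumped record ends at `return config`, but
# scikit-learn's per-package setup.py modules conventionally close with this
# standalone build entry point.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())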
|
googledatalab/pydatalab | refs/heads/master | tests/mlworkbench_magic/explainer_tests.py | 2 | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Integration Tests of PredictionExplainer."""
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
from PIL import Image
import logging
import mock
import numpy as np
import os
import pandas as pd
import shutil
import six
from six.moves.urllib.request import urlopen
import tempfile
import sys
# Import IPython so we can mock the parts we need here.
import IPython.core.display
import IPython.core.magic
from google.datalab.contrib.mlworkbench import PredictionExplainer
def noop_decorator(func):
return func
IPython.core.magic.register_line_cell_magic = noop_decorator
IPython.core.magic.register_line_magic = noop_decorator
IPython.core.magic.register_cell_magic = noop_decorator
IPython.core.display.HTML = lambda x: x
IPython.core.display.JSON = lambda x: x
IPython.get_ipython = mock.Mock()
IPython.get_ipython().user_ns = {}
import google.datalab.contrib.mlworkbench.commands._ml as mlmagic # noqa
class TestMLExplainer(unittest.TestCase):
"""Integration tests of PredictionExplainer"""
def setUp(self):
self._logger = logging.getLogger('TestExplainerLogger')
self._logger.setLevel(logging.DEBUG)
if not self._logger.handlers:
self._logger.addHandler(logging.StreamHandler(stream=sys.stdout))
self._code_path = mlmagic.DEFAULT_PACKAGE_PATH
mlmagic.DEFAULT_PACKAGE_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../solutionbox/ml_workbench/tensorflow'))
self._test_dir = tempfile.mkdtemp()
def tearDown(self):
mlmagic.DEFAULT_PACKAGE_PATH = self._code_path
shutil.rmtree(self._test_dir)
def _create_tabular_test_data(self):
"""Create tabular model with text."""
test_data = """1,5.0,monday,word1 word2 word3,true
2,3.2,tuesday,word1 word3,true
3,-1.1,friday,word1,false"""
train_csv = os.path.join(self._test_dir, 'train.csv')
with open(train_csv, 'w') as f:
f.write(test_data)
df = pd.read_csv(train_csv, names=['key', 'num', 'weekday', 'garbage', 'target'])
analyze_dir = os.path.join(self._test_dir, 'analysistab')
train_dir = os.path.join(self._test_dir, 'traintab')
mlmagic.ml(
line='dataset create',
cell="""\
format: csv
name: mytabular
schema:
- name: key
type: INTEGER
- name: num
type: FLOAT
- name: weekday
type: STRING
- name: garbage
type: STRING
- name: target
type: STRING
train: %s
eval: %s""" % (train_csv, train_csv))
mlmagic.ml(
line='analyze',
cell="""\
output: %s
data: mytabular
features:
key:
transform: key
num:
transform: scale
weekday:
transform: one_hot
garbage:
transform: bag_of_words
target:
transform: target""" % (analyze_dir))
mlmagic.ml(
line='train',
cell="""\
output: %s
analysis: %s
data: mytabular
notb: true
model_args:
model: linear_classification
top-n: 0
max-steps: 300""" % (train_dir, analyze_dir))
return df
def _create_text_test_data(self):
"""Create text model."""
test_data = """1,sour green round,lime
2,melon green long,cucumber
3,sweet round red,apple"""
train_csv = os.path.join(self._test_dir, 'train.csv')
with open(train_csv, 'w') as f:
f.write(test_data)
analyze_dir = os.path.join(self._test_dir, 'analysistxt')
train_dir = os.path.join(self._test_dir, 'traintxt')
mlmagic.ml(
line='dataset create',
cell="""\
format: csv
name: mytext
schema:
- name: key
type: INTEGER
- name: text
type: STRING
- name: target
type: STRING
train: %s
eval: %s""" % (train_csv, train_csv))
mlmagic.ml(
line='analyze',
cell="""\
output: %s
data: mytext
features:
key:
transform: key
text:
transform: bag_of_words
target:
transform: target""" % (analyze_dir))
mlmagic.ml(
line='train',
cell="""\
output: %s
analysis: %s
data: mytext
notb: true
model_args:
model: linear_classification
top-n: 0
max-steps: 300""" % (train_dir, analyze_dir))
def _create_image_test_data(self):
image_path1 = os.path.join(self._test_dir, 'img1.jpg')
image_path2 = os.path.join(self._test_dir, 'img2.jpg')
image_path3 = os.path.join(self._test_dir, 'img3.jpg')
Image.new('RGB', size=(128, 128), color=(155, 211, 64)).save(image_path1, "JPEG")
Image.new('RGB', size=(64, 64), color=(111, 21, 86)).save(image_path2, "JPEG")
Image.new('RGB', size=(16, 16), color=(255, 21, 1)).save(image_path3, "JPEG")
test_data = """1,1.2,word1 word2,%s,true
2,3.2,word2 word3,%s,false
5,-2.1,word3 word4,%s,true""" % (image_path1, image_path2, image_path3)
train_csv = os.path.join(self._test_dir, 'train.csv')
with open(train_csv, 'w') as f:
f.write(test_data)
analyze_dir = os.path.join(self._test_dir, 'analysisimg')
transform_dir = os.path.join(self._test_dir, 'transformimg')
train_dir = os.path.join(self._test_dir, 'trainimg')
# Download inception checkpoint. Note that gs url doesn't work because
# we may not have gcloud signed in when running the test.
url = ('https://storage.googleapis.com/cloud-ml-data/img/' +
'flower_photos/inception_v3_2016_08_28.ckpt')
checkpoint_path = os.path.join(self._test_dir, "checkpoint")
response = urlopen(url)
with open(checkpoint_path, 'wb') as f:
f.write(response.read())
mlmagic.ml(
line='dataset create',
cell="""\
format: csv
name: myds
schema:
- name: key
type: INTEGER
- name: num
type: FLOAT
- name: text
type: STRING
- name: img_url
type: STRING
- name: target
type: STRING
train: %s
eval: %s""" % (train_csv, train_csv))
mlmagic.ml(
line='analyze',
cell="""\
output: %s
data: myds
features:
key:
transform: key
num:
transform: scale
text:
transform: bag_of_words
img_url:
transform: image_to_vec
checkpoint: %s
target:
transform: target""" % (analyze_dir, checkpoint_path))
mlmagic.ml(
line='transform',
cell="""\
output: %s
analysis: %s
data: myds""" % (transform_dir, analyze_dir))
mlmagic.ml(
line='dataset create',
cell="""\
format: transformed
name: transformed_ds
train: %s/train-*
eval: %s/eval-*""" % (transform_dir, transform_dir))
mlmagic.ml(
line='train',
cell="""\
output: %s
analysis: %s
data: transformed_ds
notb: true
model_args:
model: linear_classification
top-n: 0
max-steps: 200""" % (train_dir, analyze_dir))
@unittest.skipIf(not six.PY2, 'Integration test that invokes mlworkbench with DataFlow.')
def test_text_explainer(self):
"""Test text explainer."""
self._logger.debug('Starting text explainer test.')
self._create_text_test_data()
explainer = PredictionExplainer(os.path.join(self._test_dir, 'traintxt', 'model'))
exp_instance = explainer.explain_text(['apple', 'lime', 'cucumber'], '4,green long')
apple = exp_instance.as_list(label=0)
self.assertEqual(len(apple), 2)
for word, score in apple:
# "green" and "long" are both negative to "apple"
self.assertLess(score, 0.0)
cucumber = exp_instance.as_list(label=2)
self.assertEqual(len(cucumber), 2)
for word, score in cucumber:
# "green" and "long" are both positive to "cucumber"
self.assertGreater(score, 0.0)
@unittest.skipIf(not six.PY2, 'Integration test that invokes mlworkbench with DataFlow.')
def test_image_explainer(self):
"""Test image explainer."""
self._logger.debug('Starting image explainer test.')
self._create_image_test_data()
explainer = PredictionExplainer(os.path.join(self._test_dir, 'trainimg', 'model'))
exp_instance = explainer.explain_image(
['true', 'false'],
'4,2.0,word2 word1,%s' % os.path.join(self._test_dir, 'img1.jpg'),
num_samples=50)
for i in range(2):
image, mask = exp_instance.get_image_and_mask(i, positive_only=False, num_features=3)
# image's dimension is length*width*channel
self.assertEqual(len(np.asarray(image).shape), 3)
# mask's dimension is length*width
self.assertEqual(len(np.asarray(mask).shape), 2)
@unittest.skipIf(not six.PY2, 'Integration test that invokes mlworkbench with DataFlow.')
def test_image_prober(self):
"""Test image explainer."""
self._logger.debug('Starting image prober test.')
self._create_image_test_data()
explainer = PredictionExplainer(os.path.join(self._test_dir, 'trainimg', 'model'))
raw_image, grads_vizs = explainer.probe_image(
['true', 'false'],
'4,2.0,word2 word1,%s' % os.path.join(self._test_dir, 'img1.jpg'),
num_scaled_images=5,
top_percent=20)
self.assertEqual((299, 299, 3), np.asarray(raw_image).shape)
for im in grads_vizs:
self.assertEqual((299, 299, 3), np.asarray(im).shape)
arr = np.asarray(im)
arr = arr.reshape(-1)
self.assertGreater(float((arr == 0).sum()) / len(arr), 0.79)
@unittest.skipIf(not six.PY2, 'Integration test that invokes mlworkbench with DataFlow.')
def test_tabular_explainer(self):
"""Test tabular explainer."""
self._logger.debug('Starting tabular explainer test.')
train_df = self._create_tabular_test_data()
explainer = PredictionExplainer(os.path.join(self._test_dir, 'traintab', 'model'))
exp_instance = explainer.explain_tabular(train_df, ['true', 'false'], '8,-1.0,tuesday,word3',
num_features=5)
for i in range(2):
label_data = exp_instance.as_list(label=i)
# There should be 2 entries. One for categorical ("weekday") and one for numeric ("num")
# label_data should look like:
# [
# ("weekday=tuesday", 0.02),
# ("num > 1.0", 0.03),
# ]
self.assertEqual(2, len(label_data))
keys = [x[0] for x in label_data]
self.assertIn('weekday=tuesday', keys)
keys.remove('weekday=tuesday')
self.assertTrue('num' in keys[0])
|
cyrusin/tornado | refs/heads/master | tornado/test/windows_test.py | 16 | import functools
import os
import socket
import unittest
from tornado.platform.auto import set_close_exec
skipIfNonWindows = unittest.skipIf(os.name != 'nt', 'non-windows platform')
@skipIfNonWindows
class WindowsTest(unittest.TestCase):
def test_set_close_exec(self):
# set_close_exec works with sockets.
s = socket.socket()
self.addCleanup(s.close)
set_close_exec(s.fileno())
# But it doesn't work with pipes.
r, w = os.pipe()
self.addCleanup(functools.partial(os.close, r))
self.addCleanup(functools.partial(os.close, w))
with self.assertRaises(WindowsError) as cm:
set_close_exec(r)
ERROR_INVALID_HANDLE = 6
self.assertEqual(cm.exception.winerror, ERROR_INVALID_HANDLE)
|
AEDA-Solutions/matweb | refs/heads/master | backend/Models/Predio/RespostaListar.py | 1 | from Framework.Resposta import Resposta
from Models.Predio.Predio import Predio as ModelPredio
class RespostaListar(Resposta):
    def __init__(self, predios):
self.corpo = []
for predio in predios:
self.corpo.append(ModelPredio(predio))
|
nhicher/ansible | refs/heads/devel | lib/ansible/modules/storage/infinidat/infini_fs.py | 70 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Gregory Shulov ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_fs
version_added: 2.3
short_description: Create, Delete or Modify filesystems on Infinibox
description:
- This module creates, deletes or modifies filesystems on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- File system name.
required: true
state:
description:
- Creates/Modifies file system when present or removes when absent.
required: false
default: present
choices: [ "present", "absent" ]
size:
description:
- File system size in MB, GB or TB units. See examples.
required: false
pool:
description:
- Pool that will host file system.
required: true
extends_documentation_fragment:
- infinibox
requirements:
- capacity
'''
EXAMPLES = '''
- name: Create new file system named foo under pool named bar
infini_fs:
name: foo
size: 1TB
pool: bar
state: present
user: admin
password: secret
system: ibox001
'''
RETURN = '''
'''
try:
from capacity import KiB, Capacity
HAS_CAPACITY = True
except ImportError:
HAS_CAPACITY = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
@api_wrapper
def get_pool(module, system):
"""Return Pool or None"""
try:
return system.pools.get(name=module.params['pool'])
    except Exception:
return None
@api_wrapper
def get_filesystem(module, system):
"""Return Filesystem or None"""
try:
return system.filesystems.get(name=module.params['name'])
    except Exception:
return None
@api_wrapper
def create_filesystem(module, system):
"""Create Filesystem"""
if not module.check_mode:
filesystem = system.filesystems.create(name=module.params['name'], pool=get_pool(module, system))
if module.params['size']:
size = Capacity(module.params['size']).roundup(64 * KiB)
filesystem.update_size(size)
module.exit_json(changed=True)
@api_wrapper
def update_filesystem(module, filesystem):
"""Update Filesystem"""
changed = False
if module.params['size']:
size = Capacity(module.params['size']).roundup(64 * KiB)
if filesystem.get_size() != size:
if not module.check_mode:
filesystem.update_size(size)
changed = True
module.exit_json(changed=changed)
@api_wrapper
def delete_filesystem(module, filesystem):
""" Delete Filesystem"""
if not module.check_mode:
filesystem.delete()
module.exit_json(changed=True)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
pool=dict(required=True),
size=dict()
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg='infinisdk is required for this module')
if not HAS_CAPACITY:
module.fail_json(msg='The capacity python library is required for this module')
if module.params['size']:
try:
Capacity(module.params['size'])
        except Exception:
module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
state = module.params['state']
system = get_system(module)
pool = get_pool(module, system)
filesystem = get_filesystem(module, system)
if pool is None:
module.fail_json(msg='Pool {} not found'.format(module.params['pool']))
if state == 'present' and not filesystem:
create_filesystem(module, system)
elif state == 'present' and filesystem:
update_filesystem(module, filesystem)
elif state == 'absent' and filesystem:
delete_filesystem(module, filesystem)
elif state == 'absent' and not filesystem:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
|
tellapart/Diamond | refs/heads/master | src/collectors/openstackswift/openstackswift.py | 6 | # coding=utf-8
"""
Openstack swift collector.
#### Dependencies
* swift-dispersion-report commandline tool (for dispersion report)
  If using this, make sure swift.conf and dispersion.conf are readable by
  diamond. Also get an idea of the runtime of a swift-dispersion-report call
  and make sure the collect interval is high enough to avoid contention.
* swift commandline tool (for container_metrics)
Both of these should come installed with swift.
"""
import diamond.collector
from subprocess import Popen, PIPE
try:
import json
json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
class OpenstackSwiftCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(OpenstackSwiftCollector,
self).get_default_config_help()
config_help.update({
'enable_dispersion_report': 'gather swift-dispersion-report metrics'
+ ' (default False)',
'enable_container_metrics': 'gather containers metrics'
+ '(# objects, bytes used, x_timestamp. default True)',
'auth_url': 'authentication url (for enable_container_metrics)',
'account': 'swift auth account (for enable_container_metrics)',
'user': 'swift auth user (for enable_container_metrics)',
'password': 'swift auth password (for enable_container_metrics)',
            'containers': 'containers on which to count number of objects, '
            + 'comma separated list (for enable_container_metrics)'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(OpenstackSwiftCollector, self).get_default_config()
config.update({
'path': 'openstackswift',
'enable_dispersion_report': False,
'enable_container_metrics': True,
# don't use the threaded model with this one.
# for some reason it crashes.
'interval': 1200, # by default, every 20 minutes
})
return config
def collect(self):
# dispersion report. this can take easily >60s. beware!
if (self.config['enable_dispersion_report']):
p = Popen(
['swift-dispersion-report', '-j'],
stdout=PIPE,
stderr=PIPE)
stdout, stderr = p.communicate()
self.publish('dispersion.errors', len(stderr.split('\n')) - 1)
data = json.loads(stdout)
for t in ('object', 'container'):
for (k, v) in data[t].items():
self.publish('dispersion.%s.%s' % (t, k), v)
# container metrics returned by stat <container>
if(self.config['enable_container_metrics']):
account = '%s:%s' % (self.config['account'], self.config['user'])
for container in self.config['containers'].split(','):
cmd = ['swift', '-A', self.config['auth_url'],
'-U', account,
'-K', self.config['password'],
'stat', container]
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
stats = {}
# stdout is some lines in 'key : val' format
for line in stdout.split('\n'):
if line:
line = line.split(':', 2)
stats[line[0].strip()] = line[1].strip()
key = 'container_metrics.%s.%s' % (self.config['account'],
container)
self.publish('%s.objects' % key, stats['Objects'])
self.publish('%s.bytes' % key, stats['Bytes'])
self.publish('%s.x_timestamp' % key, stats['X-Timestamp'])
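# Illustrative metric paths emitted above (hypothetical names; Diamond adds
# its own configured prefix, and <k> is whatever key the dispersion report
# JSON returns):
#
#     openstackswift.dispersion.errors
#     openstackswift.dispersion.object.<k>
#     openstackswift.container_metrics.<account>.<container>.objects
#     openstackswift.container_metrics.<account>.<container>.bytes
#     openstackswift.container_metrics.<account>.<container>.x_timestamp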
|
solring/TWCompanyTree | refs/heads/master | TWCTenv/lib/python2.7/site-packages/setuptools/tests/test_packageindex.py | 66 | """Package Index Tests
"""
import sys
import unittest
import pkg_resources
from setuptools.compat import urllib2, httplib, HTTPError, unicode
import distutils.errors
import setuptools.package_index
from setuptools.tests.server import IndexServer
class TestPackageIndex(unittest.TestCase):
def test_bad_url_bad_port(self):
index = setuptools.package_index.PackageIndex()
url = 'http://127.0.0.1:0/nonesuch/test_package_index'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue(url in str(v))
else:
self.assertTrue(isinstance(v, HTTPError))
def test_bad_url_typo(self):
# issue 16
# easy_install inquant.contentmirror.plone breaks because of a typo
# in its home URL
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue(url in str(v))
else:
self.assertTrue(isinstance(v, HTTPError))
def test_bad_url_bad_status_line(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
def _urlopen(*args):
raise httplib.BadStatusLine('line')
index.opener = _urlopen
url = 'http://example.com'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue('line' in str(v))
else:
raise AssertionError('Should have raise here!')
def test_bad_url_double_scheme(self):
"""
A bad URL with a double scheme should raise a DistutilsError.
"""
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
# issue 20
url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
try:
index.open_url(url)
except distutils.errors.DistutilsError:
error = sys.exc_info()[1]
msg = unicode(error)
assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg
return
raise RuntimeError("Did not raise")
def test_bad_url_screwy_href(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
# issue #160
if sys.version_info[0] == 2 and sys.version_info[1] == 7:
# this should not fail
url = 'http://example.com'
page = ('<a href="http://www.famfamfam.com]('
'http://www.famfamfam.com/">')
index.process_index(url, page)
def test_url_ok(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
url = 'file:///tmp/test_package_index'
self.assertTrue(index.url_ok(url, True))
def test_links_priority(self):
"""
Download links from the pypi simple index should be used before
external download links.
https://bitbucket.org/tarek/distribute/issue/163
        Use case:
- someone uploads a package on pypi, a md5 is generated
- someone manually copies this link (with the md5 in the url) onto an
external page accessible from the package page.
- someone reuploads the package (with a different md5)
- while easy_installing, an MD5 error occurs because the external link
is used
-> Setuptools should use the link from pypi, not the external one.
"""
if sys.platform.startswith('java'):
# Skip this test on jython because binding to :0 fails
return
# start an index server
server = IndexServer()
server.start()
index_url = server.base_url() + 'test_links_priority/simple/'
# scan a test index
pi = setuptools.package_index.PackageIndex(index_url)
requirement = pkg_resources.Requirement.parse('foobar')
pi.find_packages(requirement)
server.stop()
# the distribution has been found
self.assertTrue('foobar' in pi)
# we have only one link, because links are compared without md5
self.assertTrue(len(pi['foobar'])==1)
# the link should be from the index
self.assertTrue('correct_md5' in pi['foobar'][0].location)
def test_parse_bdist_wininst(self):
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win32-py2.4.exe'), ('reportlab-2.5', '2.4', 'win32'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win32.exe'), ('reportlab-2.5', None, 'win32'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win-amd64-py2.7.exe'), ('reportlab-2.5', '2.7', 'win-amd64'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win-amd64.exe'), ('reportlab-2.5', None, 'win-amd64'))
def test__vcs_split_rev_from_url(self):
"""
Test the basic usage of _vcs_split_rev_from_url
"""
vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url
url, rev = vsrfu('https://example.com/bar@2995')
self.assertEqual(url, 'https://example.com/bar')
self.assertEqual(rev, '2995')
class TestContentCheckers(unittest.TestCase):
def test_md5(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
checker.feed('You should probably not be using MD5'.encode('ascii'))
self.assertEqual(checker.hash.hexdigest(),
'f12895fdffbd45007040d2e44df98478')
self.assertTrue(checker.is_valid())
def test_other_fragment(self):
"Content checks should succeed silently if no hash is present"
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#something%20completely%20different')
checker.feed('anything'.encode('ascii'))
self.assertTrue(checker.is_valid())
def test_blank_md5(self):
"Content checks should succeed if a hash is empty"
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=')
checker.feed('anything'.encode('ascii'))
self.assertTrue(checker.is_valid())
def test_get_hash_name_md5(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
self.assertEqual(checker.hash_name, 'md5')
def test_report(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
rep = checker.report(lambda x: x, 'My message about %s')
self.assertEqual(rep, 'My message about md5')
|
zenodo/invenio | refs/heads/zenodo-master | invenio/legacy/bibupload/scripts/batchuploader.py | 13 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Batch Uploader: Batch uploading of metadata and documents.
Usage: batchuploader [options]
Examples:
$ batchuploader --documents
Options:
-m, --metadata metadata working mode
-d, --documents documents working mode
Scheduling options:
-u, --user=USER user name to store task, password needed
General options:
-h, --help print this help and exit
-v, --verbose=LEVEL verbose level (from 0 to 9, default 1)
 -V, --version print the script version
"""
from invenio.base.factory import with_app_context
@with_app_context()
def main():
from invenio.legacy.batchuploader.cli import main as batchuploader_main
return batchuploader_main()
|
Volvagia356/mobile-gst | refs/heads/master | gst.py | 1 | import requests
from time import time
from bs4 import BeautifulSoup
class FWDC(requests.Session):
def __init__(self, *args, **kwargs):
self.fwdc_data = {}
self.fwdc_data['FAST_CLIENT_WINDOW__'] = "FWDC.WND-0000-0000-0000"
self.fwdc_data['FAST_CLIENT_AJAX_ID__'] = 0
super(FWDC, self).__init__(*args, **kwargs)
def before_request(self):
self.fwdc_data['FAST_CLIENT_WHEN__'] = str(int(time()*1000))
self.fwdc_data['FAST_CLIENT_AJAX_ID__'] += 1
def after_request(self, response):
try:
self.fwdc_data['FAST_VERLAST__'] = response.headers['Fast-Ver-Last']
self.fwdc_data['FAST_VERLAST_SOURCE__'] = response.headers['Fast-Ver-Source']
except KeyError:
pass
def get(self, *args, **kwargs):
self.before_request()
if "params" not in kwargs: kwargs['params'] = {}
kwargs['params'].update(self.fwdc_data)
r = super(FWDC, self).get(*args, **kwargs)
self.after_request(r)
return r
def post(self, *args, **kwargs):
self.before_request()
if "data" not in kwargs: kwargs['data'] = {}
kwargs['data'].update(self.fwdc_data)
r = super(FWDC, self).post(*args, **kwargs)
self.after_request(r)
return r
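# Note (added for clarity, inferred from the code above): FWDC emulates the
# portal's stateful AJAX client. Every request is stamped with a millisecond
# timestamp and an incrementing AJAX id, and the Fast-Ver-* response headers
# are echoed back on subsequent calls so the server can correlate the session.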
class GST():
def __init__(self):
self.fwdc = FWDC()
def load_front_page(self):
self.fwdc.get("https://gst.customs.gov.my/TAP/_/")
self.fwdc.get("https://gst.customs.gov.my/TAP/_/", params={'Load': "1"})
def click_lookup_gst_status(self):
data = {
'DOC_MODAL_ID__': "0",
'EVENT__': "b-m",
'TYPE__': "0",
'CLOSECONFIRMED__': "false",
}
self.fwdc.post("https://gst.customs.gov.my/TAP/_/EventOccurred", data=data)
def select_radio_button(self, button_id):
data = {
button_id: "true",
'DOC_MODAL_ID__': "0",
}
self.fwdc.post("https://gst.customs.gov.my/TAP/_/Recalc", data=data)
def enter_text_field(self, field_id, text):
data = {
field_id: text,
'DOC_MODAL_ID__': "0",
}
r = self.fwdc.post("https://gst.customs.gov.my/TAP/_/Recalc", data=data)
r.encoding = "utf-8-sig"
return r.json()
def select_gst_num_radio(self):
self.select_radio_button("e-4")
def select_business_num_radio(self):
self.select_radio_button("e-7")
def select_business_name_radio(self):
self.select_radio_button("e-9")
def enter_gst_num(self, gst_num):
return self.enter_text_field("e-6", gst_num)
def enter_business_num(self, business_num):
return self.enter_text_field("e-8", business_num)
def enter_business_name(self, business_name):
return self.enter_text_field("e-a", business_name)
class GSTError(Exception): pass
def find_field_update(fwdc_response, field):
for field_update in fwdc_response['Updates']['FieldUpdates']:
if field_update['field'] == field:
return field_update
def is_field_visible(fwdc_response, field):
field_update = find_field_update(fwdc_response, field)
if field_update:
return field_update.get("visible", False)
return False
def parse_business_table(table_html):
FIELDS = ["gst_num", "legal_name", "trading_name", "date", "status"]
soup = BeautifulSoup(table_html)
rows = soup.tbody.find_all("tr", class_="DataRow", recursive=False)
data = []
for row in rows:
cells = row.find_all("td", recursive=False)
row_data = []
for cell in cells:
cell_data = cell.get_text()
row_data.append(cell_data)
row_dict = dict(zip(FIELDS, row_data))
data.append(row_dict)
return data
def get_table_from_response(fwdc_response):
field_update = find_field_update(fwdc_response, "e-h")
if not field_update:
if is_field_visible(fwdc_response, "e-k"):
raise GSTError("No Registrants Found!")
elif is_field_visible(fwdc_response, "e-p"):
raise GSTError("Over 100 results found. Please narrow search terms!")
elif is_field_visible(fwdc_response, "e-s"):
raise GSTError("Server under maintenance. Please check back later!")
else:
raise GSTError("Unknown error occured!")
table_html = field_update['value']
return parse_business_table(table_html)
def prepare_GST():
s = GST()
s.load_front_page()
s.click_lookup_gst_status()
return s
def search_gst_num(gst_num):
s = prepare_GST()
s.select_gst_num_radio()
response = s.enter_gst_num(gst_num)
return get_table_from_response(response)
def search_business_num(business_num):
s = prepare_GST()
s.select_business_num_radio()
response = s.enter_business_num(business_num)
return get_table_from_response(response)
def search_business_name(business_name):
s = prepare_GST()
s.select_business_name_radio()
response = s.enter_business_name(business_name)
return get_table_from_response(response)
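# Illustrative usage sketch (not part of the original module). The GST number
# below is a made-up placeholder; a real lookup hits the live customs portal.
if __name__ == "__main__":
    try:
        for business in search_gst_num("000000000000"):  # hypothetical number
            print("{gst_num}: {legal_name} ({status})".format(**business))
    except GSTError as e:
        print("Lookup failed: {0}".format(e))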
|
pepeportela/edx-platform | refs/heads/master | cms/djangoapps/contentstore/tests/test_import.py | 20 | # -*- coding: utf-8 -*-
# pylint: disable=protected-access
"""
Tests for import_course_from_xml using the mongo modulestore.
"""
import copy
from uuid import uuid4
import ddt
from django.conf import settings
from django.test.client import Client
from django.test.utils import override_settings
from mock import patch
from openedx.core.djangoapps.content.course_structures.tests import SignalDisconnectTestMixin
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import check_exact_number_of_calls, check_number_of_calls
from xmodule.modulestore.xml_importer import import_course_from_xml
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
@ddt.ddt
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE, SEARCH_ENGINE=None)
class ContentStoreImportTest(SignalDisconnectTestMixin, ModuleStoreTestCase):
"""
Tests that rely on the toy and test_import_course courses.
NOTE: refactor using CourseFactory so they do not.
"""
def setUp(self):
super(ContentStoreImportTest, self).setUp()
self.client = Client()
self.client.login(username=self.user.username, password=self.user_password)
# block_structure.update_course_in_cache cannot succeed in tests, as it needs to be run async on an lms worker
self.task_patcher = patch('openedx.core.djangoapps.content.block_structure.tasks.update_course_in_cache_v2')
self._mock_lms_task = self.task_patcher.start()
def tearDown(self):
self.task_patcher.stop()
super(ContentStoreImportTest, self).tearDown()
def load_test_import_course(self, target_id=None, create_if_not_present=True, module_store=None):
'''
Load the standard course used to test imports
(for do_import_static=False behavior).
'''
content_store = contentstore()
if module_store is None:
module_store = modulestore()
import_course_from_xml(
module_store,
self.user.id,
TEST_DATA_DIR,
['test_import_course'],
static_content_store=content_store,
do_import_static=False,
verbose=True,
target_id=target_id,
create_if_not_present=create_if_not_present,
)
course_id = module_store.make_course_key('edX', 'test_import_course', '2012_Fall')
course = module_store.get_course(course_id)
self.assertIsNotNone(course)
return module_store, content_store, course
def test_import_course_into_similar_namespace(self):
# Checks to make sure that a course with an org/course like
# edx/course can be imported into a namespace with an org/course
# like edx/course_name
module_store, __, course = self.load_test_import_course()
course_items = import_course_from_xml(
module_store,
self.user.id,
TEST_DATA_DIR,
['test_import_course_2'],
target_id=course.id,
verbose=True,
)
self.assertEqual(len(course_items), 1)
def test_unicode_chars_in_course_name_import(self):
"""
        Test that importing a course with unicode 'id' and 'display name' doesn't give UnicodeEncodeError
"""
# Test with the split modulestore because store.has_course fails in old mongo with unicode characters.
with modulestore().default_store(ModuleStoreEnum.Type.split):
module_store = modulestore()
course_id = module_store.make_course_key(u'Юникода', u'unicode_course', u'échantillon')
import_course_from_xml(
module_store,
self.user.id,
TEST_DATA_DIR,
['2014_Uni'],
target_id=course_id,
create_if_not_present=True
)
course = module_store.get_course(course_id)
self.assertIsNotNone(course)
# test that course 'display_name' same as imported course 'display_name'
self.assertEqual(course.display_name, u"Φυσικά το όνομα Unicode")
def test_static_import(self):
'''
Stuff in static_import should always be imported into contentstore
'''
_, content_store, course = self.load_test_import_course()
# make sure we have ONE asset in our contentstore ("should_be_imported.html")
all_assets, count = content_store.get_all_content_for_course(course.id)
print "len(all_assets)=%d" % len(all_assets)
self.assertEqual(len(all_assets), 1)
self.assertEqual(count, 1)
content = None
try:
location = course.id.make_asset_key('asset', 'should_be_imported.html')
content = content_store.find(location)
except NotFoundError:
pass
self.assertIsNotNone(content)
# make sure course.static_asset_path is correct
print "static_asset_path = {0}".format(course.static_asset_path)
self.assertEqual(course.static_asset_path, 'test_import_course')
def test_asset_import_nostatic(self):
'''
This test validates that an image asset is NOT imported when do_import_static=False
'''
content_store = contentstore()
module_store = modulestore()
import_course_from_xml(
module_store, self.user.id, TEST_DATA_DIR, ['toy'],
static_content_store=content_store, do_import_static=False,
create_if_not_present=True, verbose=True
)
course = module_store.get_course(module_store.make_course_key('edX', 'toy', '2012_Fall'))
# make sure we have NO assets in our contentstore
all_assets, count = content_store.get_all_content_for_course(course.id)
self.assertEqual(len(all_assets), 0)
self.assertEqual(count, 0)
def test_no_static_link_rewrites_on_import(self):
module_store = modulestore()
courses = import_course_from_xml(
module_store, self.user.id, TEST_DATA_DIR, ['toy'], do_import_static=False, verbose=True,
create_if_not_present=True
)
course_key = courses[0].id
handouts = module_store.get_item(course_key.make_usage_key('course_info', 'handouts'))
self.assertIn('/static/', handouts.data)
handouts = module_store.get_item(course_key.make_usage_key('html', 'toyhtml'))
self.assertIn('/static/', handouts.data)
def test_tab_name_imports_correctly(self):
_module_store, _content_store, course = self.load_test_import_course()
print "course tabs = {0}".format(course.tabs)
self.assertEqual(course.tabs[2]['name'], 'Syllabus')
def test_import_performance_mongo(self):
store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)
# we try to refresh the inheritance tree for each update_item in the import
with check_exact_number_of_calls(store, 'refresh_cached_metadata_inheritance_tree', 28):
# _get_cached_metadata_inheritance_tree should be called once
with check_exact_number_of_calls(store, '_get_cached_metadata_inheritance_tree', 1):
# with bulk-edit in progress, the inheritance tree should be recomputed only at the end of the import
# NOTE: On Jenkins, with memcache enabled, the number of calls here is 1.
# Locally, without memcache, the number of calls is 1 (publish no longer counted)
with check_number_of_calls(store, '_compute_metadata_inheritance_tree', 1):
self.load_test_import_course(create_if_not_present=False, module_store=store)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_reimport(self, default_ms_type):
with modulestore().default_store(default_ms_type):
__, __, course = self.load_test_import_course(create_if_not_present=True)
self.load_test_import_course(target_id=course.id)
def test_rewrite_reference_list(self):
# This test fails with split modulestore (the HTML component is not in "different_course_id" namespace).
# More investigation needs to be done.
module_store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)
target_id = module_store.make_course_key('testX', 'conditional_copy', 'copy_run')
import_course_from_xml(
module_store,
self.user.id,
TEST_DATA_DIR,
['conditional'],
target_id=target_id
)
conditional_module = module_store.get_item(
target_id.make_usage_key('conditional', 'condone')
)
self.assertIsNotNone(conditional_module)
different_course_id = module_store.make_course_key('edX', 'different_course', None)
self.assertListEqual(
[
target_id.make_usage_key('problem', 'choiceprob'),
different_course_id.make_usage_key('html', 'for_testing_import_rewrites')
],
conditional_module.sources_list
)
self.assertListEqual(
[
target_id.make_usage_key('html', 'congrats'),
target_id.make_usage_key('html', 'secret_page')
],
conditional_module.show_tag_list
)
def test_rewrite_reference_value_dict_published(self):
"""
Test rewriting references in ReferenceValueDict, specifically with published content.
"""
self._verify_split_test_import(
'split_test_copy',
'split_test_module',
'split1',
{"0": 'sample_0', "2": 'sample_2'},
)
def test_rewrite_reference_value_dict_draft(self):
"""
Test rewriting references in ReferenceValueDict, specifically with draft content.
"""
self._verify_split_test_import(
'split_test_copy_with_draft',
'split_test_module_draft',
'fb34c21fe64941999eaead421a8711b8',
{"0": '9f0941d021414798836ef140fb5f6841', "1": '0faf29473cf1497baa33fcc828b179cd'},
)
def _verify_split_test_import(self, target_course_name, source_course_name, split_test_name, groups_to_verticals):
module_store = modulestore()
target_id = module_store.make_course_key('testX', target_course_name, 'copy_run')
import_course_from_xml(
module_store,
self.user.id,
TEST_DATA_DIR,
[source_course_name],
target_id=target_id,
create_if_not_present=True
)
split_test_module = module_store.get_item(
target_id.make_usage_key('split_test', split_test_name)
)
self.assertIsNotNone(split_test_module)
remapped_verticals = {
key: target_id.make_usage_key('vertical', value) for key, value in groups_to_verticals.iteritems()
}
self.assertEqual(remapped_verticals, split_test_module.group_id_to_child)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_video_components_present_while_import(self, store):
"""
Test that video components with same edx_video_id are present while re-importing
"""
with modulestore().default_store(store):
module_store = modulestore()
course_id = module_store.make_course_key('edX', 'test_import_course', '2012_Fall')
# Import first time
__, __, course = self.load_test_import_course(target_id=course_id, module_store=module_store)
# Re-import
__, __, re_course = self.load_test_import_course(target_id=course.id, module_store=module_store)
vertical = module_store.get_item(re_course.id.make_usage_key('vertical', 'vertical_test'))
video = module_store.get_item(vertical.children[1])
self.assertEqual(video.display_name, 'default')
|
charles-g-young/Table2NetCDF | refs/heads/master | gov/noaa/gmd/table_2_netcdf/Properties.py | 1 | '''
Global properties.
Created on Mar 5, 2017
@author: greg
'''
PROJECT_DIR="../../../.."
TEST_DATA_DIR=PROJECT_DIR+"/test-data"
|
TRESCLOUD/odoo | refs/heads/Integracion&ControlDeCalidad | addons/web/doc/_themes/flask_theme_support.py | 2228 | # flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
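# Illustrative usage sketch (not part of the original module): rendering a
# snippet with this style through Pygments' HtmlFormatter, which accepts a
# Style subclass directly.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    formatter = HtmlFormatter(style=FlaskyStyle, noclasses=True)
    print(highlight("print('hello')", PythonLexer(), formatter))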
|
zhenzhenyang-psu/bootcamp-central | refs/heads/master | web/2016/context.py | 9 | # Pyblue specific context
# Name, URL tuples
navbar = [
("Day 1", "/day1/day1_index.html"),
("Day 2", "/day2/day2_index.html"),
("Day 3", "/day3/day3_index.html"),
("Day 4", "/day4/day4_index.html"),
("Day 5", "/day5/day5_index.html"),
]
|
iRGBit/QGIS | refs/heads/master | python/plugins/processing/algs/qgis/Grid.py | 5 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Grid.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import math
from PyQt4.QtCore import QVariant
from qgis.core import QgsRectangle, QgsCoordinateReferenceSystem, QGis, QgsField, QgsFeature, QgsGeometry, QgsPoint
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterExtent
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterCrs
from processing.core.outputs import OutputVector
class Grid(GeoAlgorithm):
TYPE = 'TYPE'
EXTENT = 'EXTENT'
HSPACING = 'HSPACING'
VSPACING = 'VSPACING'
CRS = 'CRS'
OUTPUT = 'OUTPUT'
TYPES = ['Rectangle (line)',
'Rectangle (polygon)',
'Diamond (polygon)',
'Hexagon (polygon)'
]
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Create grid')
self.group, self.i18n_group = self.trAlgorithm('Vector creation tools')
self.addParameter(ParameterSelection(self.TYPE,
self.tr('Grid type'), self.TYPES))
self.addParameter(ParameterExtent(self.EXTENT,
self.tr('Grid extent')))
self.addParameter(ParameterNumber(self.HSPACING,
self.tr('Horizontal spacing'), default=10.0))
self.addParameter(ParameterNumber(self.VSPACING,
self.tr('Vertical spacing'), default=10.0))
self.addParameter(ParameterCrs(self.CRS, 'Grid CRS'))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Grid')))
def processAlgorithm(self, progress):
idx = self.getParameterValue(self.TYPE)
extent = self.getParameterValue(self.EXTENT).split(',')
hSpacing = self.getParameterValue(self.HSPACING)
vSpacing = self.getParameterValue(self.VSPACING)
crs = QgsCoordinateReferenceSystem(self.getParameterValue(self.CRS))
bbox = QgsRectangle(float(extent[0]), float(extent[2]),
float(extent[1]), float(extent[3]))
width = bbox.width()
height = bbox.height()
centerX = bbox.center().x()
centerY = bbox.center().y()
originX = centerX - width / 2.0
originY = centerY - height / 2.0
if hSpacing <= 0 or vSpacing <= 0:
raise GeoAlgorithmExecutionException(
self.tr('Invalid grid spacing: %s/%s' % (hSpacing, vSpacing)))
if width < hSpacing:
raise GeoAlgorithmExecutionException(
self.tr('Horizontal spacing is too small for the covered area'))
if height < vSpacing:
raise GeoAlgorithmExecutionException(
self.tr('Vertical spacing is too small for the covered area'))
if self.TYPES[idx].find('polygon') >= 0:
geometryType = QGis.WKBPolygon
else:
geometryType = QGis.WKBLineString
fields = [QgsField('left', QVariant.Double, '', 24, 16),
QgsField('top', QVariant.Double, '', 24, 16),
QgsField('right', QVariant.Double, '', 24, 16),
QgsField('bottom', QVariant.Double, '', 24, 16)
]
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
geometryType, crs)
if idx == 0:
self._rectangleGridLine(
writer, width, height, originX, originY, hSpacing, vSpacing)
elif idx == 1:
self._rectangleGridPoly(
writer, width, height, originX, originY, hSpacing, vSpacing)
elif idx == 2:
self._diamondGrid(
writer, width, height, originX, originY, hSpacing, vSpacing)
elif idx == 3:
self._hexagonGrid(
writer, width, height, originX, originY, hSpacing, vSpacing)
del writer
def _rectangleGridLine(self, writer, width, height, originX, originY,
hSpacing, vSpacing):
ft = QgsFeature()
columns = int(math.floor(float(width) / hSpacing))
rows = int(math.floor(float(height) / vSpacing))
# Longitude lines
for col in xrange(0, columns + 1):
polyline = []
x = originX + (col * hSpacing)
for row in xrange(0, rows + 1):
y = originY + (row * vSpacing)
polyline.append(QgsPoint(x, y))
ft.setGeometry(QgsGeometry.fromPolyline(polyline))
ft.setAttributes([x, originY, x, originY + (rows * vSpacing)])
writer.addFeature(ft)
# Latitude lines
for row in xrange(0, rows + 1):
polyline = []
y = originY + (row * vSpacing)
for col in xrange(0, columns + 1):
x = originX + (col * hSpacing)
polyline.append(QgsPoint(x, y))
ft.setGeometry(QgsGeometry.fromPolyline(polyline))
            ft.setAttributes([originX, y, originX + (columns * hSpacing), y])
writer.addFeature(ft)
def _rectangleGridPoly(self, writer, width, height, originX, originY,
hSpacing, vSpacing):
ft = QgsFeature()
columns = int(math.floor(float(width) / hSpacing))
rows = int(math.floor(float(height) / vSpacing))
for col in xrange(0, columns):
# (column + 1) and (row + 1) calculation is used to maintain
# topology between adjacent shapes and avoid overlaps/holes
# due to rounding errors
x1 = originX + (col * hSpacing)
x2 = originX + ((col + 1) * hSpacing)
for row in xrange(0, rows):
y1 = originY + (row * vSpacing)
y2 = originY + ((row + 1) * vSpacing)
polyline = []
polyline.append(QgsPoint(x1, y1))
polyline.append(QgsPoint(x2, y1))
polyline.append(QgsPoint(x2, y2))
polyline.append(QgsPoint(x1, y2))
polyline.append(QgsPoint(x1, y1))
ft.setGeometry(QgsGeometry.fromPolygon([polyline]))
ft.setAttributes([x1, y1, x2, y2])
writer.addFeature(ft)
def _diamondGrid(self, writer, width, height, originX, originY,
hSpacing, vSpacing):
ft = QgsFeature()
halfHSpacing = hSpacing / 2
halfVSpacing = vSpacing / 2
columns = int(math.floor(float(width) / halfHSpacing))
rows = int(math.floor(float(height) / vSpacing))
for col in xrange(0, columns):
x1 = originX + ((col + 0) * halfHSpacing)
x2 = originX + ((col + 1) * halfHSpacing)
x3 = originX + ((col + 2) * halfHSpacing)
for row in xrange(0, rows):
if (col % 2) == 0:
y1 = originY + (((row * 2) + 0) * halfVSpacing)
y2 = originY + (((row * 2) + 1) * halfVSpacing)
y3 = originY + (((row * 2) + 2) * halfVSpacing)
else:
y1 = originY + (((row * 2) + 1) * halfVSpacing)
y2 = originY + (((row * 2) + 2) * halfVSpacing)
y3 = originY + (((row * 2) + 3) * halfVSpacing)
polyline = []
polyline.append(QgsPoint(x1, y2))
polyline.append(QgsPoint(x2, y1))
polyline.append(QgsPoint(x3, y2))
polyline.append(QgsPoint(x2, y3))
polyline.append(QgsPoint(x1, y2))
ft.setGeometry(QgsGeometry.fromPolygon([polyline]))
ft.setAttributes([x1, y1, x3, y3])
writer.addFeature(ft)
def _hexagonGrid(self, writer, width, height, originX, originY,
hSpacing, vSpacing):
ft = QgsFeature()
# To preserve symmetry, hspacing is fixed relative to vspacing
xVertexLo = 0.288675134594813 * vSpacing
xVertexHi = 0.577350269189626 * vSpacing
hSpacing = xVertexLo + xVertexHi
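        # (comment added for clarity, derived from the constants above:
        # 0.288675... = 1/(2*sqrt(3)) and 0.577350... = 1/sqrt(3), so
        # hSpacing = sqrt(3)/2 * vSpacing -- the horizontal pitch between
        # adjacent hexagon columns for a regular hexagon of height vSpacing)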
halfVSpacing = vSpacing / 2
columns = int(math.floor(float(width) / hSpacing))
rows = int(math.floor(float(height) / vSpacing))
for col in xrange(0, columns):
# (column + 1) and (row + 1) calculation is used to maintain
# topology between adjacent shapes and avoid overlaps/holes
# due to rounding errors
x1 = originX + (col * hSpacing) # far left
x2 = x1 + (xVertexHi - xVertexLo) # left
x3 = originX + ((col + 1) * hSpacing) # right
x4 = x3 + (xVertexHi - xVertexLo) # far right
for row in xrange(0, rows):
if (col % 2) == 0:
y1 = originY + (((row * 2) + 0) * halfVSpacing) # hi
y2 = originY + (((row * 2) + 1) * halfVSpacing) # mid
y3 = originY + (((row * 2) + 2) * halfVSpacing) # lo
else:
y1 = originY + (((row * 2) + 1) * halfVSpacing) # hi
y2 = originY + (((row * 2) + 2) * halfVSpacing) # mid
y3 = originY + (((row * 2) + 3) * halfVSpacing) # lo
polyline = []
polyline.append(QgsPoint(x1, y2))
polyline.append(QgsPoint(x2, y1))
polyline.append(QgsPoint(x3, y1))
polyline.append(QgsPoint(x4, y2))
polyline.append(QgsPoint(x3, y3))
polyline.append(QgsPoint(x2, y3))
polyline.append(QgsPoint(x1, y2))
ft.setGeometry(QgsGeometry.fromPolygon([polyline]))
ft.setAttributes([x1, y1, x4, y3])
writer.addFeature(ft)
|
ad-m/django-atom | refs/heads/master | atom/ext/guardian/tests.py | 1 | from guardian.shortcuts import assign_perm
from django.core.exceptions import ImproperlyConfigured
class PermissionStatusMixin(object):
"""Mixin to verify object permission status codes for different users
Require user with username='john' and password='pass'
Attributes:
permission (list): Description
status_anonymous (int): Status code for anonymouser
status_has_permission (int): Status code for user with permission
status_no_permission (403): Status code for user without permission
url (string): url to test
"""
url = None
permission = None
status_anonymous = 302
status_no_permission = 403
status_has_permission = 200
def setUp(self):
super(PermissionStatusMixin, self).setUp()
def get_url(self):
"""Get url to tests
Returns:
str: url to test
Raises:
ImproperlyConfigured: Missing a url to test
"""
if self.url is None:
raise ImproperlyConfigured(
'{0} is missing a url to test. Define {0}.url '
'or override {0}.get_url().'.format(self.__class__.__name__))
return self.url
def get_permission(self):
"""Returns the permission to assign for granted permission user
Returns:
list: A list of permission in format ```codename.permission_name```
Raises:
ImproperlyConfigured: Missing a permission to assign
"""
if self.permission is None:
raise ImproperlyConfigured(
'{0} is missing a permissions to assign. Define {0}.permission '
'or override {0}.get_permission().'.format(self.__class__.__name__))
return self.permission
def get_permission_object(self):
"""Returns object of permission-carrying object for grant permission
"""
return getattr(self, 'permission_object', None)
def grant_permission(self):
"""Grant permission to user in self.user
Returns:
TYPE: Description
"""
for perm in self.get_permission():
obj = self.get_permission_object()
assign_perm(perm, self.user, obj)
def login_permitted_user(self):
"""Login client to user with granted permissions
"""
self.grant_permission()
self.client.login(username='john', password='pass')
def test_status_code_for_anonymous_user(self):
"""A test status code of response for anonymous user
"""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, self.status_anonymous)
def test_status_code_for_signed_user(self):
"""A test for status code of response for signed (logged-in) user
Only login before test.
"""
self.client.login(username='john', password='pass')
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, self.status_no_permission)
def test_status_code_for_privileged_user(self):
"""A test for status code of response for privileged user
Grant permission to permission-carrying object and login before test
"""
self.grant_permission()
self.client.login(username='john', password='pass')
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, self.status_has_permission)
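# Illustrative usage sketch (not part of the original module): a concrete test
# case mixing PermissionStatusMixin into a Django TestCase. The url and the
# permission codename below are hypothetical.
#
# from django.contrib.auth import get_user_model
# from django.test import TestCase
#
# class ArticleDetailPermissionTest(PermissionStatusMixin, TestCase):
#     url = '/articles/1/'
#     permission = ['articles.view_article']
#     def setUp(self):
#         super(ArticleDetailPermissionTest, self).setUp()
#         self.user = get_user_model().objects.create_user(
#             username='john', password='pass')
#         self.permission_object = None  # grant globally in this sketch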
|
tgerla/ansible | refs/heads/devel | test/units/playbook/test_playbook.py | 290 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook import Playbook
from ansible.vars import VariableManager
from units.mock.loader import DictDataLoader
class TestPlaybook(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_playbook(self):
fake_loader = DictDataLoader({})
p = Playbook(loader=fake_loader)
def test_basic_playbook(self):
fake_loader = DictDataLoader({
"test_file.yml":"""
- hosts: all
""",
})
p = Playbook.load("test_file.yml", loader=fake_loader)
plays = p.get_plays()
def test_bad_playbook_files(self):
fake_loader = DictDataLoader({
# represents a playbook which is not a list of plays
"bad_list.yml": """
foo: bar
""",
# represents a playbook where a play entry is mis-formatted
"bad_entry.yml": """
-
- "This should be a mapping..."
""",
})
vm = VariableManager()
self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
|
aozima/rt-thread | refs/heads/master | bsp/x1000/rtconfig.py | 9 | import os
# toolchains options
ARCH ='mips'
CPU ='x1000'
CROSS_TOOL ='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'E:\work\env\tools\gnu_gcc\mips_gcc\mips-2016.05\bin'
else:
    print('Please make sure your toolchain is GNU GCC!')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
# BUILD = 'debug'
BUILD = 'release'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'mips-sde-elf-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'g++'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
STRIP = PREFIX + 'strip'
DEVICE = ' -mips32r2 -msoft-float -mfp32'
CFLAGS = DEVICE + ' -EL -G0 -mno-abicalls -fno-pic -fno-builtin -fno-exceptions -ffunction-sections -fno-omit-frame-pointer'
AFLAGS = ' -c' + DEVICE + ' -EL -x assembler-with-cpp'
LFLAGS = DEVICE + ' -EL -Wl,--gc-sections,-Map=rtthread_x1000.map,-cref,-u,Reset_Handler -T x1000_ram.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
M_CFLAGS = DEVICE + ' -EL -G0 -O2 -mno-abicalls -fno-common -fno-exceptions -fno-omit-frame-pointer -mlong-calls -fno-pic '
M_CXXFLAGS = M_CFLAGS
M_LFLAGS = DEVICE + ' -EL -r -Wl,--gc-sections,-z,max-page-size=0x4' +\
' -nostartfiles -static-libgcc'
M_POST_ACTION = STRIP + ' -R .hash $TARGET\n' + SIZE + ' $TARGET \n'
DUMP_ACTION = OBJDUMP + ' -D -S $TARGET > rtt.asm\n'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
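# Note (added for clarity; assumption about the surrounding build system):
# this rtconfig.py is read by RT-Thread's scons-based build, e.g. running
# `scons` from this BSP directory, with RTT_CC/RTT_EXEC_PATH environment
# variables optionally overriding the toolchain settings above.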
|
MShel/ttw | refs/heads/master | stats/adapters/sqliteAdapter.py | 1 | from listener.packets.abstractPacket import AbstractPacket
from pprint import pprint
import sqlite3
from stats.adapters.abstractAdapter import AbstractAdapter
class SqliteAdapter(AbstractAdapter):
sqliteConnection = None
def __init__(self, credentials: dict):
self.sqliteConnection = sqlite3.connect(credentials['dbFilePath'])
self.schemas = ["""CREATE TABLE `packet_stats` (
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
`toAddress` varchar(255) DEFAULT '',
`fromAddress` varchar(255) DEFAULT '',
`protocol` varchar(255) DEFAULT '',
`toPort` int(11) DEFAULT '0',
`fromPort`int(11) DEFAULT '0',
`created_at` datetime default current_timestamp
);
""", """ CREATE TABLE `packet_data` (
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
`packet_id`int(11) DEFAULT '0',
`packet_data` TEXT DEFAULT '',
`created_at` datetime default current_timestamp
);"""]
    def recordPacket(self, packet: AbstractPacket):
        cursor = self.sqliteConnection.cursor()
        cursor.execute('INSERT INTO packet_stats(toAddress,fromAddress,protocol,toPort,fromPort) VALUES (?,?,?,?,?)', (packet.toAddress, packet.fromAddress, packet.protocol, packet.toPort, packet.fromPort))
        self.sqliteConnection.commit()
        packet_id = cursor.lastrowid  # id of the packet_stats row just inserted
        cursor.execute('INSERT INTO packet_data(packet_id,packet_data) VALUES (?,?)', (packet_id, packet.data))
        self.sqliteConnection.commit()
        return packet_id
def executeSchema(self):
for schema in self.schemas:
cursor = self.sqliteConnection.cursor()
cursor.execute(schema)
self.sqliteConnection.commit()
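# Illustrative usage sketch (not part of the original module); assumes a
# concrete AbstractPacket instance named ``packet`` from the listener layer.
#
# adapter = SqliteAdapter({'dbFilePath': '/tmp/ttw.db'})
# adapter.executeSchema()                # first run only: creates the tables
# packet_id = adapter.recordPacket(packet)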
|
dvliman/jaikuengine | refs/heads/master | .google_appengine/lib/django-1.2/tests/regressiontests/reverse_single_related/tests.py | 43 | from django.test import TestCase
from regressiontests.reverse_single_related.models import *
class ReverseSingleRelatedTests(TestCase):
"""
Regression tests for an object that cannot access a single related
object due to a restrictive default manager.
"""
def test_reverse_single_related(self):
public_source = Source.objects.create(is_public=True)
public_item = Item.objects.create(source=public_source)
private_source = Source.objects.create(is_public=False)
private_item = Item.objects.create(source=private_source)
# Only one source is available via all() due to the custom default manager.
self.assertQuerysetEqual(
Source.objects.all(),
["<Source: Source object>"]
)
self.assertEquals(public_item.source, public_source)
# Make sure that an item can still access its related source even if the default
# manager doesn't normally allow it.
self.assertEquals(private_item.source, private_source)
# If the manager is marked "use_for_related_fields", it'll get used instead
# of the "bare" queryset. Usually you'd define this as a property on the class,
# but this approximates that in a way that's easier in tests.
Source.objects.use_for_related_fields = True
private_item = Item.objects.get(pk=private_item.pk)
self.assertRaises(Source.DoesNotExist, lambda: private_item.source)
|
enigmagroup/enigmasuite | refs/heads/master | files/webinterface/app/views.py | 1 | from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from subprocess import Popen, PIPE
from app.models import *
from app.forms import *
import random
import string
import json
from datetime import datetime, timedelta
from django.views.decorators.csrf import csrf_exempt
from django.utils.translation import ugettext as _
from slugify import slugify
from helpers import *
def home(request):
o = Option()
if request.session.get('django_language') == None:
language = o.get_value('language', 'de')
request.session['django_language'] = language
return redirect('/')
language = request.session.get('django_language')
netstat = {
'dhcp': '0',
'internet': '0',
'cjdns': '0',
'cjdns_internet': '0',
}
try:
internet_access = o.get_value('internet_access')
dt = datetime.strptime(internet_access, '%Y-%m-%d')
if language == 'en':
internet_access_formatted = dt.strftime('%m %d, %Y')
else:
internet_access_formatted = dt.strftime('%d.%m.%Y')
except Exception:
internet_access = ''
internet_access_formatted = ''
for key, value in netstat.items():
try:
with open('/tmp/netstat-' + key, 'r') as f:
netstat[key] = f.read().strip()
except Exception:
pass
return render_to_response('home.html', {
'hostid': o.get_value('hostid'),
'internet_access': internet_access,
'internet_access_formatted': internet_access_formatted,
'teletext_enabled': o.get_value('teletext_enabled'),
'root_password': o.get_value('root_password'),
'netstat': netstat,
}, context_instance=RequestContext(request))
# language switcher
def switch_language(request, language):
o = Option()
language = o.set_value('language', language)
request.session['django_language'] = language
return redirect('/')
# Addressbook
def addressbook(request):
if request.POST:
form = AddressbookForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
a = Address()
a.name = cd['name'].strip()
a.display_name = cd['name'].replace('-', ' ').title()
a.ipv6 = cd['ipv6'].strip()
a.phone = cd['phone']
a.save()
o = Option()
o.config_changed(True)
return redirect('/addressbook/')
else:
form = AddressbookForm()
order = request.GET.get('order', 'id')
addresses = Address.objects.all().order_by(order)
sip_peers = Popen(["sudo", "asterisk", "-rx", "sip show peers"], stdout=PIPE).communicate()[0]
return render_to_response('addressbook/overview.html', {
'addresses': addresses,
'form': form,
'sip_peers': sip_peers,
}, context_instance=RequestContext(request))
def addressbook_edit(request, addr_id):
if request.POST:
if request.POST.get('submit') == 'delete':
a = Address.objects.get(pk=addr_id)
a.delete()
o = Option()
o.config_changed(True)
return redirect('/addressbook/')
form = AddressbookForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
a = Address.objects.get(pk=addr_id)
a.name = cd['name'].strip()
a.display_name = cd['display_name'].strip()
a.ipv6 = cd['ipv6'].strip()
a.phone = cd['phone']
a.save()
o = Option()
o.config_changed(True)
return redirect('/addressbook/')
else:
a = Address.objects.get(pk=addr_id)
form = AddressbookForm(initial={
'name': a.name,
'display_name': a.display_name,
'ipv6': a.ipv6,
'phone': a.phone,
})
address = Address.objects.get(pk=addr_id)
return render_to_response('addressbook/detail.html', {
'address': address,
'form': form,
}, context_instance=RequestContext(request))
def addressbook_global(request):
o = Option()
if request.POST.get('global-availability'):
o.toggle_value('global_availability')
o.config_changed(True)
return redirect('/addressbook-global/')
global_hostname = o.get_value('global_hostname')
global_phone = o.get_value('global_phone')
global_address_status = o.get_value('global_address_status')
global_availability = o.get_value('global_availability')
ipv6 = o.get_value('ipv6')
ipv6 = normalize_ipv6(ipv6)
import sqlite3
db = sqlite3.connect('/etc/enigmabox/addressbook.db')
db.text_factory = sqlite3.OptimizedUnicode
db_cursor = db.cursor()
db_cursor.execute("SELECT ipv6, hostname, phone FROM addresses ORDER BY id desc")
db_addresses = db_cursor.fetchall()
addresses = []
for adr in db_addresses:
addresses.append({
'ipv6': adr[0],
'name': adr[1],
'phone': adr[2],
'mine': '1' if normalize_ipv6(adr[0]) == ipv6 else '0',
})
return render_to_response('addressbook/overview-global.html', {
'global_hostname': global_hostname,
'global_phone': global_phone,
'global_address_status': global_address_status,
'global_availability': global_availability,
'addresses': addresses,
}, context_instance=RequestContext(request))
def addressbook_global_edit(request):
o = Option()
global_hostname = o.get_value('global_hostname', '')
global_phone = o.get_value('global_phone', '')
if request.POST.get('submit') == 'save':
form = GlobalAddressbookForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
name = cd['name'].strip()
phone = cd['phone']
o.set_value('global_hostname', name)
o.set_value('global_phone', phone)
o.set_value('global_address_status', 'pending')
return redirect('/addressbook-global/')
elif request.POST.get('submit') == 'delete':
o.set_value('global_hostname', '')
o.set_value('global_phone', '')
o.set_value('global_address_status', 'pending')
return redirect('/addressbook-global/')
else:
form = GlobalAddressbookForm(initial={
'name': global_hostname,
'phone': global_phone,
})
return render_to_response('addressbook/overview-global-edit.html', {
'form': form,
'global_hostname': global_hostname,
'global_phone': global_phone,
}, context_instance=RequestContext(request))
# Passwords
def passwords(request):
o = Option()
if request.POST.get('set_webinterface_password'):
o.set_value('webinterface_password', request.POST.get('webinterface_password'))
o.config_changed(True)
if request.POST.get('set_mailbox_password'):
o.set_value('mailbox_password', request.POST.get('mailbox_password'))
o.config_changed(True)
return render_to_response('passwords.html', {
'webinterface_password': o.get_value('webinterface_password'),
'mailbox_password': o.get_value('mailbox_password'),
}, context_instance=RequestContext(request))
# Upgrade
def upgrade(request):
step = 'overview'
show_output = False
errormsg = ''
if request.POST.get('start') == '1':
step = 'check_usb'
if request.POST.get('check_usb') == '1':
result = Popen(["sudo /bin/busybox sh /usr/sbin/upgrader check_usb"], shell=True, stdout=PIPE, close_fds=True).communicate()[0]
result = result.strip()
if result == 'yes':
step = 'format_usb'
elif result == 'sizefail':
step = 'check_usb'
errormsg = 'sizefail'
elif result == 'nodrive':
step = 'check_usb'
errormsg = 'nodrive'
if request.POST.get('format_usb') == '1':
Popen(["sudo /bin/busybox sh /usr/sbin/upgrader format_usb"], shell=True, stdout=PIPE, close_fds=True).communicate()[0]
step = 'backup_to_usb'
if request.POST.get('backup_to_usb') == '1':
Popen(["sudo /bin/busybox sh /usr/sbin/upgrader backup_to_usb 2>&1 > /tmp/backup_output"], shell=True, stdout=PIPE, close_fds=True)
show_output = True
step = 'backup_to_usb'
if request.POST.get('proceed_to_step_4') == '1':
step = 'download_image'
if request.POST.get('download_image') == '1':
result = Popen(["sudo /bin/busybox sh /usr/sbin/upgrader download_image"], shell=True, stdout=PIPE, close_fds=True).communicate()[0]
result = result.strip()
if result == 'yes':
step = 'ensure_usb_unplugged'
else:
step = 'download_image'
errormsg = 'imagefail'
if request.POST.get('ensure_usb_unplugged') == '1':
result = Popen(["sudo /bin/busybox sh /usr/sbin/upgrader check_usb"], shell=True, stdout=PIPE, close_fds=True).communicate()[0]
result = result.strip()
if result == 'nodrive':
step = 'start_upgrade'
else:
step = 'ensure_usb_unplugged'
errormsg = 'usbfail'
if request.POST.get('start_upgrade') == '1':
import os
os.system("sudo /bin/busybox sh /usr/sbin/upgrader run_upgrade &")
return render_to_response('upgrade/' + step + '.html', {
'show_output': show_output,
'errormsg': errormsg,
}, context_instance=RequestContext(request))
def backup_output(request):
with open('/tmp/backup_output', 'r') as f:
output = f.read()
from ansi2html import Ansi2HTMLConverter
from django.http import HttpResponse
conv = Ansi2HTMLConverter()
html = conv.convert(output, full=False)
return HttpResponse(html)
# Backup & restore
def backup(request):
o = Option()
if request.POST.get('set_webinterface_password'):
o.set_value('webinterface_password', request.POST.get('webinterface_password'))
o.config_changed(True)
if request.POST.get('set_mailbox_password'):
o.set_value('mailbox_password', request.POST.get('mailbox_password'))
o.config_changed(True)
return render_to_response('backup/overview.html', {
'webinterface_password': o.get_value('webinterface_password'),
'mailbox_password': o.get_value('mailbox_password'),
}, context_instance=RequestContext(request))
def backup_system(request):
o = Option()
temp_db = '/tmp/settings.sqlite'
final_db = '/box/settings.sqlite'
msg = False
if request.POST.get('backup'):
import os
from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper
filename = '/box/settings.sqlite'
wrapper = FileWrapper(file(filename))
response = HttpResponse(wrapper, content_type='application/x-sqlite3')
response['Content-Disposition'] = 'attachment; filename=settings.sqlite'
response['Content-Length'] = os.path.getsize(filename)
return response
if request.POST.get('upload_check'):
import sqlite3
try:
destination = open(temp_db, 'wb+')
for chunk in request.FILES['file'].chunks():
destination.write(chunk)
destination.close()
conn = sqlite3.connect(temp_db)
c = conn.cursor()
c.execute('select value from app_option where key = "ipv6"')
msg = c.fetchone()[0]
conn.close()
except Exception:
msg = 'invalid'
if request.POST.get('restore'):
import shutil
from crypt import crypt
shutil.move(temp_db, final_db)
# set rootpw
password = o.get_value('root_password')
salt = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(10))
hashed_password = crypt(password, "$6$" + salt + "$")
try:
Popen(['sudo', 'usermod', '-p', hashed_password, 'root'], stdout=PIPE).communicate()[0]
except Exception:
pass
o.config_changed(True)
o.set_value('internet_requested', 0)
return redirect('/backup/system/')
return render_to_response('backup/system.html', {
'msg': msg,
}, context_instance=RequestContext(request))
def backup_emails(request):
o = Option()
filename = '/tmp/emails.tar.gz'
msg = False
if request.POST.get('backup'):
import os
from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper
try:
Popen(["sudo", "/usr/local/sbin/backup-stuff", "emails"], stdout=PIPE).communicate()[0]
wrapper = FileWrapper(file(filename))
response = HttpResponse(wrapper, content_type='application/x-gzip')
response['Content-Disposition'] = 'attachment; filename=emails.tar.gz'
response['Content-Length'] = os.path.getsize(filename)
return response
except Exception:
msg = 'backuperror'
if request.POST.get('restore'):
try:
destination = open('/tmp/emails.tar.gz', 'wb+')
for chunk in request.FILES['file'].chunks():
destination.write(chunk)
destination.close()
Popen(["sudo", "/usr/local/sbin/restore-stuff", "emails"], stdout=PIPE).communicate()[0]
msg = 'restoresuccess'
except Exception:
msg = 'restoreerror'
return render_to_response('backup/emails.html', {
'msg': msg,
}, context_instance=RequestContext(request))
def backup_sslcerts(request):
o = Option()
hostid = o.get_value('hostid')
filename = '/tmp/sslcerts-' + hostid + '.zip'
msg = False
if request.POST.get('backup'):
import os
from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper
try:
Popen(["sudo", "/usr/local/sbin/backup-stuff", "sslcerts"], stdout=PIPE).communicate()[0]
wrapper = FileWrapper(file(filename))
response = HttpResponse(wrapper, content_type='application/x-gzip')
response['Content-Disposition'] = 'attachment; filename=sslcerts-' + hostid + '.zip'
response['Content-Length'] = os.path.getsize(filename)
return response
except Exception:
msg = 'backuperror'
if request.POST.get('restore'):
try:
destination = open(filename, 'wb+')
for chunk in request.FILES['file'].chunks():
destination.write(chunk)
destination.close()
Popen(["sudo", "/usr/local/sbin/restore-stuff", "sslcerts"], stdout=PIPE).communicate()[0]
msg = 'restoresuccess'
except Exception:
msg = 'restoreerror'
return render_to_response('backup/sslcerts.html', {
'msg': msg,
'hostid': hostid,
}, context_instance=RequestContext(request))
# Subscription
def subscription(request):
o = Option()
currency = request.POST.get('currency', 'CHF')
subscription = request.POST.get('subscription', '1')
amount_table = {}
amount_table['CHF'] = {
'1': 120,
'5': 500,
'lt': 1000,
}
amount_table['EUR'] = {
'1': 120,
'5': 500,
'lt': 1000,
}
amount_table['USD'] = {
'1': 130,
'5': 550,
'lt': 1100,
}
amount = amount_table[currency][subscription]
return render_to_response('subscription/overview.html', {
'hostid': o.get_value('hostid'),
'show_invoice': request.POST.get('show_invoice'),
'currency': currency,
'subscription': subscription,
'amount': amount,
}, context_instance=RequestContext(request))
def subscription_hide_notice(request):
o = Option()
o.set_value('expiration_notice_confirmed', '1')
try:
Popen(["sudo", "/usr/local/sbin/hide_expiration_notice"], stdout=PIPE).communicate()[0]
except Exception:
pass
try:
referrer = request.META['HTTP_REFERER']
except Exception:
referrer = ''
return redirect(referrer)
# Peerings
def peerings(request):
o = Option()
if request.POST.get('allow_peering'):
if o.get_value('peering_password') is None:
o.set_value('peering_password', ''.join(random.choice(string.ascii_letters + string.digits) for x in range(30)))
if o.get_value('peering_port') is None:
o.set_value('peering_port', random.randint(2000, 60000))
ap = o.get_value('allow_peering')
if ap == '1':
o.set_value('allow_peering', 0)
else:
o.set_value('allow_peering', 1)
o.config_changed(True)
if request.POST.get('autopeering'):
ap = o.get_value('autopeering')
if ap == '1':
o.set_value('autopeering', 0)
else:
o.set_value('autopeering', 1)
o.config_changed(True)
if request.POST.get('save_peeringinfo'):
o.set_value('peering_port', request.POST.get('peering_port'))
o.set_value('peering_password', request.POST.get('peering_password'))
o.config_changed(True)
peerings = Peering.objects.filter(custom=True).order_by('id')
return render_to_response('peerings/overview.html', {
'peerings': peerings,
'allow_peering': o.get_value('allow_peering'),
'autopeering': o.get_value('autopeering'),
'peering_port': o.get_value('peering_port'),
'peering_password': o.get_value('peering_password'),
'public_key': o.get_value('public_key'),
}, context_instance=RequestContext(request))
def peerings_edit(request, peering_id=None):
peering = ''
form = ''
if request.POST:
if request.POST.get('submit') == 'delete':
p = Peering.objects.get(pk=peering_id)
p.delete()
o = Option()
o.config_changed(True)
return redirect('/peerings/')
form = PeeringsForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
if peering_id == None:
p = Peering()
else:
p = Peering.objects.get(pk=peering_id)
p.address = cd['address'].strip()
p.public_key = cd['public_key'].strip()
p.password = cd['password'].strip()
p.description = cd['description'].strip()
p.custom = True
p.save()
o = Option()
o.config_changed(True)
return redirect('/peerings/')
else:
if peering_id:
peering = Peering.objects.get(pk=peering_id)
form = PeeringsForm(initial={
'address': peering.address,
'public_key': peering.public_key,
'password': peering.password,
'description': peering.description,
})
return render_to_response('peerings/detail.html', {
'peering': peering,
'form': form,
}, context_instance=RequestContext(request))
# Network selection
def network_selection(request):
o = Option()
if request.POST.get('set_network_preference_regular'):
o.set_value('network_preference', 'regular')
o.config_changed(True)
if request.POST.get('set_network_preference_topo128'):
o.set_value('network_preference', 'topo128')
o.config_changed(True)
network_preference = o.get_value('network_preference', 'regular')
return render_to_response('network_selection.html', {
'network_preference': network_preference,
}, context_instance=RequestContext(request))
# Country selection
def countryselect(request):
o = Option()
country = request.POST.get('country', False)
if country:
o.set_value('selected_country', country)
o.config_changed(True)
country_active = request.POST.get('country-active', False)
if country_active:
c = Country.objects.get(countrycode=country_active)
c.active = False
c.save()
country_inactive = request.POST.get('country-inactive', False)
if country_inactive:
c = Country.objects.get(countrycode=country_inactive)
c.active = True
c.save()
peerings = Peering.objects.filter(custom=False)
for p in peerings:
country = Country.objects.filter(countrycode=p.country)
if len(country) < 1:
c = Country()
c.countrycode = p.country
c.priority = 0
c.save()
    # kick out countries which aren't in the peerings anymore
countries = Country.objects.all()
for c in countries:
peering = Peering.objects.filter(country=c.countrycode)
if len(peering) < 1:
c.delete()
countries_trans = {
'ch': _('Switzerland'),
'se': _('Sweden'),
'hu': _('Hungary'),
'fr': _('France'),
'de': _('Germany'),
'us': _('United Stasi of America'),
}
db_countries = Country.objects.all().order_by('priority')
countries = []
for c in db_countries:
countries.append({
'countrycode': c.countrycode,
'active': c.active,
'priority': c.priority,
'countryname': countries_trans.get(c.countrycode),
})
return render_to_response('countryselect/overview.html', {
'countries': countries,
'countries_trans': countries_trans,
'selected_country': o.get_value('selected_country', 'ch'),
}, context_instance=RequestContext(request))
# Web filter
def webfilter(request):
o = Option()
if request.POST:
o.config_changed(True)
# always send that data, even if its an empty string
o.set_value('webfilter_custom-rules-text', request.POST.get('custom-rules-text'))
settings_fields = ['filter-ads', 'filter-headers', 'disable-browser-ident', 'block-facebook', 'block-google', 'block-twitter', 'custom-rules']
for postval in settings_fields:
if request.POST.get(postval):
o.toggle_value('webfilter_' + postval)
settings = {}
for getval in settings_fields:
field_name = getval.replace('-', '_')
setting_name = 'webfilter_' + getval
settings[field_name] = o.get_value(setting_name, '')
settings['custom_rules_text'] = o.get_value('webfilter_custom-rules-text', '')
return render_to_response('webfilter/overview.html', settings, context_instance=RequestContext(request))
# WLAN settings
def wlan_settings(request):
o = Option()
if request.POST:
o.set_value('wlan_ssid', request.POST.get('ssid'))
o.set_value('wlan_pass', request.POST.get('pass'))
o.set_value('wlan_security', request.POST.get('security'))
o.config_changed(True)
return render_to_response('wlan_settings/overview.html', {
'wlan_ssid': o.get_value('wlan_ssid', ''),
'wlan_pass': o.get_value('wlan_pass', ''),
'wlan_security': o.get_value('wlan_security', 'WPA2'),
}, context_instance=RequestContext(request))
def wlan_scan(request):
o = Option()
if request.POST:
o.set_value('wlan_ssid', request.POST.get('ssid'))
o.set_value('wlan_security', request.POST.get('security'))
o.set_value('wlan_group', request.POST.get('group'))
o.set_value('wlan_pairwise', request.POST.get('pairwise'))
return redirect('/wlan_settings/')
final_cells = []
Popen(["sudo", "ifconfig", "wlan0", "up"], stdout=PIPE).communicate()[0]
scan = Popen(["sudo", "iwlist", "wlan0", "scan"], stdout=PIPE).communicate()[0]
cells = scan.split('Cell ')
for cell in cells:
try:
ssid = cell.split('ESSID:')[1].split('\n')[0].replace('"', '').strip()
quality = cell.split('Quality=')[1].split(' ')[0].strip()
try:
group = cell.split('Group Cipher')[1].split('\n')[0].split(' ')[-1:][0].strip()
except Exception:
group = ''
try:
pairwise = cell.split('Pairwise Ciphers')[1].split('\n')[0].split(' ')[-1:][0].strip()
except Exception:
pairwise = ''
if 'WPA' in cell:
security = 'WPA'
else:
security = 'WEP'
final_cells.append({
'ssid': ssid,
'quality': quality,
'security': security,
'group': group,
'pairwise': pairwise,
})
except Exception:
pass
return render_to_response('wlan_settings/scan.html', {
'cells': final_cells,
}, context_instance=RequestContext(request))
# Teletext
def teletext(request):
o = Option()
if request.POST:
o.toggle_value('teletext_enabled')
o.config_changed(True)
return render_to_response('teletext/overview.html', {
'teletext_enabled': o.get_value('teletext_enabled', 0),
}, context_instance=RequestContext(request))
# Changes
def apply_changes(request):
output_window = False
loader_hint = ''
if request.POST.get('apply_changes') == 'dry_run':
output_window = True
loader_hint = 'dry-run'
Popen(["sudo", "/usr/local/sbin/puppet-apply", "-b"], stdout=PIPE)
if request.POST.get('apply_changes') == 'run':
output_window = True
loader_hint = 'run'
Popen(["sudo", "/usr/local/sbin/puppet-apply", "-r", "-b"], stdout=PIPE)
if request.POST.get('apply_changes') == 'back':
return redirect('/')
return render_to_response('changes/apply.html', {
'output_window': output_window,
'loader_hint': loader_hint,
}, context_instance=RequestContext(request))
def puppet_output(request):
with open('/tmp/puppet_output', 'r') as f:
output = f.read()
from ansi2html import Ansi2HTMLConverter
from django.http import HttpResponse
conv = Ansi2HTMLConverter()
html = conv.convert(output, full=False)
return HttpResponse(html)
# API
@csrf_exempt
def api_v1(request, api_url):
from django.http import HttpResponse
resp = {}
resp['result'] = 'failed'
if api_url == 'get_option':
try:
o = Option()
r = o.get_value(request.POST['key'])
resp['value'] = r
resp['result'] = 'success'
except Exception:
resp['message'] = 'option not found or POST.key parameter missing'
if api_url == 'set_option':
try:
o = Option()
o.set_value(request.POST['key'], request.POST['value'])
resp['result'] = 'success'
except Exception:
resp['message'] = 'error setting option'
if api_url == 'get_puppetmasters':
try:
puppetmasters = Puppetmaster.objects.all().order_by('priority')
data = []
for pm in puppetmasters:
data.append(pm.hostname)
resp['value'] = data
resp['result'] = 'success'
except Exception:
resp['message'] = 'fail'
if api_url == 'get_contacts':
try:
contacts = Address.objects.all().order_by('id')
data = []
for ct in contacts:
data.append({
'name': ct.name,
'display_name': ct.display_name,
'ipv6': ct.ipv6,
})
resp['value'] = data
resp['result'] = 'success'
except Exception:
resp['message'] = 'fail'
if api_url == 'add_contact':
try:
hostname = request.POST.get('hostname', '').strip()
hostname = slugify(hostname)
ipv6 = request.POST.get('ipv6', '').strip()
if hostname != '' and ipv6 != '':
a = Address()
a.name = hostname
a.display_name = hostname.replace('-', ' ').title()
a.ipv6 = ipv6
a.save()
resp['addrbook_url'] = 'http://enigma.box/addressbook/edit/' + str(a.id) + '/'
resp['result'] = 'success'
o = Option()
o.config_changed(True)
else:
                raise ValueError('hostname and ipv6 are required')
except Exception:
resp['message'] = 'fail'
if api_url == 'set_countries':
try:
countries = request.POST.get('countries', '').strip()
prio = 1
for country in countries.split(','):
c = Country.objects.get(countrycode=country)
c.priority = prio
c.save()
                prio = prio + 1
            resp['result'] = 'success'
except Exception:
resp['message'] = 'fail'
if api_url == 'set_next_country':
our_default = 'ch'
o = Option()
current_country = o.get_value('selected_country')
countries = Country.objects.filter(active=True).order_by('priority')
if len(countries) < 1:
next_country = our_default
else:
no_next_country = True
i = 0
for c in countries:
if c.countrycode == current_country:
no_next_country = False
try:
next_country = countries[i+1].countrycode
except Exception:
try:
next_country = countries[0].countrycode
except Exception:
next_country = our_default
i = i + 1
if no_next_country:
next_country = countries[0].countrycode
o.set_value('selected_country', next_country)
resp['value'] = next_country
resp['result'] = 'success'
return HttpResponse(json.dumps(resp), content_type='application/json')
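
# A rough usage sketch for the endpoints above (the enigma.box hostname appears
# elsewhere in this file; the exact URL prefix mapped to api_v1 is an assumption):
#
#   curl -d 'key=wlan_ssid' http://enigma.box/api/v1/get_option
#   curl -d 'key=wlan_ssid' -d 'value=MyNet' http://enigma.box/api/v1/set_option
#   curl http://enigma.box/api/v1/get_puppetmasters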
# Sites
def puppet_site(request, program):
if program == 'puppet':
template = 'puppet/site.pp'
else:
template = 'ansible/site.yml'
o = Option()
box = {}
box['ipv6'] = o.get_value('ipv6').strip()
box['public_key'] = o.get_value('public_key')
box['private_key'] = o.get_value('private_key')
selected_country = o.get_value('selected_country', 'ch')
addresses = ''
puppetmasters = ''
internet_gateway = ''
network_preference = ''
peerings = ''
    display_expiration_notice = '0'
    hostid = ''  # default when no server.json is available (referenced in the render context below)
# get Enigmabox-specific server data, when available
try:
f = open('/box/server.json', 'r')
json_data = json.load(f)
hostid = json_data['hostid']
internet_access = json_data['internet_access']
password = json_data['password']
puppetmasters = json_data['puppetmasters']
# get network preference. json overrides user preference.
try:
network_preference = json_data['network_preference']
except Exception:
network_preference = o.get_value('network_preference', 'regular')
if network_preference == 'topo128':
peerings = json_data['peerings_topo128']
else:
peerings = json_data['peerings']
o.set_value('hostid', hostid)
o.set_value('internet_access', internet_access)
o.set_value('password', password)
Puppetmaster.objects.all().delete()
for pm in puppetmasters:
p = Puppetmaster()
p.ip = pm[0]
p.hostname = pm[1]
p.priority = pm[2]
p.save()
Peering.objects.filter(custom=False).delete()
for address, peering in peerings.items():
p = Peering()
p.address = address
p.public_key = peering['publicKey']
p.password = peering['password']
p.country = peering['country']
p.save()
puppetmasters = Puppetmaster.objects.all().order_by('priority')
internet_gateway = Peering.objects.filter(custom=False,country=selected_country).order_by('id')[:1][0]
# expiration notice
dt = datetime.strptime(internet_access, '%Y-%m-%d')
now = datetime.utcnow()
three_weeks = timedelta(days=20)
expiration_notice_confirmed = o.get_value('expiration_notice_confirmed', False)
if (now + three_weeks) > dt:
display_expiration_notice = '1'
if expiration_notice_confirmed:
display_expiration_notice = '0'
# well, umm, leave it hidden, in case the box didn't get the update
#if now > dt:
# display_expiration_notice = '1'
except Exception:
# no additional server data found, moving on...
pass
peerings = []
server_peerings = Peering.objects.filter(custom=False,country=selected_country).order_by('id')[:1]
#server_peerings = Peering.objects.filter(custom=False).order_by('id')
for peering in server_peerings:
peerings.append(peering)
custom_peerings = Peering.objects.filter(custom=True).order_by('id')
for peering in custom_peerings:
peerings.append(peering)
addresses = Address.objects.all().order_by('id')
global_addresses = []
try:
import sqlite3
db = sqlite3.connect('/etc/enigmabox/addressbook.db')
db.text_factory = sqlite3.OptimizedUnicode
c = db.cursor()
c.execute("SELECT ipv6,hostname,phone FROM addresses")
for address in c.fetchall():
global_addresses.append({
'ipv6': address[0],
'hostname': address[1],
'phone': address[2],
})
except Exception:
pass
webinterface_password = o.get_value('webinterface_password')
mailbox_password = o.get_value(u'mailbox_password')
if webinterface_password is None:
webinterface_password = ''
if mailbox_password is None:
mailbox_password = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(64))
o.set_value('mailbox_password', mailbox_password)
mailbox_password = mailbox_password.encode('utf-8')
# hash the password
import hashlib
import base64
p = hashlib.sha1(mailbox_password)
mailbox_password = base64.b64encode(p.digest())
# webfilter: format custom rules
custom_rules_text = o.get_value('webfilter_custom-rules-text', '')
## four backslashes: django -> puppet -> tinyproxy
#custom_rules_text = custom_rules_text.replace('.', '\\\\.')
#custom_rules_text = custom_rules_text.replace('-', '\\\\-')
custom_rules_text = custom_rules_text.replace('\r', '')
cr2 = ''
for crt in custom_rules_text.split('\n'):
cr2 += '.*' + crt + '.*\n'
custom_rules_text = cr2
custom_rules = o.get_value('webfilter_custom-rules', '')
if custom_rules != '1':
custom_rules_text = ''
return render_to_response(template, {
'box': box,
'hostid': hostid,
'addresses': addresses,
'global_addresses': global_addresses,
'global_availability': o.get_value('global_availability', 0),
'puppetmasters': puppetmasters,
'wlan_ssid': o.get_value('wlan_ssid'),
'wlan_pass': o.get_value('wlan_pass'),
'wlan_security': o.get_value('wlan_security'),
'wlan_group': o.get_value('wlan_group', ''),
'wlan_pairwise': o.get_value('wlan_pairwise', ''),
'network_preference': network_preference,
'peerings': peerings,
'internet_gateway': internet_gateway,
'autopeering': o.get_value('autopeering'),
'allow_peering': o.get_value('allow_peering'),
'peering_port': o.get_value('peering_port'),
'peering_password': o.get_value('peering_password'),
'webinterface_password': webinterface_password,
'mailbox_password': mailbox_password,
'webfilter_filter_ads': o.get_value('webfilter_filter-ads', ''),
'webfilter_filter_headers': o.get_value('webfilter_filter-headers', ''),
'webfilter_disable_browser_ident': o.get_value('webfilter_disable-browser-ident', ''),
'webfilter_block_facebook': o.get_value('webfilter_block-facebook', ''),
'webfilter_block_google': o.get_value('webfilter_block-google', ''),
'webfilter_block_twitter': o.get_value('webfilter_block-twitter', ''),
'webfilter_custom_rules': custom_rules,
'webfilter_custom_rules_text': custom_rules_text,
'teletext_enabled': o.get_value('teletext_enabled', '0'),
'display_expiration_notice': display_expiration_notice,
})
|
Akylas/CouchPotatoServer | refs/heads/master | couchpotato/core/plugins/trailer/__init__.py | 4 | from .main import Trailer
def start():
return Trailer()
config = [{
'name': 'trailer',
'groups': [
{
'tab': 'renamer',
'subtab': 'trailer',
'name': 'trailer',
'label': 'Download trailer after rename',
'options': [
{
'name': 'enabled',
'label': 'Search and download trailers',
'default': False,
'type': 'enabler',
},
{
'name': 'quality',
'default': '720p',
'type': 'dropdown',
'values': [('1080P', '1080p'), ('720P', '720p'), ('480P', '480p')],
},
{
'name': 'automatic',
'default': False,
'type': 'bool',
                'description': 'Automatically search & download trailers for movies in the library',
},
],
},
],
}]
|
Tatsh-ansible/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_role.py | 18 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_role
short_description: Manages user roles on Apache CloudStack based clouds.
description:
- Create, update, delete user roles.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the role.
required: true
id:
description:
- ID of the role.
- If provided, C(id) is used as key.
required: false
default: null
aliases: [ 'uuid' ]
role_type:
description:
- Type of the role.
- Only considered for creation.
required: false
default: User
choices: [ 'User', 'DomainAdmin', 'ResourceAdmin', 'Admin' ]
description:
description:
- Description of the role.
required: false
default: null
state:
description:
- State of the role.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure a user role is present
- local_action:
module: cs_role
name: myrole_user
# Ensure the role with a particular ID is named myrole_user
- local_action:
module: cs_role
name: myrole_user
id: 04589590-ac63-4ffc-93f5-b698b8ac38b6
# Ensure a role is absent
- local_action:
module: cs_role
name: myrole_user
state: absent
'''
RETURN = '''
---
id:
description: UUID of the role.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the role.
returned: success
type: string
sample: myrole
description:
description: Description of the role.
returned: success
type: string
sample: "This is my role description"
role_type:
description: Type of the role.
returned: success
type: string
sample: User
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackRole(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackRole, self).__init__(module)
self.returns = {
'type': 'role_type',
}
def get_role(self):
uuid = self.module.params.get('uuid')
if uuid:
args = {
'id': uuid,
}
roles = self.query_api('listRoles', **args)
if roles:
return roles['role'][0]
else:
args = {
'name': self.module.params.get('name'),
}
roles = self.query_api('listRoles', **args)
if roles:
return roles['role'][0]
return None
def present_role(self):
role = self.get_role()
if role:
role = self._update_role(role)
else:
role = self._create_role(role)
return role
def _create_role(self, role):
self.result['changed'] = True
args = {
'name': self.module.params.get('name'),
'type': self.module.params.get('role_type'),
'description': self.module.params.get('description'),
}
if not self.module.check_mode:
res = self.query_api('createRole', **args)
role = res['role']
return role
def _update_role(self, role):
args = {
'id': role['id'],
'name': self.module.params.get('name'),
'description': self.module.params.get('description'),
}
if self.has_changed(args, role):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateRole', **args)
# The API as in 4.9 does not return an updated role yet
if 'role' not in res:
role = self.get_role()
else:
role = res['role']
return role
def absent_role(self):
role = self.get_role()
if role:
self.result['changed'] = True
args = {
'id': role['id'],
}
if not self.module.check_mode:
self.query_api('deleteRole', **args)
return role
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
uuid=dict(aliases=['id']),
name=dict(required=True),
description=dict(),
role_type=dict(choices=['User', 'DomainAdmin', 'ResourceAdmin', 'Admin'], default='User'),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_role = AnsibleCloudStackRole(module)
state = module.params.get('state')
if state == 'absent':
role = acs_role.absent_role()
else:
role = acs_role.present_role()
result = acs_role.get_result(role)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
Xilinx/BNN-PYNQ | refs/heads/master | bnn/src/training/mnist-gen-weights-W1A2.py | 1 | # BSD 3-Clause License
# =======
# Copyright (c) 2020, Xilinx
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import finnthesizer as fth
if __name__ == "__main__":
bnnRoot = "."
npzFile = bnnRoot + "/mnist-1w-2a.npz"
targetDirBin = bnnRoot + "/lfcW1A2"
targetDirHLS = bnnRoot + "/lfcW1A2/hw"
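    # Per-layer folding factors, one entry per fully-connected layer. Following
    # the usual FINN convention (an assumption here), simdCounts sets the input
    # parallelism (SIMD lanes per PE) and peCounts the number of processing
    # elements per layer; together they control the hardware folding.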
simdCounts = [64, 32, 64, 8]
peCounts = [32, 64, 32, 16]
WeightsPrecisions_integer = [1, 1, 1, 1]
WeightsPrecisions_fractional = [0, 0, 0, 0]
InputPrecisions_integer = [1, 2, 2, 2]
InputPrecisions_fractional = [0, 0, 0, 0]
ActivationPrecisions_integer = [2, 2, 2, 1]
ActivationPrecisions_fractional = [0, 0, 0, 0]
classes = [str(x) for x in range(10)]
fth.convertFCNetwork(npzFile, targetDirBin, targetDirHLS, simdCounts, peCounts, \
WeightsPrecisions_fractional, ActivationPrecisions_fractional, InputPrecisions_fractional,\
WeightsPrecisions_integer, ActivationPrecisions_integer, InputPrecisions_integer)
with open(targetDirBin + "/classes.txt", "w") as f:
f.write("\n".join(classes))
|
tdyas/pants | refs/heads/master | src/python/pants/build_graph/address_lookup_error.py | 2 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
class AddressLookupError(Exception):
"""Raised by various modules when an address can't be resolved. Use this common base class so
other modules can trap the error at each node along the path and construct a useful diagnostic.
:API: public
"""
|
nvoron23/hue | refs/heads/master | apps/sqoop/src/sqoop/api/submission.py | 32 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import socket
from django.utils.translation import ugettext as _
from sqoop import client, conf
from decorators import get_submission_or_exception
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions import StructuredException
from desktop.lib.rest.http_client import RestException
from exception import handle_rest_exception
from utils import list_to_dict
from django.views.decorators.cache import never_cache
__all__ = ['get_submissions', 'submissions']
LOG = logging.getLogger(__name__)
@never_cache
def get_submissions(request):
response = {
'status': 0,
'errors': None,
'submissions': []
}
status = request.GET.get('status', 'submissions').split(',')
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
submissions = c.get_submissions()
response['submissions'] = list_to_dict(submissions)
except RestException, e:
response.update(handle_rest_exception(e, _('Could not get submissions.')))
return JsonResponse(response)
@never_cache
def submissions(request):
if request.method == 'GET':
return get_submissions(request)
else:
raise StructuredException(code="INVALID_METHOD", message=_('GET request required.'), error_code=405)
|
bala4901/odoo | refs/heads/master | addons/hr_contract/__init__.py | 381 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-Today OpenERP SA (<http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_contract
import base_action_rule
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hsoft/pdfmasher | refs/heads/master | core/gui/build_pane.py | 1 | # Created By: Virgil Dupras
# Created On: 2011-07-09
# Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPL v3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/gplv3_license
import os.path as op
from datetime import datetime
import markdown
from ebooks.html.input import HTMLInput
from ebooks.mobi.output import convert as convert2mobi
from ebooks.epub.output import convert as convert2epub
from ebooks.metadata.book import Metadata
from ..output import generate_markdown, wrap_html
from .base import GUIObject
class EbookType:
MOBI = 1
EPUB = 2
class BuildPane(GUIObject):
#--- model -> view calls:
# refresh() (for generation label and post processing buttons)
#
def __init__(self, app):
GUIObject.__init__(self, app)
self.lastgen_desc = ''
self.post_processing_enabled = False
self.selected_ebook_type = EbookType.MOBI
self.ebook_title = ''
self.ebook_author = ''
def _view_updated(self):
self.view.refresh()
#--- Private
def _current_path(self, ext):
assert self.app.current_path
without_ext, _ = op.splitext(self.app.current_path)
return without_ext + '.' + ext
def _generate_html(self):
md_path = self._current_path('txt')
with open(md_path, 'rt', encoding='utf-8') as fp:
md_contents = fp.read()
html_body = markdown.markdown(md_contents)
dest_path = self._current_path('htm')
with open(dest_path, 'wt', encoding='utf-8') as fp:
fp.write(wrap_html(html_body, 'utf-8'))
return dest_path
#--- Public
def generate_markdown(self):
dest_path = self._current_path('txt')
with open(dest_path, 'wt', encoding='utf-8') as fp:
fp.write(generate_markdown(self.app.elements))
self.lastgen_desc = 'Generated at {}'.format(datetime.now().strftime('%H:%M:%S'))
self.post_processing_enabled = True
self.view.refresh()
def edit_markdown(self):
md_path = self._current_path('txt')
self.app.open_path(md_path)
def reveal_markdown(self):
md_path = self._current_path('txt')
self.app.reveal_path(md_path)
def view_html(self):
self.app.open_path(self._generate_html())
def create_ebook(self):
allowed_ext = 'mobi' if self.selected_ebook_type == EbookType.MOBI else 'epub'
path = self.app.view.query_save_path("Select a destination for the e-book", [allowed_ext])
if not path:
return
hi = HTMLInput()
html_path = self._generate_html()
mi = Metadata(self.ebook_title, [self.ebook_author])
oeb = hi.create_oebbook(html_path, mi)
if self.selected_ebook_type == EbookType.EPUB:
convert2epub(oeb, path)
else:
convert2mobi(oeb, path)
#--- Events
def file_opened(self):
self.lastgen_desc = ''
self.post_processing_enabled = False
self.view.refresh()
|
TalkingCactus/Citadel-Station-13-5th-Port | refs/heads/master | bot/Namecheck.py | 55 |
def Namecheck_allinone(name, against, sender=None):
    """False = No match, True = Match"""
    if not hasattr(against, '__iter__'):
        return False
    if isinstance(against, dict):
        for key, value in against.iteritems():
            if value.lower() in name.lower() and (sender is None or sender.lower() not in name.lower()):
                return True, key  # Not sure why you need the index with the result.
        return False
    for item in against:
        if item.lower() in name.lower() and (sender is None or sender.lower() not in name.lower()):
            return True
    return False

def Namecheck(name, against, sender):
    """False = No match, True = Match"""
    for i in against:
        if i.lower() in name.lower() and sender.lower() not in name.lower():
            return True
    return False

def Namecheck_dict(name, against):
    """False = No match, True = Match"""
    for a, i in against.items():
        if i.lower() in name.lower():
            return True, a
    return False, None
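
# A minimal smoke test, runnable directly; the sample names below are made up.
if __name__ == '__main__':
    watched = ['admin', 'moderator']
    print(Namecheck('Admin Bob', watched, 'carol'))      # True: 'admin' matches
    print(Namecheck('Bob', watched, 'carol'))            # False: no match
    print(Namecheck_dict('Admin Bob', {1: 'admin'}))     # (True, 1)
    print(Namecheck_allinone('Admin Bob', watched))      # True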
|
obi-two/Rebelion | refs/heads/master | data/scripts/templates/object/tangible/wearables/necklace/shared_necklace_adorned_beads.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/necklace/shared_necklace_adorned_beads.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","necklace_adorned_beads")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
poppogbr/genropy | refs/heads/master | gnrpy/gnr/web/gnrwebpage_proxy/apphandler.py | 1 | # -*- coding: UTF-8 -*-
#--------------------------------------------------------------------------
# package : GenroPy web - see LICENSE for details
# module gnrwebcore : core module for genropy web framework
# Copyright (c) : 2004 - 2007 Softwell sas - Milano
# Written by : Giovanni Porcari, Michele Bertoldi
# Saverio Porcari, Francesco Porcari , Francesco Cavazzana
#--------------------------------------------------------------------------
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or (at your option) any later version.
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#apphandler.py
#Created by Giovanni Porcari on 2007-03-24.
#Copyright (c) 2007 Softwell. All rights reserved.
import os
import re
import time
from gnr.core.gnrlang import gnrImport
import logging
gnrlogger = logging.getLogger(__name__)
from gnr.core.gnrbag import Bag
from gnr.core import gnrlist
from gnr.core.gnrlang import getUuid
from gnr.core.gnrstring import templateReplace, splitAndStrip, toText, toJson
from gnr.web.gnrwebpage_proxy.gnrbaseproxy import GnrBaseProxy
ESCAPE_SPECIAL = re.compile(r'[\[\\\^\$\.\|\?\*\+\(\)\]\{\}]')
class GnrWebAppHandler(GnrBaseProxy):
def init(self, **kwargs):
self.gnrapp = self.page.site.gnrapp
siteStatus = self.page.siteStatus
if siteStatus['resetLocalizationTime'] and self.gnrapp.localizationTime < siteStatus['resetLocalizationTime']:
self.gnrapp.buildLocalization()
def event_onEnd(self):
self._finalize(self)
def _finalize(self, page):
self.db.closeConnection()
@property
def db(self):
return self.page.db
def getDb(self, dbId=None):
        return self.db  # TODO: kept as a __getitem__ alias for backward compatibility: see gnrsqldata DataResolver
__getitem__ = getDb
def _getAppId(self):
if not hasattr(self, '_appId'):
instances = self.page.site.config['instances'].keys()
if len(instances) == 1:
self._appId = instances[0]
else:
                self._appId = self.page.request.uri.split('/')[2]
if not self._appId in instances:
self._appId = instances[0]
return self._appId
appId = property(_getAppId)
def getPackages(self):
return [[pkgobj.name_full, pkg] for pkg, pkgobj in self.db.packages.items()]
rpc_getPackages = getPackages
def getTables(self, pkg=None):
tables = self.db.package(pkg).tables
if tables:
return [[tblobj.name_full.capitalize(), tbl] for tbl, tblobj in tables.items()]
return []
rpc_getTables = getTables
def getTablesTree(self):
result = Bag()
for pkg, pkgobj in self.db.packages.items():
if pkgobj.attributes.get('reserved', 'n').upper() != 'Y':
tblbag = Bag()
label = pkgobj.name_full.capitalize()
result.setItem(pkg, tblbag, label=label)
for tbl, tblobj in pkgobj.tables.items():
label = tblobj.name_full.capitalize()
tblbag.setItem(tbl, None, label=label, tableid='%s.%s' % (pkg, tbl))
return result
rpc_getTablesTree = getTablesTree
def getTableFields(self, pkg='', table='', **kwargs):
if not pkg:
pkg, table = table.split('.')
return self.dbStructure(path='%s.tables.%s.relations' % (pkg, table))
rpc_getTableFields = getTableFields
def dbStructure(self, path='', **kwargs):
curr = self.db.packages
if path:
curr = curr[path]
path = path + '.'
return self._dbStructureInner(curr, path)
rpc_dbStructure = dbStructure
def _dbStructureInner(self, where, path):
result = Bag()
for elem in where:
if hasattr(elem, 'resolver'):
attributes = {}
attributes.update(elem.getAttr())
if 'joiner' in attributes:
joiner = attributes.pop('joiner')
attributes.update(joiner[0] or {})
label = elem.label
attributes['caption'] = attributes.get('name_long')
if elem.resolver != None:
result.setItem(label, "genro.rpc.remoteResolver('app.dbStructure',{path:'%s'})" % (path + label),
attributes, _T='JS')
else:
value = elem.value
if hasattr(value, '__len__'):
if len(value):
result.setItem(label,
"genro.rpc.remoteResolver('app.dbStructure',{path:'%s'})" % (path + label),
attributes, _T='JS')
else:
result.setItem(label, None)
else:
result.setItem(label, elem.value, attributes)
elif hasattr(where, '__getitem__'):
if isinstance(where, Bag):
n = where.getNode(elem)
value = n.value
attributes = n.getAttr()
else:
value = where[elem]
attributes = getattr(value, 'attributes', {})
label = elem
attributes['caption'] = attributes.get('name_long')
if len(value):
result.setItem(label, "genro.rpc.remoteResolver('app.dbStructure',{path:'%s'})" % (path + label),
attributes, _T='JS')
else:
result.setItem(label, None, attributes)
else:
result.setItem(elem, None)
return result
def rpc_batchDo(self, batch, resultpath, forked=False, **kwargs):
if forked:
from processing import Process
p = Process(target=self._batchExecutor, args=(batch, resultpath, forked), kwargs=kwargs)
p.start()
return None
else:
return self._batchExecutor(batch, resultpath, forked, **kwargs)
def _batchExecutor(self, batch, resultpath, forked, **kwargs):
batchClass = self._batchFinder(batch)
batch = batchClass(self.page)
if forked:
try:
result = batch.run(**kwargs)
error = None
_cls = None
except Exception, err:
result = self.page._errorPage(err, batch, kwargs)
result._page = None
error = 'serverError'
_cls = 'domsource'
self.page.setInClientData(resultpath, result, attributes=dict(_error=error, __cls=_cls))
else:
return batch.run(**kwargs)
def _batchFinder(self, batch):
modName, clsName = batch.split(':')
modPath = self.page.getResource(modName, 'py') or []
if modPath:
m = gnrImport(modPath)
return getattr(m, clsName)
else:
raise Exception('Cannot import component %s' % modName)
def rpc_getRecordCount(self, field=None, value=None,
table='', distinct=False, columns='', where='',
relationDict=None, sqlparams=None, condition=None,
**kwargs):
#sqlargs = dict(kwargs)
if field:
if not table:
pkg, table, field = splitAndStrip(field, '.', fixed=-3)
table = '%s.%s' % (pkg, table)
where = '$%s = :value' % field
kwargs['value'] = value
tblobj = self.db.table(table)
if isinstance(where, Bag):
where, kwargs = self._decodeWhereBag(tblobj, where, kwargs)
if condition:
where = '(%s) AND (%s)' % (where, condition)
return tblobj.query(columns=columns, distinct=distinct, where=where,
relationDict=relationDict, sqlparams=sqlparams, **kwargs).count()
def rpc_selectionCall(self, table, selectionName, method, freeze=False, **kwargs):
tblobj = self.db.table(table)
selection = self.page.unfreezeSelection(tblobj, selectionName)
if hasattr(selection, method):
result = getattr(selection, method)(**kwargs)
if freeze:
selection.freezeUpdate()
return result
def rpc_getRelatedRecord(self, from_fld=None, target_fld=None, pkg=None, pkey=None, ignoreMissing=True,
ignoreDuplicate=True,
js_resolver_one='relOneResolver', js_resolver_many='relManyResolver',
sqlContextName=None, one_one=None, virtual_columns=None, **kwargs):
if one_one is not None:
            raise Exception('error')
pkg, tbl, related_field = target_fld.split('.')
table = '%s.%s' % (pkg, tbl)
if pkey is None:
tbl_pkey = self.db.table(table).pkey
pkey = kwargs.pop(tbl_pkey, None)
        if pkey in (None, '') and related_field not in kwargs:  # related record from a newrecord or record without link
pkey = '*newrecord*'
record, recInfo = self.rpc_getRecord(table=table, from_fld=from_fld, target_fld=target_fld, pkey=pkey,
ignoreMissing=ignoreMissing, ignoreDuplicate=ignoreDuplicate,
js_resolver_one=js_resolver_one, js_resolver_many=js_resolver_many,
sqlContextName=sqlContextName, virtual_columns=virtual_columns, **kwargs)
if sqlContextName:
joinBag = self._getSqlContextConditions(sqlContextName, target_fld=target_fld, from_fld=from_fld)
if joinBag and joinBag['applymethod']:
applyPars = self._getApplyMethodPars(kwargs)
self.page.getPublicMethod('rpc', joinBag['applymethod'])(record, **applyPars)
return (record, recInfo)
#def setContextJoinColumns(self, table, contextName='', reason=None, path=None, columns=None):
# tblobj = self.db.table(table)
# relation = tblobj.model.getRelation(path)
# if not relation:
# return
# target_fld = relation['many'].replace('.', '_')
# from_fld = relation['one'].replace('.', '_')
# ctxpath = '_sqlctx.columns.%s.%s_%s' % (contextName, target_fld, from_fld)
# with self.page.pageStore() as store:
# reasons = store.getItem('%s._reasons' % ctxpath)
# if reasons is None:
# reasons = Bag()
# store.setItem('%s._reasons' % ctxpath, reasons)
# reasons.setItem(reason or '*', columns)
# query_set = set()
# for columns in reasons.values():
# query_set.update(columns.split(','))
# store.setItem(ctxpath, ','.join(query_set))
def rpc_getRelatedSelection(self, from_fld, target_fld, relation_value=None,
columns='', query_columns=None,
condition=None, js_resolver_one='relOneResolver',
sqlContextName=None, **kwargs):
if query_columns:
print 'QUERY COLUMNS PARAMETER NOT EXPECTED!!'
columns = columns or query_columns
t = time.time()
joinBag = None
if sqlContextName:
joinBag = self._getSqlContextConditions(sqlContextName, target_fld=target_fld, from_fld=from_fld)
# if not columns:
# columns = self._getSqlContextColumns(sqlContextName, target_fld=target_fld, from_fld=from_fld)
columns = columns or '*'
pkg, tbl, related_field = target_fld.split('.')
dbtable = '%s.%s' % (pkg, tbl)
if not relation_value:
kwargs['limit'] = 0
where = "$%s = :val_%s" % (related_field, related_field)
kwargs[str('val_%s' % related_field)] = relation_value
if condition:
where = '(%s) AND (%s)' % (where, condition)
query = self.db.query(dbtable, columns=columns, where=where,
sqlContextName=sqlContextName, **kwargs)
joinBag = None
if sqlContextName:
self._joinConditionsFromContext(query, sqlContextName)
conditionKey = '%s_%s' % (target_fld.replace('.', '_'), from_fld.replace('.', '_'))
rootCond = query.joinConditions.get(conditionKey)
if rootCond:
query.setJoinCondition(target_fld='*', from_fld='*', condition=rootCond['condition'],
one_one=rootCond['one_one'], **rootCond['params'])
sel = query.selection()
if joinBag and joinBag.get('applymethod'):
applyPars = self._getApplyMethodPars(kwargs)
self.page.getPublicMethod('rpc', joinBag['applymethod'])(sel, **applyPars)
result = Bag()
relOneParams = dict(_target_fld='%s.%s' % (dbtable, self.db.table(dbtable).pkey),
_from_fld='',
_resolver_name=js_resolver_one,
_sqlContextName=sqlContextName
)
for j, row in enumerate(sel):
row = dict(row)
pkey = row.pop('pkey')
spkey = toText(pkey)
result.setItem('%s' % spkey, None, _pkey=spkey, _relation_value=pkey,
_attributes=row, _removeNullAttributes=False, **relOneParams)
relOneParams.update(dict([(k, None) for k in sel.colAttrs.keys() if not k == 'pkey']))
resultAttributes = dict(dbtable=dbtable, totalrows=len(sel))
resultAttributes.update({'servertime': int((time.time() - t) * 1000),
'newproc': getattr(self, 'self.newprocess', 'no'),
'childResolverParams': '%s::JS' % toJson(relOneParams)
})
return (result, resultAttributes)
def rpc_runSelectionBatch(self, table, selectionName=None, batchFactory=None, pkeys=None,
thermoId=None, thermofield=None,
stopOnError=False, forUpdate=False, onRow=None, **kwargs):
""" batchFactory: name of the Class, plugin of table, which executes the batch action
thermoId:
thermofield: the field of the main table to use for thermo display or * for record caption
stopOnError: at the first error stop execution
forUpdate: load records for update and commit at end (always use for writing batch)
onRow: optional method to execute on each record in selection, use if no batchFactory is given
"""
tblobj = self.db.table(table)
if not pkeys:
selection = self.page.unfreezeSelection(tblobj, selectionName)
pkeys = selection.output('pkeylist')
batch = tblobj.getPlugin(name=batchFactory or 'batch', thermoCb=self.setThermo,
thermoId=thermoId, thermofield=thermofield,
stopOnError=stopOnError, forUpdate=forUpdate, onRow=onRow, **kwargs)
return batch.run(pkeyList=pkeys)
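
    # A hypothetical invocation, for illustration only (table and selection
    # names are invented):
    #   self.rpc_runSelectionBatch('mypkg.mytable', selectionName='sel01',
    #                              thermofield='*', forUpdate=True)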
def setThermo(self, thermoId, progress_1=None, message_1=None,
maximum_1=None, command=None, **kwargs):
with self.page.pageStore() as store:
if command == 'init':
thermoBag = Bag()
else:
thermoBag = store.getItem('thermo_%s' % thermoId) or Bag()
max = maximum_1 or thermoBag['t1.maximum']
            prog = progress_1 or thermoBag['t1.progress']
if max and prog > max:
                command = 'end'
if command == 'end':
thermoBag['status'] = 'end'
thermoBag['message'] = '!!Execution completed'
elif command == 'stopped':
thermoBag['status'] = 'stopped'
thermoBag['message'] = '!!Execution stopped'
else:
params = dict(progress_1=progress_1, message_1=message_1, maximum_1=maximum_1)
params.update(kwargs)
for k, v in params.items():
if v is not None:
key, thermo = k.split('_')
thermoBag['t%s.%s' % (thermo, key)] = v
store.setItem('thermo_%s' % thermoId, thermoBag)
if thermoBag['stop']:
return 'stop'
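
    # Typical progress-reporting pattern for a batch callback (a sketch; the
    # keys follow the t<N>.<name> convention handled above):
    #   self.setThermo(thermoId, command='init', maximum_1=len(pkeys))
    #   for i, pkey in enumerate(pkeys):
    #       if self.setThermo(thermoId, progress_1=i + 1, message_1=pkey) == 'stop':
    #           break
    #   self.setThermo(thermoId, command='end')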
def rpc_getThermo(self, thermoId, flag=None):
with self.page.pageStore() as store:
if flag == 'stop':
thermoBag = store.getItem('thermo_%s' % thermoId) or Bag()
thermoBag['stop'] = True
store.setItem('thermo_%s' % thermoId, thermoBag)
else:
thermoBag = store.getItem('thermo_%s' % thermoId) or Bag()
return thermoBag
def rpc_onSelectionDo(self, table, selectionName, command, callmethod=None, selectedRowidx=None, recordcall=False,
**kwargs):
result = None
tblobj = self.db.table(table)
selection = self.page.getUserSelection(table=tblobj, selectionName=selectionName, selectedRowidx=selectedRowidx)
callmethod = callmethod or 'standard'
if command in ('print', 'rpc', 'export', 'action', 'pdf'):
handler = getattr(self.page, '%s_%s' % (command, callmethod), None)
if not handler:
handler = getattr(tblobj, '%s_%s' % (command, callmethod), None)
if handler:
if recordcall:
result = []
for r in selection:
onres = handler(tblobj.record(r['pkey']), locale=self.page.locale, **kwargs)
if onres != None:
result.append(onres)
else:
result = handler(selection, locale=self.page.locale, **kwargs)
return result
def export_standard(self, selection, locale=None, columns=None, filename=None, **kwargs):
        filename = filename or self.page.maintable or self.page.request.uri.split('/')[-1]
content = selection.output('tabtext', columns=columns, locale=locale)
self.page.utils.sendFile(content, filename, 'xls')
def print_standard(self, selection, locale=None, **kwargs):
columns = None # get columns from current view on client !
if not columns:
columns = [c for c in selection.allColumns if not c in ('pkey', 'rowidx')]
outdata = selection.output('dictlist', columns=columns, asIterator=True)
colAttrs = selection.colAttrs
return self.page.pluginhandler.get_plugin('mako')(path='standard_print.tpl', striped='odd_row,even_row',
outdata=outdata, colAttrs=colAttrs,
title='Print List', header='Print List', columns=columns)
def pdf_standard(self, selection, locale=None, **kwargs):
columns = None # get columns from current view on client !
if not columns:
columns = [c for c in selection.allColumns if not c in ('pkey', 'rowidx')]
outdata = selection.output('dictlist', columns=columns, asIterator=True)
colAttrs = selection.colAttrs
return self.page.rmlTemplate('standard_print.rml', outdata=outdata, colAttrs=colAttrs,
title='Print List', header='Print List', columns=columns)
def _getSqlContextConditions(self, contextName, target_fld=None, from_fld=None):
result = self.page.pageStore().getItem('_sqlctx.conditions.%s' % contextName)
if result and target_fld and from_fld:
result = result[('%s_%s' % (target_fld, from_fld)).replace('.', '_')]
return result
#def _getSqlContextColumns(self, contextName, target_fld, from_fld):
# result = self.page.pageStore().getItem('_sqlctx.columns.%s' % contextName)
# if result:
# return result[('%s_%s' % (target_fld, from_fld)).replace('.', '_')]
def _joinConditionsFromContext(self, obj, sqlContextName):
sqlContextBag = self._getSqlContextConditions(sqlContextName)
storedata = self.page.pageStore().data
if sqlContextBag:
for joinBag in sqlContextBag.values():
if joinBag['condition']: # may be a relatedcolumns only
params = (joinBag['params'] or Bag()).asDict(ascii=True)
for k, v in params.items():
if isinstance(v, basestring):
if v.startswith('^'):
params[k] = storedata[v[1:]]
elif hasattr(self, '%s_%s' % (sqlContextName, v)):
params[k] = getattr(self, '%s_%s' % (sqlContextName, v))()
obj.setJoinCondition(target_fld=joinBag['target_fld'], from_fld=joinBag['from_fld'],
condition=joinBag['condition'],
one_one=joinBag['one_one'], **params)
def _getApplyMethodPars(self, kwargs, **optkwargs):
result = dict([(k[6:], v) for k, v in kwargs.items() if k.startswith('apply_')])
if optkwargs:
result.update(optkwargs)
return result
def rpc_checkFreezedSelection(self,changelist=None,selectionName=None,where=None,table=None,**kwargs):
selection = self.page.unfreezeSelection(dbtable=table, name=selectionName)
needUpdate = False
if selection is not None:
kwargs.pop('where_attr',None)
tblobj = self.db.table(table)
if isinstance(where,Bag):
where, kwargs = self._decodeWhereBag(tblobj, where, kwargs)
where = " ( %s ) AND ( $%s IN :_pkeys ) " % (where,tblobj.pkey)
eventdict = {}
for change in changelist:
eventdict.setdefault(change['dbevent'],[]).append(change['pkey'])
for dbevent,pkeys in eventdict.items():
wasInSelection = bool(filter(lambda r: r['pkey'] in pkeys,selection.data))
if dbevent=='D' and not wasInSelection:
continue
willBeInSelection = bool(tblobj.query(where=where,_pkeys=pkeys,limit=1,**kwargs).fetch())
if dbevent=='I' and not willBeInSelection:
continue
if dbevent=='U' and not wasInSelection and not willBeInSelection:
continue
needUpdate = True
break
return needUpdate
def rpc_getSelection(self, table='', distinct=False, columns='', where='', condition=None,
order_by=None, limit=None, offset=None, group_by=None, having=None,
relationDict=None, sqlparams=None, row_start='0', row_count='0',
recordResolver=True, selectionName='', structure=False, numberedRows=True,
pkeys=None, fromSelection=None, applymethod=None, totalRowCount=False,
selectmethod=None, selectmethod_prefix='rpc', expressions=None, sum_columns=None,
sortedBy=None, excludeLogicalDeleted=True,savedQuery=None,savedView=None, externalChanges=None,**kwargs):
t = time.time()
tblobj = self.db.table(table)
if externalChanges is not None:
self.page.subscribeTable(table,externalChanges)
row_start = int(row_start)
row_count = int(row_count)
newSelection = True
formats = {}
for k in kwargs.keys():
if k.startswith('format_'):
                formats[k[7:]] = kwargs.pop(k)
if selectionName.startswith('*'):
if selectionName == '*':
selectionName = self.page.page_id
else:
selectionName = selectionName[1:]
elif selectionName:
selection = self.page.unfreezeSelection(tblobj, selectionName)
if selection is not None:
if sortedBy and ','.join(selection.sortedBy or []) != sortedBy:
selection.sort(sortedBy)
selection.freezeUpdate()
debug = 'fromPickle'
resultAttributes = {}
newSelection = False
if newSelection:
debug = 'fromDb'
if savedQuery:
where = tblobj.pkg.loadUserObject(code=savedQuery, objtype='query', tbl=tblobj.fullname)[0]
if savedView:
columns = tblobj.pkg.loadUserObject(code=savedView, objtype='view', tbl=tblobj.fullname)[0]
if selectmethod:
selecthandler = self.page.getPublicMethod(selectmethod_prefix, selectmethod)
else:
selecthandler = self._default_getSelection
columns = self._getSelection_columns(tblobj, columns, expressions=expressions)
selection = selecthandler(tblobj=tblobj, table=table, distinct=distinct, columns=columns, where=where,
condition=condition,
order_by=order_by, limit=limit, offset=offset, group_by=group_by, having=having,
relationDict=relationDict, sqlparams=sqlparams, row_start=row_start,
row_count=row_count,
recordResolver=recordResolver, selectionName=selectionName,
pkeys=pkeys, fromSelection=fromSelection,
sortedBy=sortedBy, excludeLogicalDeleted=excludeLogicalDeleted, **kwargs)
if applymethod:
applyPars = self._getApplyMethodPars(kwargs)
self.page.getPublicMethod('rpc', applymethod)(selection, **applyPars)
if selectionName:
selection.setKey('rowidx')
selectionPath = self.page.freezeSelection(selection, selectionName)
with self.page.userStore() as store:
store.setItem('current.table.%s.last_selection_path' % table.replace('.', '_'), selectionPath)
resultAttributes = dict(table=table, method='app.getSelection', selectionName=selectionName,
row_count=row_count,
totalrows=len(selection))
generator = selection.output(mode='generator', offset=row_start, limit=row_count, formats=formats)
_addClassesDict = dict([(k, v['_addClass']) for k, v in selection.colAttrs.items() if '_addClass' in v])
data = self.gridSelectionData(selection, generator, logicalDeletionField=tblobj.logicalDeletionField,
recordResolver=recordResolver, numberedRows=numberedRows,
_addClassesDict=_addClassesDict)
if not structure:
result = data
else:
result = Bag()
result['data'] = data
result['structure'] = self.gridSelectionStruct(selection)
resultAttributes.update({'debug': debug, 'servertime': int((time.time() - t) * 1000),
'newproc': getattr(self, 'self.newprocess', 'no')})
#ADDED CONDITION AND **KWARGS (PARAMETERS FOR CONDITION)
if totalRowCount:
resultAttributes['totalRowCount'] = tblobj.query(where=condition,
excludeLogicalDeleted=excludeLogicalDeleted,
**kwargs).count()
if sum_columns:
for col in sum_columns.split(','):
col = col.strip()
resultAttributes['sum_%s' % col] = data.sum('#a.%s' % col)
return (result, resultAttributes)
def _getSelection_columns(self, tblobj, columns, expressions=None):
if isinstance(columns, Bag):
columns = self._columnsFromStruct(columns)
if not columns:
columns = tblobj.attributes.get('baseview') or '*'
if '[' in columns:
columns = columns.replace(' ', '').replace('\n', '').replace('\t', '')
maintable = []
colaux = columns.split(',')
columns = []
for col in colaux:
if '[' in col:
tbl, col = col.split('[')
maintable = [tbl]
if col.endswith(']'):
col = col[:-1]
columns.append('.'.join(maintable + [col.rstrip(']')]))
if col.endswith(']'):
maintable = []
columns = ','.join(columns)
if expressions:
expr_dict = getattr(self.page, 'expr_%s' % expressions)()
expr_dict = dict([(k, '%s AS %s' % (v, k)) for k, v in expr_dict.items()])
columns = templateReplace(columns, expr_dict, safeMode=True)
return columns
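
    # Illustration of the bracket syntax expanded above (hypothetical fields):
    #   "@member_id[$name,$surname],$amount"
    # becomes
    #   "@member_id.$name,@member_id.$surname,$amount"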
def _default_getSelection(self, tblobj=None, table=None, distinct=None, columns=None, where=None, condition=None,
order_by=None, limit=None, offset=None, group_by=None, having=None,
relationDict=None, sqlparams=None, row_start=None, row_count=None,
recordResolver=None, selectionName=None, pkeys=None, fromSelection=None,
sortedBy=None, sqlContextName=None,
excludeLogicalDeleted=True,**kwargs):
sqlContextBag = None
if sqlContextName:
sqlContextBag = self._getSqlContextConditions(sqlContextName)
if fromSelection:
fromSelection = self.page.unfreezeSelection(tblobj, fromSelection)
pkeys = fromSelection.output('pkeylist')
if pkeys:
if isinstance(pkeys, basestring):
pkeys = pkeys.split(',')
if len(pkeys)==0:
kwargs['limit'] = 0
elif len(pkeys)==1:
where = 't0.%s =:_pkey' % tblobj.pkey
kwargs['_pkey'] = pkeys[0]
else:
where = 't0.%s in :pkeys' % tblobj.pkey
kwargs['pkeys'] = pkeys
elif isinstance(where, Bag):
kwargs.pop('where_attr',None)
where, kwargs = self._decodeWhereBag(tblobj, where, kwargs)
if condition and not pkeys:
where = '( %s ) AND ( %s )' % (where, condition)
query = tblobj.query(columns=columns, distinct=distinct, where=where,
order_by=order_by, limit=limit, offset=offset, group_by=group_by, having=having,
relationDict=relationDict, sqlparams=sqlparams, locale=self.page.locale,
excludeLogicalDeleted=excludeLogicalDeleted, **kwargs)
if sqlContextName:
self._joinConditionsFromContext(query, sqlContextName)
selection = query.selection(sortedBy=sortedBy, _aggregateRows=True)
#if sqlContextBag:
# THIS BLOCK SHOULD ALLOW US TO HAVE AN APPLYMETHOD INSIDE SQLCONTEXT.
# IT DOES NOT WORK BUT WE THINK IT'S USELESS
# joinBag = sqlContextBag['%s_%s' % (target_fld.replace('.','_'), from_fld.replace('.','_'))]
# if joinBag and joinBag.get('applymethod'):
# applyPars = self._getApplyMethodPars(kwargs)
# self.page.getPublicMethod('rpc', joinBag['applymethod'])(selection,**applyPars)
#
return selection
def rpc_createSelection(self, table='', selectionName='', distinct=False, columns='', where='', condition=None,
order_by=None, limit=None, offset=None, group_by=None, having=None,
relationDict=None, sqlparams=None, pkeys=None,
selectmethod=None, expressions=None, apply=None, sortedBy=None, **kwargs):
"""Create a new selection and freezes
@param table: tbale name
@param selectionName: the name of the selection, empty or '*' will default to a new uuid
@param pkeys: a json or comma separated list of pkey to find (overwrite the where parameter)
@param selectmethod: a page method with rpc_ prefix which receive all parameters and has to return a selection object
@param expressions: comma separated list of expr_ methods which returns the sql string for a column (probably a formula)
@param apply: a page method with rpc_ prefix which will be applied to the selection (see gnrsqldata.SqlSelection.apply)
@param sortedBy: sort the selection after apply, for sort in python with calculated columns available
"""
t = time.time()
tblobj = self.db.table(table)
if selectionName == '*' or not selectionName:
selectionName = getUuid()
if selectmethod:
selectmethod = getattr(self.page, 'rpc_%s' % selectmethod)
else:
selectmethod = self._default_getSelection
selection = selectmethod(tblobj=tblobj, table=table, distinct=distinct, columns=columns, where=where,
condition=condition,
order_by=order_by, limit=limit, offset=offset, group_by=group_by, having=having,
relationDict=relationDict, sqlparams=sqlparams,
pkeys=pkeys, expressions=expressions, **kwargs)
if apply:
selection.apply(getattr(self.page, 'rpc_%s' % apply))
if sortedBy:
selection.sort(sortedBy)
self.page.freezeSelection(selection, selectionName)
resultAttributes = dict(table=table, selectionName=selectionName,
servertime=int((time.time() - t) * 1000),
newproc=getattr(self, 'self.newprocess', 'no'))
return (len(selection), resultAttributes)
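
    # A hypothetical call, for illustration only (table, column and parameter
    # names are invented):
    #   count, attrs = self.rpc_createSelection(table='mypkg.mytable',
    #                                           where='$amount > :minval',
    #                                           minval=100, sortedBy='amount')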
def _decodeWhereBag(self, tblobj, where, kwargs):
if hasattr(self.page, 'getSelection_filters'):
selection_filters = self.page.getSelection_filters()
if selection_filters:
new_where = Bag()
new_where.setItem('filter', selection_filters)
new_where.setItem('where', where, jc='and')
where = new_where
page = self.page
customOpCbDict = dict([(x[12:], getattr(page, x)) for x in dir(page) if x.startswith('customSqlOp_')])
return tblobj.sqlWhereFromBag(where, kwargs, customOpCbDict=customOpCbDict)
def _columnsFromStruct(self, viewbag, columns=None):
if columns is None:
columns = []
if not viewbag:
return
for node in viewbag:
fld = node.getAttr('field')
if fld:
if not (fld[0] in ('$', '@')):
fld = '$' + fld
columns.append(fld)
if isinstance(node.value, Bag):
self._columnsFromStruct(node.value, columns)
return ','.join(columns)
def gridSelectionData(self, selection, outsource, recordResolver, numberedRows, logicalDeletionField,
_addClassesDict=None):
result = Bag()
for j, row in enumerate(outsource):
row = dict(row)
_customClasses = (row.get('_customClasses', '') or '').split(' ')
pkey = row.pop('pkey', None)
isDeleted = row.pop('_isdeleted', None)
if isDeleted:
_customClasses.append('logicalDeleted')
if _addClassesDict:
for fld, _class in _addClassesDict.items():
if row[fld]:
_customClasses.append(_class)
if numberedRows or not pkey:
row_key = 'r_%i' % j
else:
row_key = toText(pkey).replace('.', '_')
result.setItem(row_key, None, _pkey=pkey or row_key,
_target_fld='%s.%s' % (selection.dbtable.fullname, selection.dbtable.pkey),
_relation_value=pkey, _resolver_name='relOneResolver',
_attributes=row, _removeNullAttributes=False, _customClasses=' '.join(_customClasses))
return result
def gridSelectionStruct(self, selection):
structure = Bag()
r = structure.child('view').child('row')
for colname in selection.columns:
            if colname not in ('pkey', 'rowidx'):
kwargs = dict(selection.colAttrs.get(colname, {}))
kwargs.pop('tag', None)
kwargs['name'] = kwargs.pop('label')
if kwargs['dataType'] == 'D':
kwargs['format_date'] = 'short'
size = kwargs.pop('size', None)
size = kwargs.pop('print_width', size)
if size:
if isinstance(size, basestring):
if ':' in size:
size = size.split(':')[1]
size = int(size)
if size < 3:
width = size * 1.1
                    elif size < 6:
width = size
elif size < 10:
width = size * .8
elif size < 20:
width = size * .7
else:
width = size * .6
kwargs['width'] = '%iem' % (1 + int(int(width) * .7))
r.child('cell', childname=colname, field=colname, **kwargs)
return structure
#@timer_call()
#
def _getRecord_locked(self, tblobj, record, recInfo):
#locked,aux=self.page.site.lockRecord(self.page,tblobj.fullname,record[tblobj.pkey])
locked = False
aux = []
if locked:
recInfo['lockId'] = aux
return
for f in aux:
recInfo['locking_%s' % f] = aux[f]
def rpc_getRecord(self, table=None, dbtable=None, pkg=None, pkey=None,
ignoreMissing=True, ignoreDuplicate=True, lock=False, readOnly=False,
from_fld=None, target_fld=None, sqlContextName=None, applymethod=None,
js_resolver_one='relOneResolver', js_resolver_many='relManyResolver',
loadingParameters=None, eager=None, virtual_columns=None, **kwargs):
t = time.time()
dbtable = dbtable or table
if pkg:
dbtable = '%s.%s' % (pkg, dbtable)
tblobj = self.db.table(dbtable)
if pkey is not None:
kwargs['pkey'] = pkey
elif lock:
lock = False
if lock:
kwargs['for_update'] = True
rec = tblobj.record(eager=eager or self.page.eagers.get(dbtable),
ignoreMissing=ignoreMissing, ignoreDuplicate=ignoreDuplicate,
sqlContextName=sqlContextName, virtual_columns=virtual_columns, **kwargs)
if sqlContextName:
self._joinConditionsFromContext(rec, sqlContextName)
if (pkey == '*newrecord*'):
record = rec.output('newrecord', resolver_one=js_resolver_one, resolver_many=js_resolver_many)
else:
record = rec.output('bag', resolver_one=js_resolver_one, resolver_many=js_resolver_many)
pkey = record[tblobj.pkey] or '*newrecord*'
newrecord = pkey == '*newrecord*'
recInfo = dict(_pkey=pkey,
caption=tblobj.recordCaption(record, newrecord),
_newrecord=newrecord, sqlContextName=sqlContextName)
#if lock and not newrecord:
if not newrecord and not readOnly:
recInfo['updatable'] = tblobj.check_updatable(record)
recInfo['deletable'] = tblobj.check_deletable(record)
if lock:
self._getRecord_locked(tblobj, record, recInfo)
loadingParameters = loadingParameters or {}
defaultParameters = dict([(k[8:], v) for k, v in kwargs.items() if k.startswith('default_')])
loadingParameters.update(defaultParameters)
method = None
if loadingParameters:
method = loadingParameters.pop('method', None)
if method:
handler = self.page.getPublicMethod('rpc', method)
else:
if dbtable == self.page.maintable:
method = 'onLoading' # TODO: fall back on the next case if onLoading is missing?
# (or maybe execute both if they exist)
else:
#self.page.gnotify('getRecord', dbtable, True)
method = 'onLoading_%s' % dbtable.replace('.', '_')
handler = getattr(self.page, method, None)
if handler:
if defaultParameters and newrecord:
self.setRecordDefaults(record, defaultParameters)
handler(record, newrecord, loadingParameters, recInfo)
elif newrecord and loadingParameters:
self.setRecordDefaults(record, loadingParameters)
if applymethod:
applyPars = self._getApplyMethodPars(kwargs, newrecord=newrecord, loadingParameters=loadingParameters,
recInfo=recInfo, tblobj=tblobj)
self.page.getPublicMethod('rpc', applymethod)(record, **applyPars)
recInfo['servertime'] = int((time.time() - t) * 1000)
if tblobj.lastTS:
recInfo['lastTS'] = str(record[tblobj.lastTS])
recInfo['table'] = dbtable
return (record, recInfo)
def setRecordDefaults(self, record, defaults):
for k, v in defaults.items():
if k in record:
record[k] = v
#pass
def rpc_dbSelect(self, dbtable=None, columns=None, auxColumns=None, hiddenColumns=None, rowcaption=None,
_id=None, _querystring='', querystring=None, ignoreCase=True, exclude=None,
condition=None, limit=None, alternatePkey=None, order_by=None, selectmethod=None,
notnull=None, weakCondition=False, **kwargs):
"""
* dbtable: table source for the query
* columns: columns that are involved into the query
* auxColumns: showed only as result, not involved in the search.
* hiddenColumns: data that is retrieved but is not showed.
* rowcaption: what you see into the field. Often is different from
what you set with dbselect
* condition: more condition into the query. Every kwargs params that
starts with condition_ are the variables involved in the 'where' clause.
* selectmethod: custom rpc_method you can use to make the query on the server.
* weakCondition: will apply the condition if there is a result, but if there is no result for the condition
then the condition will not be used. A selectmethod over-rides this attribute.
"""
resultClass = ''
if selectmethod or not condition:
weakCondition = False
t0 = time.time()
        querystring = _querystring or querystring # TODO: should this be changed in gnrstores.js instead?
if limit is None:
limit = self.gnrapp.config.get('dbselect?limit', 10)
limit = int(limit)
result = Bag()
tblobj = self.db.table(dbtable)
captioncolumns = tblobj.rowcaptionDecode(rowcaption)[0]
querycolumns = tblobj.getQueryFields(columns, captioncolumns)
showcolumns = gnrlist.merge(captioncolumns, tblobj.columnsFromString(auxColumns))
resultcolumns = gnrlist.merge(showcolumns, captioncolumns, tblobj.columnsFromString(hiddenColumns))
        if alternatePkey and alternatePkey not in resultcolumns:
resultcolumns.append("$%s" % alternatePkey if not alternatePkey.startswith('$') else alternatePkey)
selection = None
identifier = 'pkey'
rows = []
if _id:
if alternatePkey:
where = '$%s = :id' % alternatePkey
else:
where = '$%s = :id' % identifier
selection = tblobj.query(columns=','.join(resultcolumns),
where=where, excludeLogicalDeleted=False,
limit=1, id=_id).selection()
elif querystring:
querystring = querystring.strip('*')
if querystring.isdigit():
querystring = "%s%s" % ('%', querystring)
if selectmethod:
selectHandler = self.page.getPublicMethod('rpc', selectmethod)
else:
selectHandler = self.rpc_dbSelect_default
selection = selectHandler(tblobj=tblobj, querycolumns=querycolumns, querystring=querystring,
resultcolumns=resultcolumns, condition=condition, exclude=exclude,
limit=limit, order_by=order_by,
identifier=identifier, ignoreCase=ignoreCase, **kwargs)
if not selection and weakCondition:
resultClass = 'relaxedCondition'
selection = selectHandler(tblobj=tblobj, querycolumns=querycolumns, querystring=querystring,
resultcolumns=resultcolumns, exclude=exclude,
limit=limit, order_by=order_by,
identifier=identifier, ignoreCase=ignoreCase, **kwargs)
_attributes = {}
resultAttrs = {}
if selection:
showcols = [tblobj.colToAs(c.lstrip('$')) for c in showcolumns]
result = selection.output('selection', locale=self.page.locale, caption=rowcaption or True)
colHeaders = [selection.colAttrs[k]['label'] for k in showcols]
colHeaders = [self.page._(c) for c in colHeaders]
resultAttrs = {'columns': ','.join(showcols), 'headers': ','.join(colHeaders)}
if not notnull:
result.setItem('null_row', None, caption='', _pkey=None)
resultAttrs['resultClass'] = resultClass
resultAttrs['dbselect_time'] = time.time() - t0
return (result, resultAttrs)
def rpc_dbSelect_selection(self, tblobj, querystring, columns=None, auxColumns=None, **kwargs):
querycolumns = tblobj.getQueryFields(columns)
showcolumns = gnrlist.merge(querycolumns, tblobj.columnsFromString(auxColumns))
captioncolumns = tblobj.rowcaptionDecode()[0]
resultcolumns = gnrlist.merge(showcolumns, captioncolumns)
querystring = querystring or ''
querystring = querystring.strip('*')
return self.rpc_dbSelect_default(tblobj, querycolumns, querystring, resultcolumns, **kwargs)
def rpc_dbSelect_default(self, tblobj, querycolumns, querystring, resultcolumns,
condition=None, exclude=None, limit=None, order_by=None,
identifier=None, ignoreCase=None, **kwargs):
def getSelection(where, **searchargs):
whereargs = {}
whereargs.update(kwargs)
whereargs.update(searchargs)
if where and condition:
where = '%s AND %s' % (where, condition)
else:
where = where or condition
return tblobj.query(where=where, columns=','.join(resultcolumns), limit=limit,
order_by=order_by or querycolumns[0], exclude_list=exclude_list,
**whereargs).selection()
exclude_list = None
if exclude:
if isinstance(exclude, basestring):
exclude_list = [t.strip() for t in exclude.split(',')]
else:
exclude_list = [t for t in exclude if t] # None values break the query
if exclude_list:
exclude_cond = 'NOT ($pkey IN :exclude_list )'
if condition:
condition = '%s AND %s' % (condition, exclude_cond)
else:
condition = exclude_cond
kwargs.pop('where', None)
srclist = querystring.split()
if not srclist:
return getSelection(None)
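        # Staged lookup: 1) prefix ILIKE on the first query column; if that is
        # empty, 2) word-boundary regex across all query columns; if still
        # empty, 3) substring ILIKE across all query columns.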
result = getSelection("%s ILIKE :searchval" % querycolumns[0], searchval='%s%%' % ('%% '.join(srclist)))
columns_concat = "ARRAY_TO_STRING(ARRAY[%s], ' ')" % ','.join(querycolumns)
if len(result) == 0: # few results from the startswith query on first col
#self.page.gnotify('dbselect','filter')
regsrc = [x for x in re.split(" ", ESCAPE_SPECIAL.sub('', querystring)) if x]
if regsrc:
whereargs = dict([('w%i' % i, '(^|\\W)%s' % w.strip()) for i, w in enumerate(regsrc)])
#where =" AND ".join(["(%s) ~* :w%i" % (" || ' ' || ".join(querycolumns), i) for i,w in enumerate(regsrc)])
where = " AND ".join(["(%s) ~* :w%i" % (columns_concat, i) for i, w in enumerate(regsrc)])
result = getSelection(where, **whereargs)
if len(result) == 0:
#self.page.gnotify('dbselect','contained')
whereargs = dict([('w%i' % i, '%%%s%%' % w.strip()) for i, w in enumerate(srclist)])
#where =" AND ".join(["(%s) ILIKE :w%i" % (" || ' ' || ".join(querycolumns), i) for i,w in enumerate(srclist)])
where = " AND ".join(["(%s) ILIKE :w%i" % (columns_concat, i) for i, w in enumerate(srclist)])
result = getSelection(where, **whereargs)
return result
def _relPathToCaption(self, table, relpath):
if not relpath: return ''
tbltree = self.db.relationExplorer(table, dosort=False, pyresolver=True)
fullcaption = tbltree.cbtraverse(relpath, lambda node: self.page._(node.getAttr('name_long')))
return ':'.join(fullcaption)
def rpc_getRecordForm(self, dbtable=None, fields=None, **kwargs):
self.getRecordForm(self.newSourceRoot(), dbtable=dbtable, fields=fields, **kwargs)
def formAuto(self, pane, table, columns='', cols=2):
fb = pane.formbuilder(cols=cols)
tblobj = self.db.table(table)
if not columns:
            columns = [colname for colname, col in tblobj.columns.items()
                       if not col.isReserved and col.dtype not in ('X', 'Z')]
elif isinstance(columns, basestring):
columns = splitAndStrip(columns)
fb.placeFields(','.join(columns))
def rpc_pdfmaker(self, pdfmode, txt, **kwargs):
filename = '%s.pdf' % self.page.getUuid()
fpath = self.page.pageLocalDocument(filename)
getattr(self.page, 'pdf_%s' % pdfmode)(fpath, txt, **kwargs)
return filename
def rpc_downloadPDF(self, filename, forcedownload=False, **kwargs):
response = self.page.response
response.content_type = "application/pdf"
if forcedownload:
response.add_header("Content-Disposition", str("attachment; filename=%s" % filename))
else:
response.add_header("Content-Disposition", str("filename=%s" % filename))
fpath = self.page.pageLocalDocument(filename)
response.sendfile(fpath)
os.remove(fpath)
def _exportFileNameClean(self, filename=None):
filename = filename or self.page.maintable or self.page.request.path_info.split('/')[-1]
filename = filename.replace(' ', '_').replace('.', '_').replace('/', '_')[:64]
filename = filename.encode('ascii', 'ignore')
return filename
def _getStoreBag(self, storebag):
        # TODO: unfinished
if isinstance(storebag, basestring):
if storebag.startswith('gnrsel:'):
x, tbl, filename = storebag.split(':', 2)
sel = self.unfreezeSelection(self.app.db.table(tbl), filename)
storebag = sel.output('grid')
else:
storebag = Bag(self.pageLocalDocument(storebag))
return storebag
def _printCellStyle(self, colAttr):
style = [colAttr.get('style')]
styleAttrNames = ('height', 'width', 'top', 'left', 'right', 'bottom',
'visibility', 'overflow', 'float', 'clear', 'display',
'z_index', 'border', 'position', 'padding', 'margin',
'color', 'white_space', 'vertical_align')
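        # e.g. colAttr {'width': '10em', 'white_space': 'nowrap'} yields
        # 'width: 10em; white-space: nowrap;' (attribute order may vary)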
def isStyleAttr(name):
for st in styleAttrNames:
if name == st or name.startswith('%s_' % st):
return True
for k, v in colAttr.items():
if isStyleAttr(k):
style.append('%s: %s;' % (k.replace('_', '-'), v))
style = ' '.join([v for v in style if v])
return style
def rpc_printStaticGrid(self, structbag, storebag, filename=None, makotemplate='standard_print.tpl', **kwargs):
filename = self._exportFileNameClean(filename)
        if not filename.lower().endswith(('.html', '.htm')):
filename += '.html'
storebag = self._getStoreBag(storebag)
columns = []
colAttrs = {}
for view in structbag.values():
for row in view.values():
for cell in row:
col = self.db.colToAs(cell.getAttr('field'))
columns.append(col)
colAttr = cell.getAttr()
dtype = colAttr.get('dtype')
if dtype and not ('format' in colAttr):
colAttr['format'] = 'auto_%s' % dtype
colAttr['style'] = self._printCellStyle(colAttr)
colAttrs[col] = colAttr
outdata = []
for row in storebag:
outdata.append(row.getAttr())
result = self.page.pluginhandler.get_plugin('mako')(path=makotemplate, striped='odd_row,even_row',
outdata=outdata, colAttrs=colAttrs,
columns=columns, meta=kwargs)
#fpath = self.page.pageLocalDocument(filename)
fpath = self.page.temporaryDocument(filename)
f = open(fpath, 'w')
if isinstance(result, unicode):
result = result.encode('utf-8')
f.write(result)
f.close()
return self.page.temporaryDocumentUrl(filename)
#return filename
def rpc_printStaticGridDownload(self, filename, **kwargs):
fpath = self.page.pageLocalDocument(filename)
f = open(fpath, 'r')
result = f.read()
f.close()
os.remove(fpath)
return result.decode('utf-8')
def rpc_recordToPDF(self, table, pkey, template, **kwargs):
record = self.db.table(table).record(pkey).output('bag')
return self.page.rmlTemplate(path=template, record=record)
def rpc_includedViewAction(self, action=None, export_mode=None, respath=None, table=None, data=None, struct=None,
datamode=None, downloadAs=None, **kwargs):
page = self.page
if downloadAs:
import mimetypes
page.response.content_type = mimetypes.guess_type(downloadAs)[0]
page.response.add_header("Content-Disposition", str("attachment; filename=%s" % downloadAs))
if not respath:
respath = 'action/%s' % action
res_obj = self.page.site.loadTableScript(page=self.page, table=table,
respath=respath, class_name='Main')
return res_obj.gridcall(data=data, struct=struct, export_mode=export_mode, datamode=datamode)
class BatchExecutor(object):
def __init__(self, page):
#self._page = weakref.ref(page)
self._page = page
def _get_page(self):
if self._page:
#return self._page()
return self._page
page = property(_get_page)
|
vuteam/BlackHole-New | refs/heads/master | lib/python/Components/Converter/RdsInfo.py | 163 | from enigma import iRdsDecoder, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
class RdsInfo(Converter, object):
RASS_INTERACTIVE_AVAILABLE = 0
RTP_TEXT_CHANGED = 1
RADIO_TEXT_CHANGED = 2
def __init__(self, type):
Converter.__init__(self, type)
self.type, self.interesting_events = {
"RadioText": (self.RADIO_TEXT_CHANGED, (iPlayableService.evUpdatedRadioText,)),
"RtpText": (self.RTP_TEXT_CHANGED, (iPlayableService.evUpdatedRtpText,)),
"RasInteractiveAvailable": (self.RASS_INTERACTIVE_AVAILABLE, (iPlayableService.evUpdatedRassInteractivePicMask,))
}[type]
@cached
def getText(self):
decoder = self.source.decoder
text = ""
if decoder:
if self.type == self.RADIO_TEXT_CHANGED:
text = decoder.getText(iRdsDecoder.RadioText)
elif self.type == self.RTP_TEXT_CHANGED:
text = decoder.getText(iRdsDecoder.RtpText)
else:
print "unknown RdsInfo Converter type", self.type
return text
text = property(getText)
@cached
def getBoolean(self):
decoder = self.source.decoder
if self.type == self.RASS_INTERACTIVE_AVAILABLE:
mask = decoder and decoder.getRassInteractiveMask()
return (mask and mask[0] & 1 and True) or False
elif self.type == self.RADIO_TEXT_CHANGED:
return (len(decoder.getText(iRdsDecoder.RadioText)) and True) or False
elif self.type == self.RTP_TEXT_CHANGED:
return (len(decoder.getText(iRdsDecoder.RtpText)) and True) or False
boolean = property(getBoolean)
def changed(self, what):
if what[0] != self.CHANGED_SPECIFIC or what[1] in self.interesting_events:
Converter.changed(self, what)
|
colinnewell/odoo | refs/heads/8.0 | addons/document/__openerp__.py | 260 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Document Management System',
'version': '2.1',
'category': 'Knowledge Management',
'description': """
This is a complete document management system.
==============================================
* User Authentication
    * Document Indexation: .pptx and .docx files are not supported on the Windows platform.
* Dashboard for Document that includes:
* New Files (list)
* Files by Resource Type (graph)
* Files by Partner (graph)
* Files Size by Month (graph)
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['knowledge', 'mail'],
'data': [
'security/document_security.xml',
'document_view.xml',
'document_data.xml',
'wizard/document_configuration_view.xml',
'security/ir.model.access.csv',
'report/document_report_view.xml',
'views/document.xml',
],
'demo': [ 'document_demo.xml' ],
'test': ['test/document_test2.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Juanlu001/CBC.Solve | refs/heads/master | cbc/swing/utils.py | 3 | "This module provides various utility functions"
__author__ = "Kristoffer Selim and Anders Logg"
__copyright__ = "Copyright (C) 2010 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
# Last changed: 2011-02-04
from os import mkdir
from time import strftime
from dolfin import CellFunction
def array_to_meshfunction(x, mesh):
"Convert array x to cell function on Omega"
f = CellFunction("double", mesh)
    if f.size() != x.size:
raise RuntimeError, "Size of vector does not match number of cells."
for i in range(x.size):
f[i] = x[i]
return f
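# A minimal usage sketch (assumes a dolfin mesh whose cell count matches x):
#   import numpy
#   f = array_to_meshfunction(numpy.zeros(mesh.num_cells()), mesh)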
def date():
"Return string for current date."
return strftime("%Y-%m-%d-%H-%M-%S")
|
doheekim/chuizonetest | refs/heads/master | lib/sqlalchemy/engine/__init__.py | 10 | # engine/__init__.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL connections, SQL execution and high-level DB-API interface.
The engine package defines the basic components used to interface
DB-API modules with higher-level statement construction,
connection-management, execution and result contexts. The primary
"entry point" class into this package is the Engine and its public
constructor ``create_engine()``.
This package includes:
base.py
Defines interface classes and some implementation classes which
comprise the basic components used to interface between a DB-API,
constructed and plain-text statements, connections, transactions,
and results.
default.py
Contains default implementations of some of the components defined
in base.py. All current database dialects use the classes in
default.py as base classes for their own database-specific
implementations.
strategies.py
The mechanics of constructing ``Engine`` objects are represented
here. Defines the ``EngineStrategy`` class which represents how
to go from arguments specified to the ``create_engine()``
function, to a fully constructed ``Engine``, including
initialization of connection pooling, dialects, and specific
subclasses of ``Engine``.
threadlocal.py
The ``TLEngine`` class is defined here, which is a subclass of
the generic ``Engine`` and tracks ``Connection`` and
``Transaction`` objects against the identity of the current
thread. This allows certain programming patterns based around
the concept of a "thread-local connection" to be possible.
The ``TLEngine`` is created by using the "threadlocal" engine
strategy in conjunction with the ``create_engine()`` function.
url.py
Defines the ``URL`` class which represents the individual
components of a string URL passed to ``create_engine()``. Also
defines a basic module-loading strategy for the dialect specifier
within a URL.
"""
from .interfaces import (
Connectable,
Dialect,
ExecutionContext,
ExceptionContext,
# backwards compat
Compiled,
TypeCompiler
)
from .base import (
Connection,
Engine,
NestedTransaction,
RootTransaction,
Transaction,
TwoPhaseTransaction,
)
from .result import (
BufferedColumnResultProxy,
BufferedColumnRow,
BufferedRowResultProxy,
FullyBufferedResultProxy,
ResultProxy,
RowProxy,
)
from .util import (
connection_memoize
)
from . import util, strategies
# backwards compat
from ..sql import ddl
default_strategy = 'plain'
def create_engine(*args, **kwargs):
"""Create a new :class:`.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`.Engine`
and its underlying :class:`.Dialect` and :class:`.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
:class:`.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`.create_engine()` usage.
Once established, the newly resulting :class:`.Engine` will
request a connection from the underlying :class:`.Pool` once
:meth:`.Engine.connect` is called, or a method which depends on it
such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive=True: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
.. versionchanged:: 0.8
By default, result row names match case-sensitively.
In version 0.7 and prior, all matches were case-insensitive.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, sets
the default behavior of ``convert_unicode`` on the
:class:`.String` type to ``True``, regardless
of a setting of ``False`` on an individual
:class:`.String` type, thus causing all :class:`.String`
-based columns
to accommodate Python ``unicode`` objects. This flag
is useful as an engine-wide setting when using a
DBAPI that does not natively support Python
``unicode`` objects and raises an error when
one is received (such as pyodbc with FreeTDS).
See :class:`.String` for further details on
what this flag indicates.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
:param echo=False: if True, the Engine will log all statements
as well as a repr() of their parameter lists to the engines
logger, which defaults to sys.stdout. The ``echo`` attribute of
``Engine`` can be modified at any time to turn logging on and
off. If set to the string ``"debug"``, result rows will be
printed to the standard output as well. This flag ultimately
controls a Python logger; see :ref:`dbengine_logging` for
information on how to configure logging directly.
:param echo_pool=False: if True, the connection pool will log
all checkouts/checkins to the logging stream, which defaults to
sys.stdout. This flag ultimately controls a Python logger; see
:ref:`dbengine_logging` for information on how to configure logging
directly.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPI.** Most modern DBAPIs feature some degree of
direct support for Python ``unicode`` objects,
what you see in Python 2 as a string of the form
``u'some string'``. For those scenarios where the
DBAPI is detected as not supporting a Python ``unicode``
object, this encoding is used to determine the
source/destination encoding. It is **not used**
for those cases where the DBAPI handles unicode
directly.
To properly configure a system to accommodate Python
``unicode`` objects, the DBAPI should be
configured to handle unicode to the greatest
degree as is appropriate - see
the notes on unicode pertaining to the specific
target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support
*all* of the above values as Python ``unicode`` objects,
which in Python 3 are just known as ``str``. In Python 2,
the DBAPI does not specify unicode behavior at all,
so SQLAlchemy must make decisions for each of the above
values on a per-DBAPI basis - implementations are
completely inconsistent in their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including Postgresql, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length`` is used instead.
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`.Engine`.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
:param pool_reset_on_return='rollback': set the "reset on return"
behavior of the pool, which is whether ``rollback()``,
``commit()``, or nothing is called upon connections
being returned to the pool. See the docstring for
``reset_on_return`` at :class:`.Pool`.
.. versionadded:: 0.7.6
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param strategy='plain': selects alternate engine implementations.
Currently available are:
* the ``threadlocal`` strategy, which is described in
:ref:`threadlocal_strategy`;
* the ``mock`` strategy, which dispatches all statement
execution to a function passed as the argument ``executor``.
See `example in the FAQ
<http://www.sqlalchemy.org/trac/wiki/FAQ#HowcanIgettheCREATETABLEDROPTABLEoutputasastring>`_.
:param executor=None: a function taking arguments
``(sql, *multiparams, **params)``, to which the ``mock`` strategy will
dispatch all statement execution. Used only by ``strategy='mock'``.
"""
strategy = kwargs.pop('strategy', default_strategy)
strategy = strategies.strategies[strategy]
return strategy.create(*args, **kwargs)
def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file where keys
are prefixed, such as sqlalchemy.url, sqlalchemy.echo, etc. The
'prefix' argument indicates the prefix to be searched for.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. In a future release, this
functionality will be expanded and include dialect-specific
arguments.
"""
options = dict((key[len(prefix):], configuration[key])
for key in configuration
if key.startswith(prefix))
options['_coerce_config'] = True
options.update(kwargs)
url = options.pop('url')
return create_engine(url, **options)
__all__ = (
'create_engine',
'engine_from_config',
)
|
Diaoul/subliminal | refs/heads/master | tests/test_podnapisi.py | 2 | # -*- coding: utf-8 -*-
import os
from babelfish import Language
import pytest
from vcr import VCR
from subliminal.providers.podnapisi import PodnapisiProvider, PodnapisiSubtitle
vcr = VCR(path_transformer=lambda path: path + '.yaml',
record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
cassette_library_dir=os.path.realpath(os.path.join('tests', 'cassettes', 'podnapisi')))
def test_get_matches_movie(movies):
subtitle_releases = [
'Man.Of.Steel.2013.720p.BRRip.x264.AAC-ViSiON', 'Man.Of.Steel.2013.720p.BluRay.x264-Felony',
'Man.Of.Steel.2013.1080p.BluRay.x264-SECTOR7', 'Man.Of.Steel.2013.720p.BRRip.x264.AC3-UNDERCOVER',
'Man.Of.Steel.2013.BDRip.XviD.MP3-RARBG', 'Man.Of.Steel.(2013).BDRip.600MB.Ganool',
'Man.of.Steel.2013.BDRip.x264.700MB-Micromkv', 'Man.Of.Steel.2013.BRRip.AAC.x264-SSDD',
'Man.Of.Steel.2013.BDRip.x264-Larceny', 'Man.Of.Steel.2013.BDRiP.XViD-NoGRP',
'Man.Of.Steel.2013.720p.BRRip.x264.AC3-EVO', 'Man.of.Steel.2013.720p.BRRip.h264.AAC-RARBG',
'Man.Of.Steel.[2013].BRRip.XviD-ETRG', 'Man.of.Steel.[2013].BRRip.XViD.[AC3]-ETRG',
'Man.Of.Steel.2013.BRRiP.XVID.AC3-MAJESTIC', 'Man.of.steel.2013.BRRip.XviD.AC3-RARBG',
'Man.Of.Steel.2013.720p.BRRip.x264.AC3-SUPERM4N', 'Man.Of.Steel.2013.720p.BRRip.XviD.AC3-ViSiON',
'Man.Of.Steel.2013.720p.BRRip.x264.AC3-JYK', 'Man.of.Steel.[2013].DVDRIP.DIVX.[Eng]-DUQA',
'Man.of.Steel.2013.1080p.BluRay.x264.YIFY'
]
subtitle = PodnapisiSubtitle(Language('eng'), True, None, 'EMgo', subtitle_releases, 'Man of Steel', None, None,
2013)
matches = subtitle.get_matches(movies['man_of_steel'])
assert matches == {'title', 'year', 'country', 'video_codec', 'resolution', 'source', 'release_group'}
def test_get_matches_episode(episodes):
subtitle_releases = [
'The.Big.Bang.Theory.S07E05.HDTV.x264-LOL', 'The.Big.Bang.Theory.S07E05.720p.HDTV.x264-DIMENSION',
'The.Big.Bang.Theory.S07E05.480p.HDTV.x264-mSD', 'The.Big.Bang.Theory.S07E05.HDTV.XviD-AFG'
]
subtitle = PodnapisiSubtitle(Language('eng'), False, None, 'EdQo', subtitle_releases, 'The Big Bang Theory', 7, 5,
2007)
matches = subtitle.get_matches(episodes['bbt_s07e05'])
assert matches == {'series', 'season', 'episode', 'video_codec', 'resolution', 'source', 'release_group', 'year',
'country'}
def test_get_matches_episode_year(episodes):
subtitle_releases = ['Dallas.2012.S01E03.HDTV.x264-LOL']
subtitle = PodnapisiSubtitle(Language('eng'), True, None, '-5oa', subtitle_releases, 'Dallas', 1, 3, 2012)
matches = subtitle.get_matches(episodes['dallas_2012_s01e03'])
assert matches == {'series', 'year', 'season', 'episode'}
def test_get_matches_no_match(episodes):
subtitle_releases = ['The.Big.Bang.Theory.S07E05.1080p.HDTV.DIMENSION']
subtitle = PodnapisiSubtitle(Language('eng'), False, None, 'EdQo', subtitle_releases, 'The Big Bang Theory', 7, 5,
2007)
matches = subtitle.get_matches(episodes['got_s03e10'])
assert matches == {'year', 'country'}
@pytest.mark.integration
@vcr.use_cassette
def test_query_movie(movies):
video = movies['man_of_steel']
language = Language('eng')
expected_subtitles = {'Nv0l', 'EMgo', '8RIm', 'whQm', 'aoYm', 'WMgp', 'Tsko', 'uYcm', 'XnUm', 'NLUo', 'ZmIm',
'MOko'}
with PodnapisiProvider() as provider:
subtitles = provider.query(language, video.title, year=video.year)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == {language}
@pytest.mark.integration
@vcr.use_cassette
def test_query_episode(episodes):
video = episodes['bbt_s07e05']
language = Language('eng')
expected_subtitles = {'EdQo', '2581', 'w581', 'ftUo', 'WNMo'}
with PodnapisiProvider() as provider:
subtitles = provider.query(language, video.series, season=video.season, episode=video.episode,
year=video.year)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == {language}
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_movie(movies):
video = movies['man_of_steel']
languages = {Language('eng'), Language('fra')}
expected_subtitles = {'Tsko', 'Nv0l', 'XnUm', 'EMgo', 'ZmIm', 'whQm', 'MOko', 'aoYm', 'WMgp', 'd_Im', 'GMso',
'8RIm', 'NLUo', 'uYcm'}
with PodnapisiProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_episode(episodes):
video = episodes['got_s03e10']
languages = {Language('eng'), Language('fra')}
expected_subtitles = {'8cMl', '6MMl', 'jcYl', 'am0s', 'msYl', '7sMl', 'k8Yl', '8BM5', 'Eaom', 'z8Ml', 'lMYl',
'78Ml', '0MMl', 'a1I8'}
with PodnapisiProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_download_subtitle(movies):
video = movies['man_of_steel']
languages = {Language('eng'), Language('fra')}
with PodnapisiProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
subtitle = [s for s in subtitles if s.pid == 'GMso'][0]
provider.download_subtitle(subtitle)
assert subtitle.content is not None
assert subtitle.is_valid() is True
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_episode_alternative_series(episodes):
video = episodes['marvels_jessica_jones_s01e13']
languages = {Language('eng')}
expected_subtitles = {'JPY-', 'BURB', 'm_c-', 'wFFC', 'tVFC', 'wlFC',
'iZk-', 'w_g-', 'CJw-', 'v5c-', 's1FC', 'u5c-'}
with PodnapisiProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_subtitles_with_title_unicode(movies):
video = movies['café_society']
languages = {Language('fra')}
expected_subtitles = {'iOlD', 'iulD', '2o5B', 'ielD'}
with PodnapisiProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
wanted_subtitle = [s for s in subtitles if s.pid == 'iOlD'][0]
matches = wanted_subtitle.get_matches(movies['café_society'])
provider.download_subtitle(wanted_subtitle)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
assert matches == {'title', 'year', 'country'}
assert wanted_subtitle.content is not None
assert wanted_subtitle.is_valid() is True
|
barbarubra/Don-t-know-What-i-m-doing. | refs/heads/master | python-build/python-libs/gdata/tests/gdata_tests/spreadsheet_test.py | 92 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected] (Laura Beth Lincoln)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata
import gdata.spreadsheet
SPREADSHEETS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<id>http://spreadsheets.google.com/feeds/spreadsheets/private/full</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Available Spreadsheets</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>[email protected]</email>
</author>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1</openSearch:itemsPerPage>
<entry>
<id>http://spreadsheets.google.com/feeds/spreadsheets/private/full/key</id>
<updated>2006-11-17T18:24:18.231Z</updated>
<title type="text">Groceries R Us</title>
<content type="text">Groceries R Us</content>
<link rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/spreadsheets/private/full/key"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>[email protected]</email>
</author>
</entry>
</feed>
"""
WORKSHEETS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<id>http://spreadsheets.google.com/feeds/worksheets/key/private/full</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Groceries R Us</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>[email protected]</email>
</author>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1</openSearch:itemsPerPage>
<entry>
<id>http://spreadsheets.google.com/feeds/worksheets/key/private/full/od6</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Sheet1</title>
<content type="text">Sheet1</content>
<link rel="http://schemas.google.com/spreadsheets/2006#listfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full"/>
<link rel="http://schemas.google.com/spreadsheets/2006#cellsfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full/od6"/>
<gs:rowCount>100</gs:rowCount>
<gs:colCount>20</gs:colCount>
</entry>
</feed>
"""
CELLS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<id>http://spreadsheets.google.com/feeds/cells/key/od6/private/full</id>
<updated>2006-11-17T18:27:32.543Z</updated>
<title type="text">Sheet1</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>[email protected]</email>
</author>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1</openSearch:itemsPerPage>
<gs:rowCount>100</gs:rowCount>
<gs:colCount>20</gs:colCount>
<entry>
<id>http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C1</id>
<updated>2006-11-17T18:27:32.543Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#cell"/>
<title type="text">A1</title>
<content type="text">Name</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C1"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C1/bgvjf"/>
<gs:cell row="1" col="1" inputValue="Name">Name</gs:cell>
</entry>
<entry>
<id>http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C2</id>
<updated>2006-11-17T18:27:32.543Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#cell"/>
<title type="text">B1</title>
<content type="text">Hours</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C2"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C2/1pn567"/>
<gs:cell row="1" col="2" inputValue="Hours">Hours</gs:cell>
</entry>
</feed>
"""
LIST_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gsx="http://schemas.google.com/spreadsheets/2006/extended">
<id>http://spreadsheets.google.com/feeds/list/key/od6/private/full</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Sheet1</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full"/>
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>[email protected]</email>
</author>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>2</openSearch:itemsPerPage>
<entry>
<id>http://spreadsheets.google.com/feeds/list/key/od6/private/full/cokwr</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#list"/>
<title type="text">Bingley</title>
<content type="text">Hours: 10, Items: 2, IPM: 0.0033</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full/cokwr"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full/cokwr/2ehkc2oh7d"/>
<gsx:name>Bingley</gsx:name>
<gsx:hours>10</gsx:hours>
<gsx:items>2</gsx:items>
<gsx:ipm>0.0033</gsx:ipm>
</entry>
<entry>
<id>http://spreadsheets.google.com/feeds/list/key/od6/private/full/cyevm</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#list"/>
<title type="text">Charlotte</title>
<content type="text">Hours: 60, Items: 18000, IPM: 5</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full/cyevm"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full/cyevm/64rl27px3zyn"/>
<gsx:name>Charlotte</gsx:name>
<gsx:hours>60</gsx:hours>
<gsx:items>18000</gsx:items>
<gsx:ipm>5</gsx:ipm>
</entry>
</feed>
"""
class ColCountTest(unittest.TestCase):
def setUp(self):
self.col_count = gdata.spreadsheet.ColCount()
def testToAndFromString(self):
self.col_count.text = '20'
self.assert_(self.col_count.text == '20')
new_col_count = gdata.spreadsheet.ColCountFromString(self.col_count.ToString())
self.assert_(self.col_count.text == new_col_count.text)
class RowCountTest(unittest.TestCase):
def setUp(self):
self.row_count = gdata.spreadsheet.RowCount()
def testToAndFromString(self):
self.row_count.text = '100'
self.assert_(self.row_count.text == '100')
new_row_count = gdata.spreadsheet.RowCountFromString(self.row_count.ToString())
self.assert_(self.row_count.text == new_row_count.text)
class CellTest(unittest.TestCase):
def setUp(self):
self.cell = gdata.spreadsheet.Cell()
def testToAndFromString(self):
self.cell.text = 'test cell'
self.assert_(self.cell.text == 'test cell')
self.cell.row = '1'
self.assert_(self.cell.row == '1')
self.cell.col = '2'
self.assert_(self.cell.col == '2')
self.cell.inputValue = 'test input value'
self.assert_(self.cell.inputValue == 'test input value')
self.cell.numericValue = 'test numeric value'
self.assert_(self.cell.numericValue == 'test numeric value')
new_cell = gdata.spreadsheet.CellFromString(self.cell.ToString())
self.assert_(self.cell.text == new_cell.text)
self.assert_(self.cell.row == new_cell.row)
self.assert_(self.cell.col == new_cell.col)
self.assert_(self.cell.inputValue == new_cell.inputValue)
self.assert_(self.cell.numericValue == new_cell.numericValue)
class CustomTest(unittest.TestCase):
def setUp(self):
self.custom = gdata.spreadsheet.Custom()
def testToAndFromString(self):
self.custom.text = 'value'
self.custom.column = 'column_name'
self.assert_(self.custom.text == 'value')
self.assert_(self.custom.column == 'column_name')
new_custom = gdata.spreadsheet.CustomFromString(self.custom.ToString())
self.assert_(self.custom.text == new_custom.text)
self.assert_(self.custom.column == new_custom.column)
class SpreadsheetsWorksheetTest(unittest.TestCase):
def setUp(self):
self.worksheet = gdata.spreadsheet.SpreadsheetsWorksheet()
def testToAndFromString(self):
self.worksheet.row_count = gdata.spreadsheet.RowCount(text='100')
self.assert_(self.worksheet.row_count.text == '100')
self.worksheet.col_count = gdata.spreadsheet.ColCount(text='20')
self.assert_(self.worksheet.col_count.text == '20')
new_worksheet = gdata.spreadsheet.SpreadsheetsWorksheetFromString(
self.worksheet.ToString())
self.assert_(self.worksheet.row_count.text == new_worksheet.row_count.text)
self.assert_(self.worksheet.col_count.text == new_worksheet.col_count.text)
class SpreadsheetsCellTest(unittest.TestCase):
def setUp(self):
self.entry = gdata.spreadsheet.SpreadsheetsCell()
def testToAndFromString(self):
self.entry.cell = gdata.spreadsheet.Cell(text='my cell', row='1', col='2',
inputValue='my input value', numericValue='my numeric value')
self.assert_(self.entry.cell.text == 'my cell')
self.assert_(self.entry.cell.row == '1')
self.assert_(self.entry.cell.col == '2')
self.assert_(self.entry.cell.inputValue == 'my input value')
self.assert_(self.entry.cell.numericValue == 'my numeric value')
new_cell = gdata.spreadsheet.SpreadsheetsCellFromString(self.entry.ToString())
self.assert_(self.entry.cell.text == new_cell.cell.text)
self.assert_(self.entry.cell.row == new_cell.cell.row)
self.assert_(self.entry.cell.col == new_cell.cell.col)
self.assert_(self.entry.cell.inputValue == new_cell.cell.inputValue)
self.assert_(self.entry.cell.numericValue == new_cell.cell.numericValue)
class SpreadsheetsListTest(unittest.TestCase):
def setUp(self):
self.row = gdata.spreadsheet.SpreadsheetsList()
def testToAndFromString(self):
self.row.custom['column_1'] = gdata.spreadsheet.Custom(column='column_1',
text='my first column')
self.row.custom['column_2'] = gdata.spreadsheet.Custom(column='column_2',
text='my second column')
self.assert_(self.row.custom['column_1'].column == 'column_1')
self.assert_(self.row.custom['column_1'].text == 'my first column')
self.assert_(self.row.custom['column_2'].column == 'column_2')
self.assert_(self.row.custom['column_2'].text == 'my second column')
new_row = gdata.spreadsheet.SpreadsheetsListFromString(self.row.ToString())
self.assert_(self.row.custom['column_1'].column == new_row.custom['column_1'].column)
self.assert_(self.row.custom['column_1'].text == new_row.custom['column_1'].text)
self.assert_(self.row.custom['column_2'].column == new_row.custom['column_2'].column)
self.assert_(self.row.custom['column_2'].text == new_row.custom['column_2'].text)
class SpreadsheetsSpreadsheetsFeedTest(unittest.TestCase):
def setUp(self):
#self.item_feed = gdata.spreadsheet.SpreadsheetSpreadsheetsFeed()
self.feed = gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString(
SPREADSHEETS_FEED)
def testToAndFromString(self):
self.assert_(len(self.feed.entry) == 1)
for an_entry in self.feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsSpreadsheet))
new_feed = gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString(
str(self.feed))
for an_entry in new_feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsSpreadsheet))
class SpreadsheetsWorksheetsFeedTest(unittest.TestCase):
def setUp(self):
#self.item_feed = gdata.spreadsheet.SpreadsheetWorksheetsFeed()
self.feed = gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString(
WORKSHEETS_FEED)
def testToAndFromString(self):
self.assert_(len(self.feed.entry) == 1)
for an_entry in self.feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsWorksheet))
new_feed = gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString(
str(self.feed))
for an_entry in new_feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsWorksheet))
class SpreadsheetsCellsFeedTest(unittest.TestCase):
def setUp(self):
#self.item_feed = gdata.spreadsheet.SpreadsheetCellsFeed()
self.feed = gdata.spreadsheet.SpreadsheetsCellsFeedFromString(
CELLS_FEED)
def testToAndFromString(self):
self.assert_(len(self.feed.entry) == 2)
for an_entry in self.feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsCell))
new_feed = gdata.spreadsheet.SpreadsheetsCellsFeedFromString(str(self.feed))
self.assert_(isinstance(new_feed.row_count,
gdata.spreadsheet.RowCount))
self.assert_(new_feed.row_count.text == '100')
self.assert_(isinstance(new_feed.col_count,
gdata.spreadsheet.ColCount))
self.assert_(new_feed.col_count.text == '20')
for an_entry in new_feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsCell))
class SpreadsheetsListFeedTest(unittest.TestCase):
def setUp(self):
#self.item_feed = gdata.spreadsheet.SpreadsheetListFeed()
self.feed = gdata.spreadsheet.SpreadsheetsListFeedFromString(
LIST_FEED)
def testToAndFromString(self):
self.assert_(len(self.feed.entry) == 2)
for an_entry in self.feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsList))
new_feed = gdata.spreadsheet.SpreadsheetsListFeedFromString(str(self.feed))
for an_entry in new_feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsList))
if __name__ == '__main__':
unittest.main()
|
ian-ross/fp-legacy | refs/heads/modernize | decoder/matchup.py | 4 | import sys
import math
import numpy
import os.path
import PIL.Image
import PIL.ImageDraw
class Feature:
def __init__(self, x, y, scale, rotation, descriptors):
self.x = x
self.y = y
self.s = scale
self.r = rotation
self.d = descriptors
def __repr__(self):
return 'F(%(x)d, %(y)d, %(s).3f, %(r).3f)' % self.__dict__
def relativeScale(self, other):
return self.s / other.s
def relativeRotation(self, other):
r = other.r - self.r
while r > math.pi:
r -= 2 * math.pi
while r < -math.pi:
r += 2 * math.pi
return r
def relativeDistance(self, other):
d = math.hypot(self.x - other.x, self.y - other.y)
return d / self.s
def relativeBearing(self, other):
r = math.atan2(other.y - self.y, other.x - self.x)
r = r - self.r
while r > math.pi:
r -= 2 * math.pi
while r < -math.pi:
r += 2 * math.pi
return r
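# Illustration of the relative measures (example values, not from the original
# source): with f1 = Feature(0, 0, 2.0, 0.0, []) and f2 = Feature(4, 0, 1.0, 0.0, []):
#   f1.relativeScale(f2)    -> 2.0  (f1's scale over f2's)
#   f1.relativeDistance(f2) -> 2.0  (distance 4 normalized by f1's scale)
#   f1.relativeBearing(f2)  -> 0.0  (f2 lies along f1's orientation)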
def main(hImage, hData, nImage, nData):
""" Given a haystack image and descriptor file, and a need image and
descriptor file, return a new image that shows the two together
with instances of the needle marked on the haystack.
"""
# do the work part
hFeatures = [row2feature(row) for row in hData]
nFeatures = [row2feature(row) for row in nData]
matches = find_matches(hFeatures, nFeatures)
matches_graph = group_matches(matches, hFeatures, nFeatures)
needles = find_needles(matches, matches_graph, hFeatures, nFeatures)
# now do some drawing
out = PIL.Image.new('RGB', (hImage.size[0] + nImage.size[0], max(hImage.size[1], nImage.size[1])), 0x00)
out.paste(hImage, (0, 0))
out.paste(nImage, (hImage.size[0], 0))
canvas = PIL.ImageDraw.ImageDraw(out)
color = 0xFF, 0x99, 0x00
canvas.line((hImage.size[0], 0, hImage.size[0] + nImage.size[0], nImage.size[1]), fill=color)
canvas.line((hImage.size[0] + nImage.size[0], 0, hImage.size[0], nImage.size[1]), fill=color)
for (hKey, nKey) in matches.items():
n1 = nFeatures[nKey]
h1 = hFeatures[hKey]
color = 0x00, 0x66, 0xFF
draw_feature(canvas, h1, color)
draw_feature(canvas, n1, color, hImage.size[0])
for ((n1, n2), (h1, h2), transform) in needles:
color = 0xFF, 0x00, 0xFF
draw_feature(canvas, h2, color)
draw_feature(canvas, n2, color, hImage.size[0])
canvas.line((h1.x, h1.y, h2.x, h2.y), fill=color)
color = 0xFF, 0xFF, 0x00
draw_feature(canvas, h1, color)
draw_feature(canvas, n1, color, hImage.size[0])
points = (0, 0), (0, nImage.size[1]), (nImage.size[0], nImage.size[1]), (nImage.size[0], 0), (0, 0)
points = [transform(x, y) for (x, y) in points]
color = 0xFF, 0x99, 0x00
canvas.line((points[0], points[1]), fill=color)
canvas.line((points[1], points[2]), fill=color)
canvas.line((points[2], points[3]), fill=color)
canvas.line((points[3], points[0]), fill=color)
canvas.line((points[0], points[2]), fill=color)
canvas.line((points[1], points[3]), fill=color)
return out
def feature2row(feature):
""" Given a feature with x, y, scale, rotation list of descriptors,
return a row as a string as if it had come from bin/sift.
"""
location = ['%.6f' % f for f in (feature.x, feature.y, feature.s, feature.r)]
description = ['%d' % i for i in feature.d[:]]
return ' '.join(location + description)
def row2feature(row):
""" Given a row as string, split and convert into a feature:
x, y, scale, rotation, list of descriptors.
"""
feature = row.split()
x, y = int(float(feature[0])), int(float(feature[1]))
s, r = float(feature[2]), float(feature[3])
desc = [int(float(d)) for d in feature[4:]]
return Feature(x, y, s, r, numpy.array(desc))
def find_matches(hFeatures, nFeatures):
""" Given two lists of features (lists of x, y, scale, rotation, and descriptors)
return a dictionary of matches where keys are haystack indexes and values
are needle indexes, so that multiple needles can be found.
"""
matches = {}
# create an array of all needle descriptors
nArray = numpy.zeros((len(nFeatures), len(nFeatures[0].d)))
for (i, nFeature) in enumerate(nFeatures):
nArray[i,:] = nFeature.d
# iterate over all haystack descriptors, finding the closest needle match
for (hKey, hFeature) in enumerate(hFeatures):
assert len(hFeature.d) == nArray.shape[1]
#print >> sys.stderr, hKey,
# stretch the haystack array vertically so its size matches the needle array
hArray = numpy.resize(numpy.array(hFeature.d), nArray.shape)
# compute squared distances for all needle/haystack descriptor pairs
dist = nArray - hArray
diffqsums = numpy.sum(dist * dist, 1)
# extract the two closest squared distances
first, next, nKey = 10000000, 10000000, None
for (i, diffqsum) in enumerate(diffqsums.tolist()):
if diffqsum < first:
first, next, nKey = diffqsum, first, i
elif diffqsum < next:
next = diffqsum
# check whether closest distance is less than 0.6 of second.
if 10 * 10 * first > 6 * 6 * next:
# it's not, so they're too ambiguous, so nevermind
#print >> sys.stderr, '-'
continue
# yay found a match
#print >> sys.stderr, nKey
matches[hKey] = nKey
return matches
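# A note on the ambiguity check above (worked with made-up distances): the
# all-integer comparison `10 * 10 * first > 6 * 6 * next` asks whether
# sqrt(first) / sqrt(next) exceeds 0.6, i.e. whether the best match fails to
# be within 0.6 of the runner-up (a Lowe-style ratio test). With first=40000
# and next=90000, 4000000 > 3240000, so the match is dropped as ambiguous;
# with first=30000 and the same runner-up it would be kept.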
def find_single_match(hFeatures, nFeatures):
""" Given two lists of features (lists of x, y, scale, rotation, and descriptors)
return a dictionary of matches where keys are haystack indexes and values
are needle indexes, so that a single needle can be found.
Generally, don't use unless you're sure that there's exactly one match in the haystack.
"""
matches = {}
# create an array of all haystack descriptors
hArray = numpy.zeros((len(hFeatures), len(hFeatures[0].d)))
for (i, hFeature) in enumerate(hFeatures):
hArray[i,:] = hFeature.d
# iterate over all haystack descriptors, finding the closest needle match
for (nKey, nFeature) in enumerate(nFeatures):
assert len(nFeature.d) == hArray.shape[1]
#print >> sys.stderr, nKey,
# stretch the haystack array vertically so its size matches the needle array
nArray = numpy.resize(numpy.array(nFeature.d), hArray.shape)
# compute squared distances for all needle/haystack descriptor pairs
dist = nArray - hArray
diffqsums = numpy.sum(dist * dist, 1)
# extract the two closest squared distances
first, next, hKey = 10000000, 10000000, None
for (i, diffqsum) in enumerate(diffqsums.tolist()):
if diffqsum < first:
first, next, hKey = diffqsum, first, i
elif diffqsum < next:
next = diffqsum
# check whether closest distance is less than 0.6 of second.
if 10 * 10 * first > 6 * 6 * next:
# it's not, so they're too ambiguous, so nevermind
#print >> sys.stderr, '-'
continue
# yay found a match
#print >> sys.stderr, hKey
matches[hKey] = nKey
return matches
def group_matches(matches, hFeatures, nFeatures):
""" Given a collection of matches, create a graph of matches that are
mutually-agreeable based on relative position, size, and rotation.
The resulting array is a graph whose indexes correspond to
matches.keys, and indicate edges in a graph.
"""
matches_graph = numpy.zeros((len(matches), len(matches)))
# haystack feature index -> grid index
hIndexes = dict([(h, i) for (i, h) in enumerate(matches.keys())])
for (i, j) in matches.items():
for (k, l) in matches.items():
if i != k and j != l:
hThis, nThis = hFeatures[i], nFeatures[j]
hThat, nThat = hFeatures[k], nFeatures[l]
hRelativeBearing = hThis.relativeBearing(hThat)
nRelativeBearing = nThis.relativeBearing(nThat)
                    compareRelativeBearing = abs(hRelativeBearing - nRelativeBearing)
                    # bearings must agree within 10 degrees
                    if compareRelativeBearing >= math.pi / 18:
                        continue
                    hRelativeScale = hThis.relativeScale(hThat)
                    nRelativeScale = nThis.relativeScale(nThat)
                    compareRelativeScale = hRelativeScale / nRelativeScale
                    # scales must agree within 25%
                    if not (0.8 <= compareRelativeScale <= 1.25):
                        continue
                    hRelativeRotation = hThis.relativeRotation(hThat)
                    nRelativeRotation = nThis.relativeRotation(nThat)
                    compareRelativeRotation = abs(hRelativeRotation - nRelativeRotation)
                    # rotations must agree within 10 degrees
                    if compareRelativeRotation >= math.pi / 18:
                        continue
                    hRelativeDistance = hThis.relativeDistance(hThat)
                    nRelativeDistance = nThis.relativeDistance(nThat)
                    if nRelativeDistance == 0:
                        continue
                    compareRelativeDistance = hRelativeDistance / nRelativeDistance
                    # distances must agree within 25%
                    if not (0.8 <= compareRelativeDistance <= 1.25):
                        continue
# this and that feature may be self-consistent
matches_graph[hIndexes[i], hIndexes[k]] = 1
# be optimistic, too
matches_graph[hIndexes[k], hIndexes[i]] = 1
return matches_graph
def find_needles(matches, matches_graph, hFeatures, nFeatures):
""" Look through a big bag of matches and an associated connection graph
of mutually-agreeable hypotheses, and find needles in the form of
two pairs of features and a 2D transformation function.
"""
# grid index -> haystack feature index
hIndexes = dict([(i, h) for (i, h) in enumerate(matches.keys())])
needles = []
if matches_graph.shape == (0, 0):
return needles
# while any feature in the graphs seems to be connected to more than two others...
while max(numpy.sum(matches_graph, 0)) > 2:
# get the most-connected, largest haystack/needle matched pair
c, s, i = sorted([(c, hFeatures[hIndexes[i]].s, i) for (i, c) in enumerate(numpy.sum(matches_graph, 0).tolist())], reverse=True)[0]
# primary haystack/needle pair
h1 = hFeatures[hIndexes[i]]
n1 = nFeatures[matches[hIndexes[i]]]
# get the most-connected, closest haystack/needle matched pair that's connected to the above
c, d, j = sorted([(c, math.hypot(h1.x - hFeatures[hIndexes[j]].x, h1.y - hFeatures[hIndexes[j]].y), j)
for (j, c) in enumerate(matches_graph[i, :].tolist()) if c], reverse=True)[0]
# secondary haystack/needle pair
h2 = hFeatures[hIndexes[j]]
n2 = nFeatures[matches[hIndexes[j]]]
# delete all connections to the haystack points above
for (j, on) in enumerate(matches_graph[i, :].tolist()):
if on:
matches_graph[j, :] = 0
matches_graph[:, j] = 0
# really delete all connections to the haystack points above
matches_graph[i, :] = 0
matches_graph[:, i] = 0
# save for later
needle = (n1, n2), (h1, h2), derive_transform(n1, n2, h1, h2)
needles.append(needle)
return needles
def derive_transform(a1, a2, b1, b2):
""" Given two pairs of features, return a 2D transformation
function that will convert the first pair to the second.
"""
affine = numpy.identity(3)
# translate A point to (0, 0)
affine = numpy.dot(numpy.array([[1, 0, -a1.x], [0, 1, -a1.y], [0, 0, 1]]), affine)
# scale to B size
scale = math.hypot(b2.x - b1.x, b2.y - b1.y) / math.hypot(a2.x - a1.x, a2.y - a1.y)
affine = numpy.dot(numpy.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]]), affine)
# rotate to B orientation
theta = math.atan2(a2.x - a1.x, a2.y - a1.y) - math.atan2(b2.x - b1.x, b2.y - b1.y)
affine = numpy.dot(numpy.array([[math.cos(theta), -math.sin(theta), 0], [math.sin(theta), math.cos(theta), 0], [0, 0, 1]]), affine)
# translate back to B point
affine = numpy.dot(numpy.array([[1, 0, b1.x], [0, 1, b1.y], [0, 0, 1]]), affine)
return make_transform(affine)
def make_transform(affine):
""" Given an affine transformation matrix return the associated 2D transform function.
"""
ax, bx, cx, ay, by, cy = affine[0,0], affine[0,1], affine[0,2], affine[1,0], affine[1,1], affine[1,2]
return lambda x, y: (ax * x + bx * y + cx, ay * x + by * y + cy)
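# Illustrative sketch (not part of the original module): derive_transform
# composes translate/scale/rotate/translate so that the returned function maps
# a1 onto b1 and a2 onto b2 exactly; other points follow the similarity fit.
# The four features below are hypothetical.
def _example_derive_transform():
    a1, a2 = Feature(0, 0, 1, 0, []), Feature(1, 0, 1, 0, [])
    b1, b2 = Feature(10, 10, 1, 0, []), Feature(10, 12, 1, 0, [])
    transform = derive_transform(a1, a2, b1, b2)
    assert all(abs(p - q) < 1e-6 for (p, q) in zip(transform(0, 0), (10, 10)))
    assert all(abs(p - q) < 1e-6 for (p, q) in zip(transform(1, 0), (10, 12)))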
def draw_feature(canvas, feature, color, offset=0):
"""
"""
x, y = feature.x + offset, feature.y
canvas.ellipse((x - feature.s, y - feature.s, x + feature.s, y + feature.s), outline=color)
canvas.line((x, y, x + feature.s * math.cos(feature.r) / 2, y + feature.s * math.sin(feature.r) / 2), fill=color)
if __name__ == '__main__':
try:
hImage, hData = PIL.Image.open(sys.argv[1]).convert('RGB'), open(sys.argv[2], 'r')
nImage, nData = PIL.Image.open(sys.argv[3]).convert('RGB'), open(sys.argv[4], 'r')
    except (IndexError, IOError):
print >> sys.stderr, 'Usage: %s <haystack image> <haystack descriptors> <needle image> <needle descriptors>' % os.path.basename(__file__)
sys.exit(1)
else:
out = main(hImage, hData, nImage, nData)
out.save('out.png')
|
googleapis/googleapis-gen | refs/heads/master | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/keyword_view_service/transports/grpc.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import keyword_view
from google.ads.googleads.v8.services.types import keyword_view_service
from .base import KeywordViewServiceTransport, DEFAULT_CLIENT_INFO
class KeywordViewServiceGrpcTransport(KeywordViewServiceTransport):
"""gRPC backend transport for KeywordViewService.
Service to manage keyword views.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_keyword_view(self) -> Callable[
[keyword_view_service.GetKeywordViewRequest],
keyword_view.KeywordView]:
r"""Return a callable for the get keyword view method over gRPC.
Returns the requested keyword view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetKeywordViewRequest],
~.KeywordView]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_keyword_view' not in self._stubs:
self._stubs['get_keyword_view'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v8.services.KeywordViewService/GetKeywordView',
request_serializer=keyword_view_service.GetKeywordViewRequest.serialize,
response_deserializer=keyword_view.KeywordView.deserialize,
)
return self._stubs['get_keyword_view']
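# Usage sketch (not part of the generated file; the resource name below is
# made up): the transport is normally built indirectly by the service client,
# but it can be exercised directly:
#
#   transport = KeywordViewServiceGrpcTransport()
#   request = keyword_view_service.GetKeywordViewRequest(
#       resource_name='customers/1234567890/keywordViews/111~222')
#   view = transport.get_keyword_view(request)
#
# Because the stub is cached in self._stubs, repeated accesses of the property
# reuse the same underlying gRPC method handle.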
__all__ = (
'KeywordViewServiceGrpcTransport',
)
|
adrianholovaty/django | refs/heads/master | django/contrib/sessions/tests.py | 10 | from datetime import datetime, timedelta
import shutil
import string
import tempfile
import warnings
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import SessionStore as CookieSession
from django.contrib.sessions.models import Session
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.http import HttpResponse
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils import timezone
from django.utils import unittest
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
self.accessed = False
self.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertTrue('some key' in self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(self.session.values(), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(self.session.values(), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.iterkeys()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.itervalues()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.iteritems()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x', 1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(self.session.items(), [('x', 1)])
self.session.clear()
self.assertEqual(self.session.items(), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = self.session.items()
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(self.session.items(), prev_data)
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail("The session object did not save properly. Middleware may be saving cache items without namespaces.")
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
# Using seconds
self.session.set_expiry(10)
delta = self.session.get_expiry_date() - timezone.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_timedelta(self):
# Using timedelta
self.session.set_expiry(timedelta(seconds=10))
delta = self.session.get_expiry_date() - timezone.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_datetime(self):
# Using fixed datetime
self.session.set_expiry(timezone.now() + timedelta(seconds=10))
delta = self.session.get_expiry_date() - timezone.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
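# A note on the pattern below (a sketch, not Django documentation): each
# concrete test case pairs SessionTestsMixin with a `backend` attribute, so
# the whole suite above runs once per storage engine. A hypothetical new
# backend would inherit the full suite with just:
#
#   class RedisSessionTests(SessionTestsMixin, unittest.TestCase):
#       backend = RedisSession  # hypothetical backend class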
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
def test_session_get_decoded(self):
"""
        Test that we can use Session.get_decoded to retrieve data stored
        in the normal way
"""
self.session['x'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
# Change it
Session.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertTrue(self.session.exists(self.session.session_key))
def test_load_overlong_key(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
self.assertEqual(len(w), 1)
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
super(FileSessionTests, self).setUp()
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
def tearDown(self):
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
super(FileSessionTests, self).tearDown()
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
# Make sure the file backend checks for a good storage dir
self.assertRaises(ImproperlyConfigured, self.backend)
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(SuspiciousOperation,
self.backend("a\\b\\c").load)
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(SuspiciousOperation,
self.backend("a/b/c").load)
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
def test_load_overlong_key(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
self.assertEqual(len(w), 1)
class SessionMiddlewareTests(unittest.TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertIn('httponly',
str(response.cookies[settings.SESSION_COOKIE_NAME]))
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertNotIn('httponly',
str(response.cookies[settings.SESSION_COOKIE_NAME]))
class CookieSessionTests(SessionTestsMixin, TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
|
JohnOmernik/pimeup | refs/heads/master | dotstar/JO_fire.py | 1 | #!/usr/bin/python
from collections import OrderedDict
import time
import random
from dotstar import Adafruit_DotStar
COOLING = 60
SPARKING = 60
numpixels = 60 # Number of LEDs in strip
# Here's how to control the strip from any two GPIO pins:
datapin = 23
clockpin = 24
strip = Adafruit_DotStar(numpixels, datapin, clockpin)
strip.begin() # Initialize pins for output
strip.setBrightness(255) # Run the strip at full brightness
mydelay = [0.01, 0.02, 0.03, 0.1]
#Setting color to: 0xFF0000 # Green
#Setting color to: 0xCC00CC # Bright Teal
#Setting color to: 0x66CC00 # Orange
#Setting color to: 0x33FFFF # Magenta
#Setting color to: 0xFF00 # Red
#Setting color to: 0x330099 # Lightish Blue
#Setting color to: 0xFFFF00 # YEllow
#Setting color to: 0xFF # Bright Blue
#Setting color to: 0xFF9900 # YEllower Gren
#Setting color to: 0x33 # Dark BLue
heat = []
for x in range(numpixels):
heat.append(random.randint(0, 5))
#fire_colors = [ 0x000000, 0xFF0000, 0xFFFF00, 0xFFFFFF ]
fire_colors = [ "#000500", "#00FF00", "#48FF00", "#48FF32" ]
gsparkitup = True
num_colors = 100
my_colors = []
colors_dict = OrderedDict()
allcolors = []
def main():
global strip
global fire_colors
global my_colors
global colors_dict
global allcolors
for x in range(len(fire_colors)):
if x == len(fire_colors) -1:
pass
else:
print("Adding gradient for %s (%s) to %s (%s) with %s colors" % (fire_colors[x], hex_to_RGB(fire_colors[x]), fire_colors[x+1], hex_to_RGB(fire_colors[x+1]), num_colors))
gtmp = linear_gradient(fire_colors[x], fire_colors[x+1], num_colors)
my_colors.append(gtmp['hex'])
colors_dict[fire_colors[x] + "_2_" + fire_colors[x+1]] = gtmp['hex']
for x in colors_dict:
for y in colors_dict[x]:
allcolors.append(y)
# while True:
# for z in allcolors:
# time.sleep(0.01)
# setAllLEDS(strip, [int(z.replace("#", ''), 16)])
try:
while True: # Loop forever
time.sleep(random.choice(mydelay)) # Pause 20 milliseconds (~50 fps)
FirePlace()
# for x in range(len(fire_colors)):
# if x == len(fire_colors) - 1:
# endcolor = fire_colors[0]
# else:
# endcolor = fire_colors[x + 1]
# startcolor = fire_colors[x]
# grad = linear_gradient(startcolor, endcolor, 200)
#
# for x in grad['hex']:
# time.sleep(0.1)
# c = int(x.replace("#", ''), 16)
# print("Setting color %s to: 0x%0.2X" % (x, c))
# setAllLEDS(strip, [c])
# print grad
except KeyboardInterrupt:
print("")
print("exiting and shutting down strip")
setAllLEDS(strip, [0x000000])
def FirePlace():
global gsparkitup
global numpixels
global SPARKING
global COOLING
global strip
global allcolors
global heat
#for i in range(numpixels):
# heat[i] = 0
# Heat is a value for each pixel that is 0 to 255
# Every cycle there will be some random cololing
# Consider adding a degree of random whether a pixel cools
for i in range(numpixels):
if random.randint(0, 255) < COOLING:
            tval = heat[i] - random.randint(0, ((COOLING * 10) // numpixels) + 2)
heat[i] = tval
# This is supposed to be a diffusing effect I think
    k = numpixels - 3
    while k > 2:
        if random.randint(0, 255) < COOLING:
            tval = (heat[k - 1] + heat[k - 2] + heat[k - 2]) // 3
heat[k] = tval
k = k - 1
# Now let's see if we set any sparks!
if gsparkitup == True:
if random.randint(0, 255) < SPARKING:
rval = random.randint(0, numpixels - 1)
sparkval = random.randint(160, 255)
print("Sparking LED %s to %s" % (rval, sparkval))
            heat[rval] = heat[rval] + sparkval
# Now, actually set the pixels based on a scaled representation of all pixels
for j in range(numpixels):
if heat[j] > 255:
heat[j] = 255
if heat[j] < 0:
heat[j] = 0
newcolor = int((heat[j] * len(allcolors)) / 256)
# print("Pixel: %s has a heat value of %s and a newcolor idx of %s" % (j, heat[j], newcolor))
strip.setPixelColor(j, int(allcolors[newcolor].replace("#", ''), 16))
strip.show()
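# A note on the palette mapping above (a sketch): heat values are clamped to
# 0..255 and rescaled onto the gradient table, so with the default setup
# (3 segments x 100 colors = 300 entries) a heat of 128 selects index
# int(128 * 300 / 256) = 150, the middle of the palette.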
def color_dict(gradient):
''' Takes in a list of RGB sub-lists and returns dictionary of
colors in RGB and hex form for use in a graphing function
defined later on '''
return {"hex":[RGB_to_hex(RGB) for RGB in gradient],
"r":[RGB[0] for RGB in gradient],
"g":[RGB[1] for RGB in gradient],
"b":[RGB[2] for RGB in gradient]}
def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10):
''' returns a gradient list of (n) colors between
two hex colors. start_hex and finish_hex
should be the full six-digit color string,
inlcuding the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
s = hex_to_RGB(start_hex)
f = hex_to_RGB(finish_hex)
# Initilize a list of the output colors with the starting color
RGB_list = [s]
# Calcuate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [
int(s[j] + (float(t)/(n-1))*(f[j]-s[j]))
for j in range(3)
]
# Add it to our list of output colors
RGB_list.append(curr_vector)
return color_dict(RGB_list)
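# Worked example (a sketch): linear_gradient("#000000", "#FFFFFF", 3) steps t
# through 1..2, interpolating each channel, and returns a dict whose 'hex'
# list is ['#000000', '#7f7f7f', '#ffffff'] with matching 'r'/'g'/'b' lists.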
def hex_to_RGB(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
def RGB_to_hex(RGB):
''' [255,255,255] -> "#FFFFFF" '''
# Components need to be integers for hex to make sense
RGB = [int(x) for x in RGB]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else
"{0:x}".format(v) for v in RGB])
def setAllLEDS(strip, colorlist):
numcolors = len(colorlist)
for x in range(numpixels):
idx = x % numcolors
strip.setPixelColor(x, colorlist[idx])
strip.show()
if __name__ == "__main__":
main()
|
Zlash65/erpnext | refs/heads/develop | erpnext/education/doctype/article/article.py | 6 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Article(Document):
def get_article(self):
pass
|
marhar/mavlink | refs/heads/master | pymavlink/DFReader.py | 14 | #!/usr/bin/env python
'''
APM DataFlash log file reader
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
Partly based on SDLog2Parser by Anton Babushkin
'''
import struct, time, os
from . import mavutil
FORMAT_TO_STRUCT = {
"b": ("b", None, int),
"B": ("B", None, int),
"h": ("h", None, int),
"H": ("H", None, int),
"i": ("i", None, int),
"I": ("I", None, int),
"f": ("f", None, float),
"n": ("4s", None, str),
"N": ("16s", None, str),
"Z": ("64s", None, str),
"c": ("h", 0.01, float),
"C": ("H", 0.01, float),
"e": ("i", 0.01, float),
"E": ("I", 0.01, float),
"L": ("i", 1.0e-7, float),
"d": ("d", None, float),
"M": ("b", None, int),
"q": ("q", None, long),
"Q": ("Q", None, long),
}
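# Example (a sketch): the self-describing FMT message uses format "BBnNZ",
# which the table above expands to the struct layout "<BB4s16s64s" -- two
# unsigned bytes plus 4/16/64-byte strings, i.e. 86 payload bytes, or 89
# including the 3-byte message header (the length registered for FMT below).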
class DFFormat(object):
def __init__(self, type, name, flen, format, columns):
self.type = type
self.name = name
self.len = flen
self.format = format
self.columns = columns.split(',')
if self.columns == ['']:
self.columns = []
msg_struct = "<"
msg_mults = []
msg_types = []
for c in format:
if ord(c) == 0:
break
try:
(s, mul, type) = FORMAT_TO_STRUCT[c]
msg_struct += s
msg_mults.append(mul)
msg_types.append(type)
except KeyError as e:
raise Exception("Unsupported format char: '%s' in message %s" % (c, name))
self.msg_struct = msg_struct
self.msg_types = msg_types
self.msg_mults = msg_mults
self.colhash = {}
for i in range(len(self.columns)):
self.colhash[self.columns[i]] = i
def __str__(self):
return "DFFormat(%s,%s,%s,%s)" % (self.type, self.name, self.format, self.columns)
def null_term(str):
'''null terminate a string'''
idx = str.find("\0")
if idx != -1:
str = str[:idx]
return str
class DFMessage(object):
def __init__(self, fmt, elements, apply_multiplier):
self.fmt = fmt
self._elements = elements
self._apply_multiplier = apply_multiplier
self._fieldnames = fmt.columns
def to_dict(self):
d = {'mavpackettype': self.fmt.name}
for field in self._fieldnames:
d[field] = self.__getattr__(field)
return d
def __getattr__(self, field):
'''override field getter'''
try:
i = self.fmt.colhash[field]
except Exception:
raise AttributeError(field)
v = self._elements[i]
if self.fmt.format[i] != 'M' or self._apply_multiplier:
v = self.fmt.msg_types[i](v)
if self.fmt.msg_types[i] == str:
v = null_term(v)
if self.fmt.msg_mults[i] is not None and self._apply_multiplier:
v *= self.fmt.msg_mults[i]
return v
def get_type(self):
return self.fmt.name
def __str__(self):
ret = "%s {" % self.fmt.name
col_count = 0
for c in self.fmt.columns:
ret += "%s : %s, " % (c, self.__getattr__(c))
col_count += 1
if col_count != 0:
ret = ret[:-2]
return ret + '}'
def get_msgbuf(self):
'''create a binary message buffer for a message'''
values = []
for i in range(len(self.fmt.columns)):
if i >= len(self.fmt.msg_mults):
continue
mul = self.fmt.msg_mults[i]
name = self.fmt.columns[i]
if name == 'Mode' and 'ModeNum' in self.fmt.columns:
name = 'ModeNum'
v = self.__getattr__(name)
if mul is not None:
v /= mul
values.append(v)
return struct.pack("BBB", 0xA3, 0x95, self.fmt.type) + struct.pack(self.fmt.msg_struct, *values)
class DFReaderClock():
'''base class for all the different ways we count time in logs'''
def __init__(self):
self.set_timebase(0)
self.timestamp = 0
def _gpsTimeToTime(self, week, msec):
'''convert GPS week and TOW to a time in seconds since 1970'''
epoch = 86400*(10*365 + (1980-1969)/4 + 1 + 6 - 2)
return epoch + 86400*7*week + msec*0.001 - 15
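    # Worked example (a sketch): with Python 2 integer division the epoch
    # constant above is 3657 * 86400 = 315964800, i.e. 1980-01-06T00:00:00Z,
    # the start of GPS time. week=1721, msec=0 therefore maps to
    # 315964800 + 1721 * 604800 seconds; the trailing -15 is the GPS-to-UTC
    # leap-second offset the author assumes.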
def set_timebase(self, base):
self.timebase = base
def message_arrived(self, m):
pass
def rewind_event(self):
pass
class DFReaderClock_usec(DFReaderClock):
'''DFReaderClock_usec - use microsecond timestamps from messages'''
def __init__(self):
DFReaderClock.__init__(self)
def find_time_base(self, gps, first_us_stamp):
'''work out time basis for the log - even newer style'''
t = self._gpsTimeToTime(gps.GWk, gps.GMS)
self.set_timebase(t - gps.TimeUS*0.000001)
# this ensures FMT messages get appropriate timestamp:
self.timestamp = self.timebase + first_us_stamp*0.000001
def type_has_good_TimeMS(self, type):
'''The TimeMS in some messages is not from *our* clock!'''
if type.startswith('ACC'):
            return False
        if type.startswith('GYR'):
            return False
return True
def should_use_msec_field0(self, m):
if not self.type_has_good_TimeMS(m.get_type()):
return False
if 'TimeMS' != m._fieldnames[0]:
return False
if self.timebase + m.TimeMS*0.001 < self.timestamp:
return False
        return True
def set_message_timestamp(self, m):
if 'TimeUS' == m._fieldnames[0]:
# only format messages don't have a TimeUS in them...
m._timestamp = self.timebase + m.TimeUS*0.000001
elif self.should_use_msec_field0(m):
# ... in theory. I expect there to be some logs which are not
# "pure":
m._timestamp = self.timebase + m.TimeMS*0.001
else:
m._timestamp = self.timestamp
self.timestamp = m._timestamp
class DFReaderClock_msec(DFReaderClock):
'''DFReaderClock_msec - a format where many messages have TimeMS in their formats, and GPS messages have a "T" field giving msecs '''
def find_time_base(self, gps, first_ms_stamp):
'''work out time basis for the log - new style'''
t = self._gpsTimeToTime(gps.Week, gps.TimeMS)
self.set_timebase(t - gps.T*0.001)
self.timestamp = self.timebase + first_ms_stamp*0.001
def set_message_timestamp(self, m):
if 'TimeMS' == m._fieldnames[0]:
m._timestamp = self.timebase + m.TimeMS*0.001
elif m.get_type() in ['GPS','GPS2']:
m._timestamp = self.timebase + m.T*0.001
else:
m._timestamp = self.timestamp
self.timestamp = m._timestamp
class DFReaderClock_px4(DFReaderClock):
'''DFReaderClock_px4 - a format where a starting time is explicitly given in a message'''
def __init__(self):
DFReaderClock.__init__(self)
self.px4_timebase = 0
def find_time_base(self, gps):
'''work out time basis for the log - PX4 native'''
t = gps.GPSTime * 1.0e-6
self.timebase = t - self.px4_timebase
def set_px4_timebase(self, time_msg):
self.px4_timebase = time_msg.StartTime * 1.0e-6
def set_message_timestamp(self, m):
m._timestamp = self.timebase + self.px4_timebase
def message_arrived(self, m):
type = m.get_type()
if type == 'TIME' and 'StartTime' in m._fieldnames:
self.set_px4_timebase(m)
class DFReaderClock_gps_interpolated(DFReaderClock):
'''DFReaderClock_gps_interpolated - for when the only real references in a message are GPS timestamps '''
def __init__(self):
DFReaderClock.__init__(self)
self.msg_rate = {}
self.counts = {}
self.counts_since_gps = {}
def rewind_event(self):
'''reset counters on rewind'''
self.counts = {}
self.counts_since_gps = {}
def message_arrived(self, m):
type = m.get_type()
if not type in self.counts:
self.counts[type] = 1
else:
self.counts[type] += 1
# this preserves existing behaviour - but should we be doing this
# if type == 'GPS'?
if not type in self.counts_since_gps:
self.counts_since_gps[type] = 1
else:
self.counts_since_gps[type] += 1
if type == 'GPS' or type == 'GPS2':
self.gps_message_arrived(m)
def gps_message_arrived(self, m):
'''adjust time base from GPS message'''
# msec-style GPS message?
gps_week = getattr(m, 'Week', None)
gps_timems = getattr(m, 'TimeMS', None)
if gps_week is None:
# usec-style GPS message?
gps_week = getattr(m, 'GWk', None)
gps_timems = getattr(m, 'GMS', None)
if gps_week is None:
if getattr(m, 'GPSTime', None) is not None:
# PX4-style timestamp; we've only been called
# because we were speculatively created in case no
# better clock was found.
                return
t = self._gpsTimeToTime(gps_week, gps_timems)
deltat = t - self.timebase
if deltat <= 0:
return
for type in self.counts_since_gps:
rate = self.counts_since_gps[type] / deltat
if rate > self.msg_rate.get(type, 0):
self.msg_rate[type] = rate
self.msg_rate['IMU'] = 50.0
self.timebase = t
self.counts_since_gps = {}
def set_message_timestamp(self, m):
rate = self.msg_rate.get(m.fmt.name, 50.0)
if int(rate) == 0:
rate = 50
count = self.counts_since_gps.get(m.fmt.name, 0)
m._timestamp = self.timebase + count/rate
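# Worked example for the interpolated clock above (a sketch): if 200 'IMU'
# messages arrive between two GPS fixes 4 seconds apart, gps_message_arrived
# records a rate of 50 msg/sec, and set_message_timestamp then stamps each
# subsequent IMU message as timebase + count / 50 -- interpolating between
# GPS fixes using per-type message rates.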
class DFReader(object):
'''parse a generic dataflash file'''
def __init__(self):
# read the whole file into memory for simplicity
self.clock = None
self.timestamp = 0
self.mav_type = mavutil.mavlink.MAV_TYPE_FIXED_WING
self.verbose = False
self.params = {}
def _rewind(self):
'''reset state on rewind'''
self.messages = { 'MAV' : self }
self.flightmode = "UNKNOWN"
self.percent = 0
if self.clock:
self.clock.rewind_event()
def init_clock_px4(self, px4_msg_time, px4_msg_gps):
self.clock = DFReaderClock_px4()
if not self._zero_time_base:
self.clock.set_px4_timebase(px4_msg_time)
self.clock.find_time_base(px4_msg_gps)
return True
def init_clock_msec(self):
# it is a new style flash log with full timestamps
self.clock = DFReaderClock_msec()
def init_clock_usec(self):
self.clock = DFReaderClock_usec()
def init_clock_gps_interpolated(self, clock):
self.clock = clock
def init_clock(self):
'''work out time basis for the log'''
self._rewind()
# speculatively create a gps clock in case we don't find anything
# better
gps_clock = DFReaderClock_gps_interpolated()
self.clock = gps_clock
px4_msg_time = None
px4_msg_gps = None
gps_interp_msg_gps1 = None
gps_interp_msg_gps2 = None
first_us_stamp = None
first_ms_stamp = None
have_good_clock = False
while True:
m = self.recv_msg()
if m is None:
                break
type = m.get_type()
if first_us_stamp is None:
                first_us_stamp = getattr(m, "TimeUS", None)
if first_ms_stamp is None and (type != 'GPS' and type != 'GPS2'):
# Older GPS messages use TimeMS for msecs past start
# of gps week
                first_ms_stamp = getattr(m, "TimeMS", None)
if type == 'GPS' or type == 'GPS2':
if getattr(m, "TimeUS", 0) != 0 and \
getattr(m, "GWk", 0) != 0: # everything-usec-timestamped
self.init_clock_usec()
if not self._zero_time_base:
self.clock.find_time_base(m, first_us_stamp)
have_good_clock = True
break
if getattr(m, "T", 0) != 0 and \
getattr(m, "Week", 0) != 0: # GPS is msec-timestamped
if first_ms_stamp is None:
first_ms_stamp = m.T
self.init_clock_msec()
if not self._zero_time_base:
self.clock.find_time_base(m, first_ms_stamp)
have_good_clock = True
break
if getattr(m, "GPSTime", 0) != 0: # px4-style-only
px4_msg_gps = m
if getattr(m, "Week", 0) != 0:
if gps_interp_msg_gps1 is not None and \
(gps_interp_msg_gps1.TimeMS != m.TimeMS or \
gps_interp_msg_gps1.Week != m.Week):
# we've received two distinct, non-zero GPS
# packets without finding a decent clock to
# use; fall back to interpolation. Q: should
                        # we wait a few more messages before doing
# this?
self.init_clock_gps_interpolated(gps_clock)
have_good_clock = True
break
gps_interp_msg_gps1 = m
elif type == 'TIME':
'''only px4-style logs use TIME'''
if getattr(m, "StartTime", None) != None:
px4_msg_time = m;
if px4_msg_time is not None and px4_msg_gps is not None:
self.init_clock_px4(px4_msg_time, px4_msg_gps)
have_good_clock = True
break
# print("clock is " + str(self.clock))
if not have_good_clock:
# we failed to find any GPS messages to set a time
# base for usec and msec clocks. Also, not a
# PX4-style log
if first_us_stamp is not None:
self.init_clock_usec()
elif first_ms_stamp is not None:
self.init_clock_msec()
self._rewind()
return
def _set_time(self, m):
'''set time for a message'''
# really just left here for profiling
m._timestamp = self.timestamp
if len(m._fieldnames) > 0 and self.clock is not None:
self.clock.set_message_timestamp(m)
def recv_msg(self):
return self._parse_next()
def _add_msg(self, m):
'''add a new message'''
type = m.get_type()
self.messages[type] = m
if self.clock:
self.clock.message_arrived(m)
if type == 'MSG':
if m.Message.find("Rover") != -1:
self.mav_type = mavutil.mavlink.MAV_TYPE_GROUND_ROVER
elif m.Message.find("Plane") != -1:
self.mav_type = mavutil.mavlink.MAV_TYPE_FIXED_WING
elif m.Message.find("Copter") != -1:
self.mav_type = mavutil.mavlink.MAV_TYPE_QUADROTOR
elif m.Message.startswith("Antenna"):
self.mav_type = mavutil.mavlink.MAV_TYPE_ANTENNA_TRACKER
if type == 'MODE':
if isinstance(m.Mode, str):
self.flightmode = m.Mode.upper()
elif 'ModeNum' in m._fieldnames:
mapping = mavutil.mode_mapping_bynumber(self.mav_type)
if mapping is not None and m.ModeNum in mapping:
self.flightmode = mapping[m.ModeNum]
else:
self.flightmode = mavutil.mode_string_acm(m.Mode)
if type == 'STAT' and 'MainState' in m._fieldnames:
self.flightmode = mavutil.mode_string_px4(m.MainState)
if type == 'PARM' and getattr(m, 'Name', None) is not None:
self.params[m.Name] = m.Value
self._set_time(m)
def recv_match(self, condition=None, type=None, blocking=False):
'''recv the next message that matches the given condition
type can be a string or a list of strings'''
if type is not None and not isinstance(type, list):
type = [type]
while True:
m = self.recv_msg()
if m is None:
return None
if type is not None and not m.get_type() in type:
continue
if not mavutil.evaluate_condition(condition, self.messages):
continue
return m
def check_condition(self, condition):
'''check if a condition is true'''
return mavutil.evaluate_condition(condition, self.messages)
def param(self, name, default=None):
'''convenient function for returning an arbitrary MAVLink
parameter with a default'''
if not name in self.params:
return default
return self.params[name]
class DFReader_binary(DFReader):
'''parse a binary dataflash file'''
def __init__(self, filename, zero_time_base=False):
DFReader.__init__(self)
# read the whole file into memory for simplicity
f = open(filename, mode='rb')
self.data = f.read()
self.data_len = len(self.data)
f.close()
self.HEAD1 = 0xA3
self.HEAD2 = 0x95
self.formats = {
0x80 : DFFormat(0x80, 'FMT', 89, 'BBnNZ', "Type,Length,Name,Format,Columns")
}
self._zero_time_base = zero_time_base
self.init_clock()
self._rewind()
def _rewind(self):
'''rewind to start of log'''
DFReader._rewind(self)
self.offset = 0
self.remaining = self.data_len
def _parse_next(self):
'''read one message, returning it as an object'''
if self.data_len - self.offset < 3:
return None
hdr = self.data[self.offset:self.offset+3]
skip_bytes = 0
skip_type = None
# skip over bad messages
while (ord(hdr[0]) != self.HEAD1 or ord(hdr[1]) != self.HEAD2 or ord(hdr[2]) not in self.formats):
if skip_type is None:
skip_type = (ord(hdr[0]), ord(hdr[1]), ord(hdr[2]))
skip_bytes += 1
self.offset += 1
if self.data_len - self.offset < 3:
return None
hdr = self.data[self.offset:self.offset+3]
msg_type = ord(hdr[2])
if skip_bytes != 0:
if self.remaining < 528:
return None
print("Skipped %u bad bytes in log %s remaining=%u" % (skip_bytes, skip_type, self.remaining))
self.remaining -= skip_bytes
self.offset += 3
self.remaining -= 3
if not msg_type in self.formats:
if self.verbose:
print("unknown message type %02x" % msg_type)
raise Exception("Unknown message type %02x" % msg_type)
fmt = self.formats[msg_type]
if self.remaining < fmt.len-3:
# out of data - can often happen half way through a message
if self.verbose:
print("out of data")
return None
body = self.data[self.offset:self.offset+(fmt.len-3)]
elements = None
try:
elements = list(struct.unpack(fmt.msg_struct, body))
except Exception:
if self.remaining < 528:
# we can have garbage at the end of an APM2 log
return None
# we should also cope with other corruption; logs
# transfered via DataFlash_MAVLink may have blocks of 0s
# in them, for example
print("Failed to parse %s/%s with len %u (remaining %u)" % (fmt.name, fmt.msg_struct, len(body), self.remaining))
if elements is None:
return self._parse_next()
name = null_term(fmt.name)
if name == 'FMT':
# add to formats
# name, len, format, headings
self.formats[elements[0]] = DFFormat(elements[0],
null_term(elements[2]), elements[1],
null_term(elements[3]), null_term(elements[4]))
self.offset += fmt.len-3
self.remaining -= fmt.len-3
m = DFMessage(fmt, elements, True)
self._add_msg(m)
self.percent = 100.0 * (self.offset / float(self.data_len))
return m
def DFReader_is_text_log(filename):
'''return True if a file appears to be a valid text log'''
f = open(filename)
ret = (f.read(8000).find('FMT, ') != -1)
f.close()
return ret
class DFReader_text(DFReader):
'''parse a text dataflash file'''
def __init__(self, filename, zero_time_base=False):
DFReader.__init__(self)
# read the whole file into memory for simplicity
f = open(filename, mode='r')
self.lines = f.readlines()
f.close()
self.formats = {
'FMT' : DFFormat(0x80, 'FMT', 89, 'BBnNZ', "Type,Length,Name,Format,Columns")
}
self._rewind()
self._zero_time_base = zero_time_base
self.init_clock()
self._rewind()
def _rewind(self):
'''rewind to start of log'''
DFReader._rewind(self)
self.line = 0
# find the first valid line
while self.line < len(self.lines):
if self.lines[self.line].startswith("FMT, "):
break
self.line += 1
def _parse_next(self):
'''read one message, returning it as an object'''
while self.line < len(self.lines):
s = self.lines[self.line].rstrip()
elements = s.split(", ")
# move to next line
self.line += 1
if len(elements) >= 2:
break
if self.line >= len(self.lines):
return None
# cope with empty structures
if len(elements) == 5 and elements[-1] == ',':
elements[-1] = ''
elements.append('')
self.percent = 100.0 * (self.line / float(len(self.lines)))
msg_type = elements[0]
if not msg_type in self.formats:
return self._parse_next()
fmt = self.formats[msg_type]
if len(elements) < len(fmt.format)+1:
# not enough columns
return self._parse_next()
elements = elements[1:]
name = fmt.name.rstrip('\0')
if name == 'FMT':
# add to formats
# name, len, format, headings
self.formats[elements[2]] = DFFormat(int(elements[0]), elements[2], int(elements[1]), elements[3], elements[4])
try:
m = DFMessage(fmt, elements, False)
except ValueError:
return self._parse_next()
self._add_msg(m)
return m
if __name__ == "__main__":
import sys
use_profiler = False
if use_profiler:
from line_profiler import LineProfiler
profiler = LineProfiler()
profiler.add_function(DFReader_binary._parse_next)
profiler.add_function(DFReader_binary._add_msg)
profiler.add_function(DFReader._set_time)
profiler.enable_by_count()
filename = sys.argv[1]
if filename.endswith('.log'):
log = DFReader_text(filename)
else:
log = DFReader_binary(filename)
while True:
m = log.recv_msg()
if m is None:
break
#print(m)
if use_profiler:
profiler.print_stats()
|
GyrosOfWar/servo | refs/heads/master | tests/wpt/css-tests/tools/pywebsocket/src/test/test_stream.py | 496 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for stream module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import common
from mod_pywebsocket import stream
class StreamTest(unittest.TestCase):
"""A unittest for stream module."""
def test_create_header(self):
        # fin, rsv1, rsv2, rsv3 and the mask bit are all set
header = stream.create_header(common.OPCODE_TEXT, 1, 1, 1, 1, 1, 1)
self.assertEqual('\xf1\x81', header)
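        # Sketch of how the expected bytes decode under RFC 6455 framing:
        # 0xf1 = FIN|RSV1|RSV2|RSV3 + opcode 0x1 (text);
        # 0x81 = MASK bit + 7-bit payload length 1.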
# Maximum payload size
header = stream.create_header(
common.OPCODE_TEXT, (1 << 63) - 1, 0, 0, 0, 0, 0)
self.assertEqual('\x01\x7f\x7f\xff\xff\xff\xff\xff\xff\xff', header)
# Invalid opcode 0x10
self.assertRaises(ValueError,
stream.create_header,
0x10, 0, 0, 0, 0, 0, 0)
# Invalid value 0xf passed to more parameter
self.assertRaises(ValueError,
stream.create_header,
common.OPCODE_TEXT, 0, 0xf, 0, 0, 0, 0)
# Too long payload_length
self.assertRaises(ValueError,
stream.create_header,
common.OPCODE_TEXT, 1 << 63, 0, 0, 0, 0, 0)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
|
mo-norant/FinHeartBel | refs/heads/master | website/node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
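# Illustrative invocation (target name and output file are assumptions),
# given a dump.json produced by the dump-dependency-json generator in the
# current directory:
#   python graphviz.py path/to/foo.gyp:my_target | dot -Tpng -o deps.png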
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
show targets in |targets| and their dependents."""
  file = open(filename)
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
|
cypreess/PyrateDice | refs/heads/master | game_server/game_server/game_server/celery.py | 1 | from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'game_server.settings')
app = Celery('game_server')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
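# A worker for this app would typically be started with something like the
# following (shown for illustration only):
#   celery -A game_server worker --loglevel=info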
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) |
diorcety/intellij-community | refs/heads/master | python/testData/optimizeImports/importStar.py | 83 | from sys import *
from re import *
compile
|
jawilson/home-assistant | refs/heads/master | homeassistant/components/automation/sun.py | 1 | """
Offer sun based automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/components/automation/#sun-trigger
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import (
CONF_EVENT, CONF_OFFSET, CONF_PLATFORM, SUN_EVENT_SUNRISE)
from homeassistant.helpers.event import track_sunrise, track_sunset
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['sun']
_LOGGER = logging.getLogger(__name__)
TRIGGER_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'sun',
vol.Required(CONF_EVENT): cv.sun_event,
vol.Required(CONF_OFFSET, default=timedelta(0)): cv.time_period,
})
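# An illustrative YAML trigger accepted by TRIGGER_SCHEMA (values are
# examples only):
#
#   trigger:
#     platform: sun
#     event: sunset
#     offset: "-00:45:00"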
def trigger(hass, config, action):
"""Listen for events based on configuration."""
event = config.get(CONF_EVENT)
offset = config.get(CONF_OFFSET)
def call_action():
"""Call action with right context."""
action({
'trigger': {
'platform': 'sun',
'event': event,
'offset': offset,
},
})
# Do something to call action
if event == SUN_EVENT_SUNRISE:
return track_sunrise(hass, call_action, offset)
else:
return track_sunset(hass, call_action, offset)
|
tkaitchuck/nupic | refs/heads/master | examples/opf/experiments/anomaly/spatial/2field_few_6040/description.py | 10 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nta/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
        # Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActivePerInhArea': 40,
'seed': 1956,
# coincInputPoolPct
        # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose coincInputPoolPct * (2*coincInputRadius+1)^2
'coincInputPoolPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
################################################################################
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by OpfRunExperiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*grokScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [claModelControlEnableSPLearningCb, claModelControlEnableTPLearningCb],
# 'setup' : [claModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
################################################################################
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
|
waseem18/oh-mainline | refs/heads/master | vendor/packages/beautifulsoup4/bs4/__init__.py | 417 | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson ([email protected])"
__version__ = "4.3.2"
__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = u'[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, **kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. You can pass in features='html' "
"or features='xml' to get a builder capable of handling "
"one or the other.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if len(kwargs) > 0:
arg = kwargs.keys().pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
if isinstance(features, basestring):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
self.builder = builder
self.is_xml = builder.is_xml
self.builder.soup = self
self.parse_only = parse_only
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256:
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# just in case that's what the user really wants.
if (isinstance(markup, unicode)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception, e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
warnings.warn(
'"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
if markup[:5] == "http:" or markup[:6] == "https:":
# TODO: This is ugly but I couldn't get it to work in
# Python 3 otherwise.
if ((isinstance(markup, bytes) and not b' ' in markup)
or (isinstance(markup, unicode) and not u' ' in markup)):
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(markup, from_encoding)):
self.reset()
try:
self._feed()
break
except ParserRejectedMarkup:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.preserve_whitespace_tag_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
navigable = subclass(s)
navigable.setup()
return navigable
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = u''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Add an object to the parse tree."""
parent = parent or self.currentTag
most_recent_element = most_recent_element or self._most_recent_element
o.setup(parent, most_recent_element)
if most_recent_element is not None:
most_recent_element.next_element = o
self._most_recent_element = o
parent.contents.append(o)
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
        SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = u''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
pass
class FeatureNotFound(ValueError):
pass
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
|
Netuitive/Diamond | refs/heads/master | src/collectors/jbossapi/test/testjbossapi.py | 31 | #!/usr/bin/python
# coding=utf-8
###############################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from jbossapi import JbossApiCollector
###############################################################################
class TestJbossApiCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('JbossApiCollector', {
})
self.collector = JbossApiCollector(config, None)
def test_import(self):
self.assertTrue(JbossApiCollector)
###############################################################################
if __name__ == "__main__":
unittest.main()
|
attilahorvath/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/gdb/webkit.py | 115 | # Copyright (C) 2010, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""GDB support for WebKit types.
Add this to your gdb by amending your ~/.gdbinit as follows:
python
import sys
sys.path.insert(0, "/path/to/tools/gdb/")
import webkit
"""
import gdb
import re
import struct
def guess_string_length(ptr):
"""Guess length of string pointed by ptr.
Returns a tuple of (length, an error message).
"""
# Try to guess at the length.
for i in xrange(0, 2048):
try:
if int((ptr + i).dereference()) == 0:
return i, ''
except RuntimeError:
# We indexed into inaccessible memory; give up.
return i, ' (gdb hit inaccessible memory)'
return 256, ' (gdb found no trailing NUL)'
def ustring_to_string(ptr, length=None):
"""Convert a pointer to UTF-16 data into a Python string encoded with utf-8.
ptr and length are both gdb.Value objects.
If length is unspecified, will guess at the length."""
error_message = ''
if length is None:
length, error_message = guess_string_length(ptr)
else:
length = int(length)
char_vals = [int((ptr + i).dereference()) for i in xrange(length)]
string = struct.pack('H' * length, *char_vals).decode('utf-16', 'replace').encode('utf-8')
return string + error_message
def lstring_to_string(ptr, length=None):
"""Convert a pointer to LChar* data into a Python (non-Unicode) string.
ptr and length are both gdb.Value objects.
If length is unspecified, will guess at the length."""
error_message = ''
if length is None:
length, error_message = guess_string_length(ptr)
else:
length = int(length)
string = ''.join([chr((ptr + i).dereference()) for i in xrange(length)])
return string + error_message
class StringPrinter(object):
"Shared code between different string-printing classes"
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
class UCharStringPrinter(StringPrinter):
"Print a UChar*; we must guess at the length"
def to_string(self):
return ustring_to_string(self.val)
class LCharStringPrinter(StringPrinter):
"Print a LChar*; we must guess at the length"
def to_string(self):
return lstring_to_string(self.val)
class WTFAtomicStringPrinter(StringPrinter):
"Print a WTF::AtomicString"
def to_string(self):
return self.val['m_string']
class WTFCStringPrinter(StringPrinter):
"Print a WTF::CString"
def to_string(self):
# The CString holds a buffer, which is a refptr to a WTF::CStringBuffer.
data = self.val['m_buffer']['m_ptr']['m_data'].cast(gdb.lookup_type('char').pointer())
length = self.val['m_buffer']['m_ptr']['m_length']
return ''.join([chr((data + i).dereference()) for i in xrange(length)])
class WTFStringImplPrinter(StringPrinter):
"Print a WTF::StringImpl"
def get_length(self):
return self.val['m_length']
def to_string(self):
if self.is_8bit():
return lstring_to_string(self.val['m_data8'], self.get_length())
return ustring_to_string(self.val['m_data16'], self.get_length())
def is_8bit(self):
return self.val['m_hashAndFlags'] & self.val['s_hashFlag8BitBuffer']
class WTFStringPrinter(StringPrinter):
"Print a WTF::String"
def stringimpl_ptr(self):
return self.val['m_impl']['m_ptr']
def get_length(self):
if not self.stringimpl_ptr():
return 0
return WTFStringImplPrinter(self.stringimpl_ptr().dereference()).get_length()
def to_string(self):
if not self.stringimpl_ptr():
return '(null)'
return self.stringimpl_ptr().dereference()
class JSCIdentifierPrinter(StringPrinter):
"Print a JSC::Identifier"
def to_string(self):
return WTFStringPrinter(self.val['m_string']).to_string()
class JSCJSStringPrinter(StringPrinter):
"Print a JSC::JSString"
def to_string(self):
if self.val['m_length'] == 0:
return ''
return WTFStringImplPrinter(self.val['m_value']).to_string()
class WebCoreKURLGooglePrivatePrinter(StringPrinter):
"Print a WebCore::KURLGooglePrivate"
def to_string(self):
return WTFCStringPrinter(self.val['m_utf8']).to_string()
class WebCoreQualifiedNamePrinter(StringPrinter):
"Print a WebCore::QualifiedName"
def __init__(self, val):
super(WebCoreQualifiedNamePrinter, self).__init__(val)
self.prefix_length = 0
self.length = 0
if self.val['m_impl']:
self.prefix_printer = WTFStringPrinter(
self.val['m_impl']['m_prefix']['m_string'])
self.local_name_printer = WTFStringPrinter(
self.val['m_impl']['m_localName']['m_string'])
self.prefix_length = self.prefix_printer.get_length()
if self.prefix_length > 0:
self.length = (self.prefix_length + 1 +
self.local_name_printer.get_length())
else:
self.length = self.local_name_printer.get_length()
def get_length(self):
return self.length
def to_string(self):
if self.get_length() == 0:
return "(null)"
else:
if self.prefix_length > 0:
return (self.prefix_printer.to_string() + ":" +
self.local_name_printer.to_string())
else:
return self.local_name_printer.to_string()
class WTFVectorPrinter:
"""Pretty Printer for a WTF::Vector.
The output of this pretty printer is similar to the output of std::vector's
pretty printer, which is bundled in gcc.
Example gdb session should look like:
(gdb) p v
$3 = WTF::Vector of length 7, capacity 16 = {7, 17, 27, 37, 47, 57, 67}
(gdb) set print elements 3
(gdb) p v
$6 = WTF::Vector of length 7, capacity 16 = {7, 17, 27...}
(gdb) set print array
(gdb) p v
$7 = WTF::Vector of length 7, capacity 16 = {
7,
17,
27
...
}
(gdb) set print elements 200
(gdb) p v
$8 = WTF::Vector of length 7, capacity 16 = {
7,
17,
27,
37,
47,
57,
67
}
"""
class Iterator:
def __init__(self, start, finish):
self.item = start
self.finish = finish
self.count = 0
def __iter__(self):
return self
def next(self):
if self.item == self.finish:
raise StopIteration
count = self.count
self.count += 1
element = self.item.dereference()
self.item += 1
return ('[%d]' % count, element)
def __init__(self, val):
self.val = val
def children(self):
start = self.val['m_buffer']
return self.Iterator(start, start + self.val['m_size'])
def to_string(self):
return ('%s of length %d, capacity %d'
% ('WTF::Vector', self.val['m_size'], self.val['m_capacity']))
def display_hint(self):
return 'array'
def add_pretty_printers():
pretty_printers = (
(re.compile("^WTF::Vector<.*>$"), WTFVectorPrinter),
(re.compile("^WTF::AtomicString$"), WTFAtomicStringPrinter),
(re.compile("^WTF::CString$"), WTFCStringPrinter),
(re.compile("^WTF::String$"), WTFStringPrinter),
(re.compile("^WTF::StringImpl$"), WTFStringImplPrinter),
(re.compile("^WebCore::KURLGooglePrivate$"), WebCoreKURLGooglePrivatePrinter),
(re.compile("^WebCore::QualifiedName$"), WebCoreQualifiedNamePrinter),
(re.compile("^JSC::Identifier$"), JSCIdentifierPrinter),
(re.compile("^JSC::JSString$"), JSCJSStringPrinter),
)
def lookup_function(val):
"""Function used to load pretty printers; will be passed to GDB."""
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
type = type.unqualified().strip_typedefs()
tag = type.tag
if tag:
for function, pretty_printer in pretty_printers:
if function.search(tag):
return pretty_printer(val)
if type.code == gdb.TYPE_CODE_PTR:
name = str(type.target().unqualified())
if name == 'UChar':
return UCharStringPrinter(val)
if name == 'LChar':
return LCharStringPrinter(val)
return None
gdb.pretty_printers.append(lookup_function)
add_pretty_printers()
class PrintPathToRootCommand(gdb.Command):
"""Command for printing WebKit Node trees.
Usage: printpathtoroot variable_name"""
def __init__(self):
super(PrintPathToRootCommand, self).__init__("printpathtoroot",
gdb.COMMAND_SUPPORT,
gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
element_type = gdb.lookup_type('WebCore::Element')
node_type = gdb.lookup_type('WebCore::Node')
frame = gdb.selected_frame()
try:
val = gdb.Frame.read_var(frame, arg)
except:
print "No such variable, or invalid type"
return
target_type = str(val.type.target().strip_typedefs())
if target_type == str(node_type):
stack = []
while val:
stack.append([val,
val.cast(element_type.pointer()).dereference()['m_tagName']])
val = val.dereference()['m_parent']
padding = ''
while len(stack) > 0:
pair = stack.pop()
print padding, pair[1], pair[0]
padding = padding + ' '
else:
print 'Sorry: I don\'t know how to deal with %s yet.' % target_type
PrintPathToRootCommand()
|
rsennrich/nematus | refs/heads/master | nematus/learning_schedule.py | 1 | import tensorflow as tf
"""Implements a trivial learning schedule with a fixed learning rate."""
class ConstantSchedule(object):
def __init__(self, learning_rate):
self._learning_rate = tf.constant(learning_rate)
@property
def learning_rate(self):
return self._learning_rate
"""Implements the learning schedule from the original Transformer paper.
See Section 5.3 of "Attention Is All You Need" (Vaswani et al., 2017).
"""
class TransformerSchedule(object):
def __init__(self, global_step, dim, warmup_steps):
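        # Per Section 5.3 of the paper:
        #   lrate = dim**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)
        # The step is made 1-indexed below to avoid 0**-0.5 at the first
        # update.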
t = tf.cast(global_step+1, tf.float32)
a = tf.pow(t, -0.5)
b = t * (warmup_steps ** (-1.5))
self._learning_rate = dim ** (-0.5) * tf.minimum(a, b)
@property
def learning_rate(self):
return self._learning_rate
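# A minimal usage sketch (TF 1.x graph mode; dim and warmup_steps are the
# base values from the paper, shown here only for illustration):
#
#   global_step = tf.train.get_or_create_global_step()
#   schedule = TransformerSchedule(global_step, dim=512, warmup_steps=4000)
#   optimizer = tf.train.AdamOptimizer(learning_rate=schedule.learning_rate)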
|
xushuwei202/Vintageous | refs/heads/master | ex/parser/scanner_command_tab_only_command.py | 9 | from .state import EOF
from .tokens import TokenEof
from .tokens_base import TOKEN_COMMAND_TAB_ONLY_COMMAND
from .tokens_base import TokenOfCommand
from Vintageous import ex
@ex.command('tabonly', 'tabo')
class TokenTabOnlyCommand(TokenOfCommand):
def __init__(self, *args, **kwargs):
super().__init__([],
TOKEN_COMMAND_TAB_ONLY_COMMAND,
'tabonly', *args, **kwargs)
self.target_command = 'ex_tabonly'
def scan_command_tab_only_command(state):
c = state.consume()
if c == EOF:
return None, [TokenTabOnlyCommand(), TokenEof()]
bang = c == '!'
return None, [TokenTabOnlyCommand(forced=bang), TokenEof()]
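# For illustration: scanning ':tabonly' yields TokenTabOnlyCommand(), while
# ':tabonly!' yields TokenTabOnlyCommand(forced=True).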
|
hurricup/intellij-community | refs/heads/master | python/testData/override/dunderNew.py | 79 | class BaseMeta(type):
def __new__(cls, name, bases, namespace):
return super(BaseMeta, cls).__new__(cls, name, bases, namespace)
class MyMeta(BaseMeta):
<caret>pass |
heli522/scikit-learn | refs/heads/master | doc/tutorial/text_analytics/data/movie_reviews/fetch_data.py | 278 | """Script to download the movie review dataset"""
import os
import tarfile
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
URL = ("http://www.cs.cornell.edu/people/pabo/"
"movie-review-data/review_polarity.tar.gz")
ARCHIVE_NAME = URL.rsplit('/', 1)[1]
DATA_FOLDER = "txt_sentoken"
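# For reference: the archive unpacks into txt_sentoken/pos and
# txt_sentoken/neg, one plain-text file per review.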
if not os.path.exists(DATA_FOLDER):
if not os.path.exists(ARCHIVE_NAME):
print("Downloading dataset from %s (3 MB)" % URL)
opener = urlopen(URL)
open(ARCHIVE_NAME, 'wb').write(opener.read())
print("Decompressing %s" % ARCHIVE_NAME)
tarfile.open(ARCHIVE_NAME, "r:gz").extractall(path='.')
os.remove(ARCHIVE_NAME)
|
helenst/django | refs/heads/master | django/utils/html_parser.py | 18 | from django.utils.six.moves import html_parser as _html_parser
import re
import sys
current_version = sys.version_info
use_workaround = (
(current_version < (2, 7, 3)) or
(current_version >= (3, 0) and current_version < (3, 2, 3))
)
try:
HTMLParseError = _html_parser.HTMLParseError
except AttributeError:
# create a dummy class for Python 3.5+ where it's been removed
class HTMLParseError(Exception):
pass
if not use_workaround:
if current_version >= (3, 4):
class HTMLParser(_html_parser.HTMLParser):
"""Explicitly set convert_charrefs to be False.
This silences a deprecation warning on Python 3.4, but we can't do
it at call time because Python 2.7 does not have the keyword
argument.
"""
def __init__(self, convert_charrefs=False, **kwargs):
_html_parser.HTMLParser.__init__(self, convert_charrefs=convert_charrefs, **kwargs)
else:
HTMLParser = _html_parser.HTMLParser
else:
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
class HTMLParser(_html_parser.HTMLParser):
"""
Patched version of stdlib's HTMLParser with patch from:
http://bugs.python.org/issue670664
"""
def __init__(self):
_html_parser.HTMLParser.__init__(self)
self.cdata_tag = None
def set_cdata_mode(self, tag):
try:
self.interesting = _html_parser.interesting_cdata
except AttributeError:
self.interesting = re.compile(r'</\s*%s\s*>' % tag.lower(), re.I)
self.cdata_tag = tag.lower()
def clear_cdata_mode(self):
self.interesting = _html_parser.interesting_normal
self.cdata_tag = None
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i + 1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = _html_parser.attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif (attrvalue[:1] == '\'' == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = (len(self.__starttag_text)
- self.__starttag_text.rfind("\n"))
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag) # <--------------------------- Changed
return endpos
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
match = _html_parser.endendtag.search(rawdata, i + 1) # >
if not match:
return -1
j = match.end()
match = _html_parser.endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_tag is not None: # *** add ***
self.handle_data(rawdata[i:j]) # *** add ***
return j # *** add ***
self.error("bad end tag: %r" % (rawdata[i:j],))
# --- changed start ---------------------------------------------------
tag = match.group(1).strip()
if self.cdata_tag is not None:
if tag.lower() != self.cdata_tag:
self.handle_data(rawdata[i:j])
return j
# --- changed end -----------------------------------------------------
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
|
petrjasek/superdesk-ntb | refs/heads/master | server/ntb/io/feed_parsers/ritzau.py | 2 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013 - 2018 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk.io.feed_parsers.ritzau import RitzauFeedParser
from superdesk.io.registry import register_feed_parser
from superdesk.errors import ParserError
from . import utils
class RitzauFeedParser(RitzauFeedParser):
"""
Feed Parser which can parse Ritzau XML feed
"""
_subjects_map = None
NAME = 'ntb_ritzau'
label = "NTB Ritzau feed"
def parse(self, xml, provider=None):
item = super().parse(xml, provider)
try:
category = utils.ingest_category_from_subject(item.get('subject'))
item.setdefault('subject', []).append(category)
utils.set_default_service(item)
except Exception as ex:
raise ParserError.parseMessageError(ex, provider)
return item
register_feed_parser(RitzauFeedParser.NAME, RitzauFeedParser())
|
nhomar/odoo | refs/heads/8.0 | addons/mass_mailing/wizard/test_mailing.py | 148 | # -*- coding: utf-8 -*-
from openerp import tools
from openerp.osv import osv, fields
class TestMassMailing(osv.TransientModel):
_name = 'mail.mass_mailing.test'
_description = 'Sample Mail Wizard'
_columns = {
'email_to': fields.char('Recipients', required=True,
help='Comma-separated list of email addresses.'),
'mass_mailing_id': fields.many2one('mail.mass_mailing', 'Mailing', required=True),
}
_defaults = {
'email_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
}
def send_mail_test(self, cr, uid, ids, context=None):
Mail = self.pool['mail.mail']
for wizard in self.browse(cr, uid, ids, context=context):
mailing = wizard.mass_mailing_id
test_emails = tools.email_split(wizard.email_to)
mail_ids = []
for test_mail in test_emails:
mail_values = {
'email_from': mailing.email_from,
'reply_to': mailing.reply_to,
'email_to': test_mail,
'subject': mailing.name,
'body_html': '',
'notification': True,
'mailing_id': mailing.id,
}
mail_mail_obj = Mail.browse(cr, uid, Mail.create(cr, uid, mail_values, context=context), context=context)
unsubscribe_url = Mail._get_unsubscribe_url(cr, uid, mail_mail_obj, test_mail, context=context)
body = tools.append_content_to_html(mailing.body_html, unsubscribe_url, plaintext=False, container_tag='p')
                Mail.write(cr, uid, mail_mail_obj.id, {'body_html': body}, context=context)
mail_ids.append(mail_mail_obj.id)
Mail.send(cr, uid, mail_ids, context=context)
self.pool['mail.mass_mailing'].write(cr, uid, [mailing.id], {'state': 'test'}, context=context)
return True
|
manazhao/tf_recsys | refs/heads/r1.0 | tensorflow/contrib/layers/python/layers/utils_test.py | 65 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ConstantValueTest(test.TestCase):
def test_value(self):
for v in [True, False, 1, 0, 1.0]:
value = utils.constant_value(v)
self.assertEqual(value, v)
def test_constant(self):
for v in [True, False, 1, 0, 1.0]:
c = constant_op.constant(v)
value = utils.constant_value(c)
self.assertEqual(value, v)
with self.test_session():
self.assertEqual(c.eval(), v)
def test_variable(self):
for v in [True, False, 1, 0, 1.0]:
with ops.Graph().as_default() as g, self.test_session(g) as sess:
x = variables.Variable(v)
value = utils.constant_value(x)
self.assertEqual(value, None)
sess.run(variables.global_variables_initializer())
self.assertEqual(x.eval(), v)
def test_placeholder(self):
for v in [True, False, 1, 0, 1.0]:
p = array_ops.placeholder(np.dtype(type(v)), [])
x = array_ops.identity(p)
value = utils.constant_value(p)
self.assertEqual(value, None)
with self.test_session():
self.assertEqual(x.eval(feed_dict={p: v}), v)
class StaticCondTest(test.TestCase):
def test_value(self):
fn1 = lambda: 'fn1'
fn2 = lambda: 'fn2'
expected = lambda v: 'fn1' if v else 'fn2'
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
self.assertEqual(o, expected(v))
def test_constant(self):
fn1 = lambda: constant_op.constant('fn1')
fn2 = lambda: constant_op.constant('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(), expected(v))
def test_variable(self):
fn1 = lambda: variables.Variable('fn1')
fn2 = lambda: variables.Variable('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(), expected(v))
def test_tensors(self):
fn1 = lambda: constant_op.constant(0) - constant_op.constant(1)
fn2 = lambda: constant_op.constant(0) - constant_op.constant(2)
expected = lambda v: -1 if v else -2
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(), expected(v))
class SmartCondStaticTest(test.TestCase):
def test_value(self):
fn1 = lambda: 'fn1'
fn2 = lambda: 'fn2'
expected = lambda v: 'fn1' if v else 'fn2'
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
self.assertEqual(o, expected(v))
def test_constant(self):
fn1 = lambda: constant_op.constant('fn1')
fn2 = lambda: constant_op.constant('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(), expected(v))
def test_variable(self):
fn1 = lambda: variables.Variable('fn1')
fn2 = lambda: variables.Variable('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(), expected(v))
def test_tensors(self):
fn1 = lambda: constant_op.constant(0) - constant_op.constant(1)
fn2 = lambda: constant_op.constant(0) - constant_op.constant(2)
expected = lambda v: -1 if v else -2
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(), expected(v))
class SmartCondDynamicTest(test.TestCase):
def test_value(self):
fn1 = lambda: ops.convert_to_tensor('fn1')
fn2 = lambda: ops.convert_to_tensor('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
def test_constant(self):
fn1 = lambda: constant_op.constant('fn1')
fn2 = lambda: constant_op.constant('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
def test_variable(self):
fn1 = lambda: variables.Variable('fn1')
fn2 = lambda: variables.Variable('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
def test_tensors(self):
fn1 = lambda: constant_op.constant(0) - constant_op.constant(1)
fn2 = lambda: constant_op.constant(0) - constant_op.constant(2)
expected = lambda v: -1 if v else -2
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
class CollectNamedOutputsTest(test.TestCase):
def test_collect(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a2', t2)
self.assertEqual(ops.get_collection('end_points'), [t1, t2])
def test_aliases(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a2', t2)
self.assertEqual(t1.aliases, ['a1'])
self.assertEqual(t2.aliases, ['a2'])
def test_multiple_aliases(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a11', t1)
utils.collect_named_outputs('end_points', 'a12', t1)
utils.collect_named_outputs('end_points', 'a21', t2)
utils.collect_named_outputs('end_points', 'a22', t2)
self.assertEqual(t1.aliases, ['a11', 'a12'])
self.assertEqual(t2.aliases, ['a21', 'a22'])
def test_gather_aliases(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
t3 = constant_op.constant(2.0, name='t3')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a2', t2)
ops.add_to_collection('end_points', t3)
aliases = utils.gather_tensors_aliases(ops.get_collection('end_points'))
self.assertEqual(aliases, ['a1', 'a2', 't3'])
def test_convert_collection_to_dict(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a21', t2)
utils.collect_named_outputs('end_points', 'a22', t2)
end_points = utils.convert_collection_to_dict('end_points')
self.assertEqual(end_points['a1'], t1)
self.assertEqual(end_points['a21'], t2)
self.assertEqual(end_points['a22'], t2)
class NPositiveIntegersTest(test.TestCase):
def test_invalid_input(self):
with self.assertRaises(ValueError):
utils.n_positive_integers('3', [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(3.3, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(-1, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(0, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(1, [1, 2])
with self.assertRaises(ValueError):
utils.n_positive_integers(1, [-1])
with self.assertRaises(ValueError):
utils.n_positive_integers(1, [0])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, [1, 2, 3])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, ['hello', 2])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, tensor_shape.TensorShape([2, 3, 1]))
with self.assertRaises(ValueError):
utils.n_positive_integers(3, tensor_shape.TensorShape([2, None, 1]))
with self.assertRaises(ValueError):
utils.n_positive_integers(3, tensor_shape.TensorShape(None))
def test_valid_input(self):
self.assertEqual(utils.n_positive_integers(1, 2), (2,))
self.assertEqual(utils.n_positive_integers(2, 2), (2, 2))
self.assertEqual(utils.n_positive_integers(2, (2, 3)), (2, 3))
self.assertEqual(utils.n_positive_integers(3, (2, 3, 1)), (2, 3, 1))
self.assertEqual(
utils.n_positive_integers(3, tensor_shape.TensorShape([2, 3, 1])),
(2, 3, 1))
if __name__ == '__main__':
test.main()
|
Wuteyan/VTK | refs/heads/master | Examples/Infovis/Python/geoview.py | 8 | from vtk import *
import os.path
data_dir = "../../../../VTKData/Data/Infovis/Images/"
if not os.path.exists(data_dir):
data_dir = "../../../../../VTKData/Data/Infovis/Images/"
source = vtkGeoRandomGraphSource()
source.DirectedOff()
source.SetNumberOfVertices(100)
source.SetEdgeProbability(0.00) # produces a tree
source.SetUseEdgeProbability(True)
source.AllowParallelEdgesOn()
source.AllowSelfLoopsOn()
source.SetStartWithTree(True)
# Create a 3D geospatial view
view = vtkGeoView()
view.GetRenderWindow().SetSize(600, 600)
# Create the background image
reader = vtkJPEGReader()
reader.SetFileName(data_dir + "NE2_ps_bath.jpg")
reader.Update()
view.AddDefaultImageRepresentation(reader.GetOutput())
# Create graph
graph_rep = vtkRenderedGraphRepresentation()
graph_rep.SetInputConnection(source.GetOutputPort())
graph_rep.SetVertexColorArrayName("vertex id")
graph_rep.ColorVerticesByArrayOn()
graph_rep.SetEdgeColorArrayName("edge id")
graph_rep.ColorEdgesByArrayOn()
graph_rep.SetVertexLabelArrayName("vertex id")
graph_rep.VertexLabelVisibilityOn()
graph_rep.SetLayoutStrategyToAssignCoordinates("longitude", "latitude", None)
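# Route edges as arcs over the globe; the explode factor controls how far
# the arcs bulge away from the surface so overlapping edges stay readable.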
strategy = vtkGeoEdgeStrategy()
strategy.SetExplodeFactor(.1)
graph_rep.SetEdgeLayoutStrategy(strategy)
view.AddRepresentation(graph_rep)
# Make a normal graph layout view
view2 = vtkGraphLayoutView()
view2.GetRenderWindow().SetSize(600, 600)
view2.AddRepresentationFromInputConnection(source.GetOutputPort())
view2.SetVertexColorArrayName("vertex id")
view2.ColorVerticesOn()
view2.SetEdgeColorArrayName("edge id")
view2.ColorEdgesOn()
view2.SetVertexLabelArrayName("vertex id")
view2.VertexLabelVisibilityOn()
# Apply a theme to the views
theme = vtkViewTheme.CreateMellowTheme()
theme.SetLineWidth(4)
theme.SetPointSize(8)
theme.SetCellSaturationRange(.5,.5)
theme.SetSelectedCellColor(1,0,1)
theme.SetSelectedPointColor(1,0,1)
view.ApplyViewTheme(theme)
graph_rep.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
theme.FastDelete()
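# Share the selection between the two views through a common annotation link,
# and use a view updater to re-render both views when the selection changes.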
link = vtkAnnotationLink()
graph_rep.SetAnnotationLink(link)
view2.GetRepresentation(0).SetAnnotationLink(link)
updater = vtkViewUpdater()
updater.AddView(view)
updater.AddView(view2)
view.ResetCamera()
view2.ResetCamera()
view.Render()
view2.Render()
view.GetInteractor().Initialize()
view.GetInteractor().Start()
|
levkar/odoo | refs/heads/10.0 | addons/board/models/__init__.py | 23 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import board
|
liberatorqjw/scikit-learn | refs/heads/master | benchmarks/bench_plot_fastkmeans.py | 294 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
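# The plotting code below separates timing curves from inertia ("quality")
# curves by matching "speed" case-insensitively in the result labels.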
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
    max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
                                     if "speed" in label.lower()]])
    max_inertia = max([max(i) for i in [
        t for (label, t) in results.iteritems()
        if "speed" not in label.lower()]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
        if 'speed' in label.lower():
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
|
knowsis/django | refs/heads/nonrel-1.6 | django/contrib/gis/gdal/geomtype.py | 219 | from django.contrib.gis.gdal.error import OGRException
from django.utils import six
#### OGRGeomType ####
class OGRGeomType(object):
"Encapulates OGR Geometry Types."
wkb25bit = -2147483648
# Dictionary of acceptable OGRwkbGeometryType s and their string names.
    _types = {0: 'Unknown',
              1: 'Point',
              2: 'LineString',
              3: 'Polygon',
              4: 'MultiPoint',
              5: 'MultiLineString',
              6: 'MultiPolygon',
              7: 'GeometryCollection',
              100: 'None',
              101: 'LinearRing',
              1 + wkb25bit: 'Point25D',
              2 + wkb25bit: 'LineString25D',
              3 + wkb25bit: 'Polygon25D',
              4 + wkb25bit: 'MultiPoint25D',
              5 + wkb25bit: 'MultiLineString25D',
              6 + wkb25bit: 'MultiPolygon25D',
              7 + wkb25bit: 'GeometryCollection25D',
              }
# Reverse type dictionary, keyed by lower-case of the name.
_str_types = dict([(v.lower(), k) for k, v in _types.items()])
def __init__(self, type_input):
"Figures out the correct OGR Type based upon the input."
if isinstance(type_input, OGRGeomType):
num = type_input.num
elif isinstance(type_input, six.string_types):
type_input = type_input.lower()
            if type_input == 'geometry':
                type_input = 'unknown'
num = self._str_types.get(type_input, None)
if num is None:
raise OGRException('Invalid OGR String Type "%s"' % type_input)
elif isinstance(type_input, int):
if not type_input in self._types:
raise OGRException('Invalid OGR Integer Type: %d' % type_input)
num = type_input
else:
raise TypeError('Invalid OGR input type given.')
# Setting the OGR geometry type number.
self.num = num
def __str__(self):
"Returns the value of the name property."
return self.name
def __eq__(self, other):
"""
Does an equivalence test on the OGR type with the given
other OGRGeomType, the short-hand string, or the integer.
"""
if isinstance(other, OGRGeomType):
return self.num == other.num
elif isinstance(other, six.string_types):
return self.name.lower() == other.lower()
elif isinstance(other, int):
return self.num == other
else:
return False
def __ne__(self, other):
return not (self == other)
@property
def name(self):
"Returns a short-hand string form of the OGR Geometry type."
return self._types[self.num]
@property
def django(self):
"Returns the Django GeometryField for this OGR Type."
s = self.name.replace('25D', '')
if s in ('LinearRing', 'None'):
return None
elif s == 'Unknown':
s = 'Geometry'
return s + 'Field'
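# Illustrative usage (values follow the tables above):
#   OGRGeomType('Polygon').num -> 3
#   OGRGeomType(4).name -> 'MultiPoint'
#   OGRGeomType('MultiPolygon25D').django -> 'MultiPolygonField'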
|
DavidLP/home-assistant | refs/heads/dev | homeassistant/components/rocketchat/notify.py | 7 | """Rocket.Chat notification service."""
import logging
import voluptuous as vol
from homeassistant.const import (
CONF_PASSWORD, CONF_ROOM, CONF_URL, CONF_USERNAME)
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (ATTR_DATA, PLATFORM_SCHEMA,
BaseNotificationService)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_URL): vol.Url(),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_ROOM): cv.string,
})
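# Illustrative configuration.yaml entry (host and credentials are placeholders):
# notify:
#   - platform: rocketchat
#     url: https://chat.example.com
#     username: hass-bot
#     password: YOUR_PASSWORD
#     room: notifications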
def get_service(hass, config, discovery_info=None):
"""Return the notify service."""
from rocketchat_API.APIExceptions.RocketExceptions import (
RocketConnectionException, RocketAuthenticationException)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
url = config.get(CONF_URL)
room = config.get(CONF_ROOM)
try:
return RocketChatNotificationService(url, username, password, room)
except RocketConnectionException:
_LOGGER.warning(
"Unable to connect to Rocket.Chat server at %s", url)
except RocketAuthenticationException:
_LOGGER.warning(
"Rocket.Chat authentication failed for user %s", username)
_LOGGER.info("Please check your username/password")
return None
class RocketChatNotificationService(BaseNotificationService):
"""Implement the notification service for Rocket.Chat."""
def __init__(self, url, username, password, room):
"""Initialize the service."""
from rocketchat_API.rocketchat import RocketChat
self._room = room
self._server = RocketChat(username, password, server_url=url)
def send_message(self, message="", **kwargs):
"""Send a message to Rocket.Chat."""
data = kwargs.get(ATTR_DATA) or {}
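        # Any extra service data (e.g. {"alias": "Home Assistant"}) is passed
        # straight through to the Rocket.Chat chat_post_message API call.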
resp = self._server.chat_post_message(
message, channel=self._room, **data)
if resp.status_code == 200:
success = resp.json()["success"]
if not success:
_LOGGER.error("Unable to post Rocket.Chat message")
else:
_LOGGER.error("Incorrect status code when posting message: %d",
resp.status_code)
|
tntnatbry/tensorflow | refs/heads/master | tensorflow/python/debug/cli/curses_ui.py | 6 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Curses-Based Command-Line Interface of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import curses
from curses import textpad
import signal
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import base_ui
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
_SCROLL_REFRESH = "refresh"
_SCROLL_UP = "up"
_SCROLL_DOWN = "down"
_SCROLL_UP_A_LINE = "up_a_line"
_SCROLL_DOWN_A_LINE = "down_a_line"
_SCROLL_HOME = "home"
_SCROLL_END = "end"
_SCROLL_TO_LINE_INDEX = "scroll_to_line_index"
def _get_command_from_line_attr_segs(mouse_x, attr_segs):
"""Attempt to extract command from the attribute segments of a line.
Args:
mouse_x: (int) x coordinate of the mouse event.
attr_segs: (list) The list of attribute segments of a line from a
RichTextLines object.
Returns:
(str or None) If a command exists: the command as a str; otherwise, None.
"""
for seg in attr_segs:
if seg[0] <= mouse_x < seg[1]:
attributes = seg[2] if isinstance(seg[2], list) else [seg[2]]
for attr in attributes:
if isinstance(attr, debugger_cli_common.MenuItem):
return attr.content
class ScrollBar(object):
"""Vertical ScrollBar for Curses-based CLI.
An object of this class has knowledge of the location of the scroll bar
in the screen coordinates, the current scrolling position, and the total
number of text lines in the screen text. By using this information, it
  can generate a text rendering of the scroll bar, which consists of an UP
  button at the top and a DOWN button at the bottom, in addition to a scroll
block in between, whose exact location is determined by the scrolling
position. The object can also calculate the scrolling command (e.g.,
_SCROLL_UP_A_LINE, _SCROLL_DOWN) from the coordinate of a mouse click
event in the screen region it occupies.
"""
BASE_ATTR = "black_on_white"
def __init__(self,
min_x,
min_y,
max_x,
max_y,
scroll_position,
output_num_rows):
"""Constructor of ScrollBar.
Args:
min_x: (int) left index of the scroll bar on the screen (inclusive).
min_y: (int) top index of the scroll bar on the screen (inclusive).
max_x: (int) right index of the scroll bar on the screen (inclusive).
max_y: (int) bottom index of the scroll bar on the screen (inclusive).
scroll_position: (int) 0-based location of the screen output. For example,
if the screen output is scrolled to the top, the value of
scroll_position should be 0. If it is scrolled to the bottom, the value
should be output_num_rows - 1.
output_num_rows: (int) Total number of output rows.
Raises:
ValueError: If the width or height of the scroll bar, as determined
by min_x, max_x, min_y and max_y, is too small.
"""
self._min_x = min_x
self._min_y = min_y
self._max_x = max_x
self._max_y = max_y
self._scroll_position = scroll_position
self._output_num_rows = output_num_rows
self._scroll_bar_height = max_y - min_y + 1
if self._max_x < self._min_x:
raise ValueError("Insufficient width for ScrollBar (%d)" %
(self._max_x - self._min_x + 1))
if self._max_y < self._min_y + 3:
raise ValueError("Insufficient height for ScrollBar (%d)" %
(self._max_y - self._min_y + 1))
def _block_y(self):
"""Get the 0-based y coordinate of the scroll block.
    This y coordinate takes into account the UP and DOWN buttons at the top
    and bottom of the ScrollBar. For example, at the home
location, the return value will be 1; at the bottom location, the return
value will be self._scroll_bar_height - 2.
Returns:
(int) 0-based y coordinate of the scroll block, in the ScrollBar
coordinate system, i.e., not the screen coordinate system. For example,
when scroll position is at the top, this return value will be 1 (not 0,
because of the presence of the UP button). When scroll position is at
the bottom, this return value will be self._scroll_bar_height - 2
(not self._scroll_bar_height - 1, because of the presence of the DOWN
button).
"""
return int(float(self._scroll_position) / (self._output_num_rows - 1) *
(self._scroll_bar_height - 3)) + 1
def layout(self):
"""Get the RichTextLines layout of the scroll bar.
Returns:
(debugger_cli_common.RichTextLines) The text layout of the scroll bar.
"""
width = self._max_x - self._min_x + 1
empty_line = " " * width
foreground_font_attr_segs = [(0, width, self.BASE_ATTR)]
if self._output_num_rows > 1:
block_y = self._block_y()
if width == 1:
up_text = "U"
down_text = "D"
elif width == 2:
up_text = "UP"
down_text = "DN"
elif width == 3:
up_text = "UP "
down_text = "DN "
else:
up_text = " UP "
down_text = "DOWN"
layout = debugger_cli_common.RichTextLines(
[up_text], font_attr_segs={0: [(0, width, self.BASE_ATTR)]})
for i in xrange(1, self._scroll_bar_height - 1):
font_attr_segs = foreground_font_attr_segs if i == block_y else None
layout.append(empty_line, font_attr_segs=font_attr_segs)
layout.append(down_text, font_attr_segs=foreground_font_attr_segs)
else:
layout = debugger_cli_common.RichTextLines(
[empty_line] * self._scroll_bar_height)
return layout
def get_click_command(self, mouse_y):
# TODO(cais): Support continuous scrolling when the mouse button is held
# down.
if self._output_num_rows <= 1:
return None
elif mouse_y == self._min_y:
return _SCROLL_UP_A_LINE
elif mouse_y == self._max_y:
return _SCROLL_DOWN_A_LINE
elif mouse_y > self._block_y() and mouse_y < self._max_y:
return _SCROLL_DOWN
elif mouse_y < self._block_y() and mouse_y > self._min_y:
return _SCROLL_UP
else:
return None
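# Illustrative use of ScrollBar (coordinates are made up for this sketch):
#   bar = ScrollBar(min_x=76, min_y=2, max_x=79, max_y=20,
#                   scroll_position=0, output_num_rows=500)
#   bar.layout() yields a RichTextLines column with " UP "/"DOWN" buttons, and
#   bar.get_click_command(mouse_y=20) returns _SCROLL_DOWN_A_LINE.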
class CursesUI(base_ui.BaseUI):
"""Curses-based Command-line UI.
In this class, the methods with the prefix "_screen_" are the methods that
interact with the actual terminal using the curses library.
"""
CLI_TERMINATOR_KEY = 7 # Terminator key for input text box.
CLI_TAB_KEY = ord("\t")
BACKSPACE_KEY = ord("\b")
REGEX_SEARCH_PREFIX = "/"
TENSOR_INDICES_NAVIGATION_PREFIX = "@"
# Limit screen width to work around the limitation of the curses library that
# it may return invalid x coordinates for large values.
_SCREEN_WIDTH_LIMIT = 220
# Possible Enter keys. 343 is curses key code for the num-pad Enter key when
# num lock is off.
CLI_CR_KEYS = [ord("\n"), ord("\r"), 343]
_FOREGROUND_COLORS = {
"white": curses.COLOR_WHITE,
"red": curses.COLOR_RED,
"green": curses.COLOR_GREEN,
"yellow": curses.COLOR_YELLOW,
"blue": curses.COLOR_BLUE,
"cyan": curses.COLOR_CYAN,
"magenta": curses.COLOR_MAGENTA,
"black": curses.COLOR_BLACK,
}
_BACKGROUND_COLORS = {
"white": curses.COLOR_WHITE,
"black": curses.COLOR_BLACK,
}
# Font attribute for search and highlighting.
_SEARCH_HIGHLIGHT_FONT_ATTR = "black_on_white"
_ARRAY_INDICES_COLOR_PAIR = "black_on_white"
_ERROR_TOAST_COLOR_PAIR = "red_on_white"
_INFO_TOAST_COLOR_PAIR = "blue_on_white"
_STATUS_BAR_COLOR_PAIR = "black_on_white"
_UI_WAIT_COLOR_PAIR = "magenta_on_white"
_UI_WAIT_MESSAGE = "Processing..."
def __init__(self, on_ui_exit=None):
"""Constructor of CursesUI.
Args:
on_ui_exit: (Callable) Callback invoked when the UI exits.
"""
base_ui.BaseUI.__init__(self, on_ui_exit=on_ui_exit)
self._screen_init()
self._screen_refresh_size()
# TODO(cais): Error out if the size of the screen is too small.
# Initialize some UI component size and locations.
self._init_layout()
self._command_history_store = debugger_cli_common.CommandHistory()
# Active list of command history, used in history navigation.
# _command_handler_registry holds all the history commands the CLI has
# received, up to a size limit. _active_command_history is the history
# currently being navigated in, e.g., using the Up/Down keys. The latter
# can be different from the former during prefixed or regex-based history
    # navigation, e.g., when the user enters the beginning of a command and
    # hits Up.
self._active_command_history = []
# Pointer to the current position in the history sequence.
# 0 means it is a new command being keyed in.
self._command_pointer = 0
self._command_history_limit = 100
self._pending_command = ""
# State related to screen output.
self._output_pad = None
self._output_pad_row = 0
self._output_array_pointer_indices = None
self._curr_unwrapped_output = None
self._curr_wrapped_output = None
# Register signal handler for SIGINT.
signal.signal(signal.SIGINT, self._interrupt_handler)
self.register_command_handler(
"mouse",
self._mouse_mode_command_handler,
"Get or set the mouse mode of this CLI: (on|off)",
prefix_aliases=["m"])
def _init_layout(self):
"""Initialize the layout of UI components.
Initialize the location and size of UI components such as command textbox
and output region according to the terminal size.
"""
# NamedTuple for rectangular locations on screen
self.rectangle = collections.namedtuple("rectangle",
"top left bottom right")
# Height of command text box
self._command_textbox_height = 2
self._title_row = 0
# Top row index of the output pad.
# A "pad" is a curses object that holds lines of text and not limited to
# screen size. It can be rendered on the screen partially with scroll
# parameters specified.
self._output_top_row = 1
# Number of rows that the output pad has.
self._output_num_rows = (
self._max_y - self._output_top_row - self._command_textbox_height - 1)
# Row index of scroll information line: Taking into account the zero-based
# row indexing and the command textbox area under the scroll information
# row.
self._output_scroll_row = self._max_y - 1 - self._command_textbox_height
# Tab completion bottom row.
self._candidates_top_row = self._output_scroll_row - 4
self._candidates_bottom_row = self._output_scroll_row - 1
# Maximum number of lines the candidates display can have.
self._candidates_max_lines = int(self._output_num_rows / 2)
self.max_output_lines = 10000
# Regex search state.
self._curr_search_regex = None
self._unwrapped_regex_match_lines = []
# Size of view port on screen, which is always smaller or equal to the
# screen size.
self._output_pad_screen_height = self._output_num_rows - 1
self._output_pad_screen_width = self._max_x - 2
self._output_pad_screen_location = self.rectangle(
top=self._output_top_row,
left=0,
bottom=self._output_top_row + self._output_num_rows,
right=self._output_pad_screen_width)
def _screen_init(self):
"""Screen initialization.
Creates curses stdscr and initialize the color pairs for display.
"""
self._stdscr = curses.initscr()
self._command_window = None
# Prepare color pairs.
curses.start_color()
self._color_pairs = {}
color_index = 0
for fg_color in self._FOREGROUND_COLORS:
for bg_color in self._BACKGROUND_COLORS:
color_index += 1
curses.init_pair(color_index, self._FOREGROUND_COLORS[fg_color],
self._BACKGROUND_COLORS[bg_color])
color_name = fg_color
if bg_color != "black":
color_name += "_on_" + bg_color
self._color_pairs[color_name] = curses.color_pair(color_index)
# A_BOLD or A_BLINK is not really a "color". But place it here for
# convenience.
self._color_pairs["bold"] = curses.A_BOLD
self._color_pairs["blink"] = curses.A_BLINK
self._color_pairs["underline"] = curses.A_UNDERLINE
# Default color pair to use when a specified color pair does not exist.
self._default_color_pair = self._color_pairs["white"]
def _screen_launch(self, enable_mouse_on_start):
"""Launch the curses screen."""
curses.noecho()
curses.cbreak()
self._stdscr.keypad(1)
self._mouse_enabled = enable_mouse_on_start
self._screen_set_mousemask()
self._screen_create_command_window()
def _screen_create_command_window(self):
"""Create command window according to screen size."""
if self._command_window:
del self._command_window
self._command_window = curses.newwin(
self._command_textbox_height, self._max_x - len(self.CLI_PROMPT),
self._max_y - self._command_textbox_height, len(self.CLI_PROMPT))
def _screen_refresh(self):
self._stdscr.refresh()
def _screen_terminate(self):
"""Terminate the curses screen."""
self._stdscr.keypad(0)
curses.nocbreak()
curses.echo()
curses.endwin()
# Remove SIGINT handler.
signal.signal(signal.SIGINT, signal.SIG_DFL)
def run_ui(self,
init_command=None,
title=None,
title_color=None,
enable_mouse_on_start=True):
"""Run the CLI: See the doc of base_ui.BaseUI.run_ui for more details."""
self._screen_launch(enable_mouse_on_start=enable_mouse_on_start)
# Optional initial command.
if init_command is not None:
self._dispatch_command(init_command)
if title is not None:
self._title(title, title_color=title_color)
# CLI main loop.
exit_token = self._ui_loop()
if self._on_ui_exit:
self._on_ui_exit()
self._screen_terminate()
return exit_token
def get_help(self):
return self._command_handler_registry.get_help()
def _screen_create_command_textbox(self, existing_command):
"""Create command textbox on screen.
Args:
existing_command: (str) A command string to put in the textbox right
after its creation.
"""
# Display the tfdbg prompt.
self._stdscr.addstr(self._max_y - self._command_textbox_height, 0,
self.CLI_PROMPT, curses.A_BOLD)
self._stdscr.refresh()
self._command_window.clear()
# Command text box.
self._command_textbox = textpad.Textbox(
self._command_window, insert_mode=True)
# Enter existing command.
self._auto_key_in(existing_command)
def _ui_loop(self):
"""Command-line UI loop.
Returns:
An exit token of arbitrary type. The token can be None.
"""
while True:
# Enter history command if pointer is in history (> 0):
if self._command_pointer > 0:
existing_command = self._active_command_history[-self._command_pointer]
else:
existing_command = self._pending_command
self._screen_create_command_textbox(existing_command)
try:
command, terminator, pending_command_changed = self._get_user_command()
except debugger_cli_common.CommandLineExit as e:
return e.exit_token
if not command and terminator != self.CLI_TAB_KEY:
continue
if terminator in self.CLI_CR_KEYS or terminator == curses.KEY_MOUSE:
exit_token = self._dispatch_command(command)
if exit_token is not None:
return exit_token
elif terminator == self.CLI_TAB_KEY:
tab_completed = self._tab_complete(command)
self._pending_command = tab_completed
        self._command_pointer = 0
elif pending_command_changed:
self._pending_command = command
return
def _get_user_command(self):
"""Get user command from UI.
Returns:
command: (str) The user-entered command.
      terminator: (int) Terminator key code for the command.
If command is a normal command entered with the Enter key, the value
will be the key itself. If this is a tab completion call (using the
Tab key), the value will reflect that as well.
pending_command_changed: (bool) If the pending command has changed.
Used during command history navigation.
"""
# First, reset textbox state variables.
self._textbox_curr_terminator = None
self._textbox_pending_command_changed = False
command = self._screen_get_user_command()
command = self._strip_terminator(command)
return (command, self._textbox_curr_terminator,
self._textbox_pending_command_changed)
def _screen_get_user_command(self):
return self._command_textbox.edit(validate=self._on_textbox_keypress)
def _strip_terminator(self, command):
if not command:
return command
for v in self.CLI_CR_KEYS:
if v < 256:
command = command.replace(chr(v), "")
return command.strip()
def _screen_refresh_size(self):
self._max_y, self._max_x = self._stdscr.getmaxyx()
if self._max_x > self._SCREEN_WIDTH_LIMIT:
self._max_x = self._SCREEN_WIDTH_LIMIT
def _dispatch_command(self, command):
"""Dispatch user command.
Args:
command: (str) Command to dispatch.
Returns:
An exit token object. None value means that the UI loop should not exit.
A non-None value means the UI loop should exit.
"""
if self._output_pad:
self._toast(self._UI_WAIT_MESSAGE, color=self._UI_WAIT_COLOR_PAIR)
if command in self.CLI_EXIT_COMMANDS:
# Explicit user command-triggered exit: EXPLICIT_USER_EXIT as the exit
# token.
return debugger_cli_common.EXPLICIT_USER_EXIT
if command:
self._command_history_store.add_command(command)
if (command.startswith(self.REGEX_SEARCH_PREFIX) and
self._curr_unwrapped_output):
if len(command) > len(self.REGEX_SEARCH_PREFIX):
# Command is like "/regex". Perform regex search.
regex = command[len(self.REGEX_SEARCH_PREFIX):]
self._curr_search_regex = regex
self._display_output(self._curr_unwrapped_output, highlight_regex=regex)
elif self._unwrapped_regex_match_lines:
# Command is "/". Continue scrolling down matching lines.
self._display_output(
self._curr_unwrapped_output,
is_refresh=True,
highlight_regex=self._curr_search_regex)
self._command_pointer = 0
self._pending_command = ""
return
elif command.startswith(self.TENSOR_INDICES_NAVIGATION_PREFIX):
indices_str = command[1:].strip()
if indices_str:
try:
indices = command_parser.parse_indices(indices_str)
omitted, line_index, _, _ = tensor_format.locate_tensor_element(
self._curr_wrapped_output, indices)
if not omitted:
self._scroll_output(
_SCROLL_TO_LINE_INDEX, line_index=line_index)
except Exception as e: # pylint: disable=broad-except
self._error_toast(str(e))
else:
self._error_toast("Empty indices.")
return
try:
prefix, args, output_file_path = self._parse_command(command)
except SyntaxError as e:
self._error_toast(str(e))
return
if not prefix:
# Empty command: take no action. Should not exit.
return
screen_info = {"cols": self._max_x}
exit_token = None
if self._command_handler_registry.is_registered(prefix):
try:
screen_output = self._command_handler_registry.dispatch_command(
prefix, args, screen_info=screen_info)
except debugger_cli_common.CommandLineExit as e:
exit_token = e.exit_token
else:
screen_output = debugger_cli_common.RichTextLines([
self.ERROR_MESSAGE_PREFIX + "Invalid command prefix \"%s\"" % prefix
])
# Clear active command history. Until next up/down history navigation
# occurs, it will stay empty.
self._active_command_history = []
if exit_token is not None:
return exit_token
self._display_output(screen_output)
if output_file_path:
try:
screen_output.write_to_file(output_file_path)
self._info_toast("Wrote output to %s" % output_file_path)
except Exception: # pylint: disable=broad-except
self._error_toast("Failed to write output to %s" % output_file_path)
self._command_pointer = 0
self._pending_command = ""
def _screen_gather_textbox_str(self):
"""Gather the text string in the command text box.
Returns:
(str) the current text string in the command textbox, excluding any
return keys.
"""
txt = self._command_textbox.gather()
return txt.strip()
def _on_textbox_keypress(self, x):
"""Text box key validator: Callback of key strokes.
Handles a user's keypress in the input text box. Translates certain keys to
terminator keys for the textbox to allow its edit() method to return.
Also handles special key-triggered events such as PgUp/PgDown scrolling of
the screen output.
Args:
x: (int) Key code.
Returns:
(int) A translated key code. In most cases, this is identical to the
input x. However, if x is a Return key, the return value will be
CLI_TERMINATOR_KEY, so that the text box's edit() method can return.
Raises:
TypeError: If the input x is not of type int.
debugger_cli_common.CommandLineExit: If a mouse-triggered command returns
an exit token when dispatched.
"""
if not isinstance(x, int):
raise TypeError("Key validator expected type int, received type %s" %
type(x))
if x in self.CLI_CR_KEYS:
# Make Enter key the terminator
self._textbox_curr_terminator = x
return self.CLI_TERMINATOR_KEY
elif x == self.CLI_TAB_KEY:
self._textbox_curr_terminator = self.CLI_TAB_KEY
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_PPAGE:
self._scroll_output(_SCROLL_UP_A_LINE)
return x
elif x == curses.KEY_NPAGE:
self._scroll_output(_SCROLL_DOWN_A_LINE)
return x
elif x == curses.KEY_HOME:
self._scroll_output(_SCROLL_HOME)
return x
elif x == curses.KEY_END:
self._scroll_output(_SCROLL_END)
return x
elif x in [curses.KEY_UP, curses.KEY_DOWN]:
# Command history navigation.
if not self._active_command_history:
hist_prefix = self._screen_gather_textbox_str()
self._active_command_history = (
self._command_history_store.lookup_prefix(
hist_prefix, self._command_history_limit))
if self._active_command_history:
if x == curses.KEY_UP:
if self._command_pointer < len(self._active_command_history):
self._command_pointer += 1
elif x == curses.KEY_DOWN:
if self._command_pointer > 0:
self._command_pointer -= 1
else:
self._command_pointer = 0
self._textbox_curr_terminator = x
# Force return from the textbox edit(), so that the textbox can be
# redrawn with a history command entered.
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_RESIZE:
# Respond to terminal resize.
self._screen_refresh_size()
self._init_layout()
self._screen_create_command_window()
self._redraw_output()
# Force return from the textbox edit(), so that the textbox can be
# redrawn.
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_MOUSE and self._mouse_enabled:
try:
_, mouse_x, mouse_y, _, mouse_event_type = self._screen_getmouse()
except curses.error:
mouse_event_type = None
if mouse_event_type == curses.BUTTON1_RELEASED:
# Logic for mouse-triggered scrolling.
if mouse_x >= self._max_x - 2:
scroll_command = self._scroll_bar.get_click_command(mouse_y)
if scroll_command is not None:
self._scroll_output(scroll_command)
return x
else:
command = self._fetch_hyperlink_command(mouse_x, mouse_y)
if command:
exit_token = self._dispatch_command(command)
if exit_token is not None:
raise debugger_cli_common.CommandLineExit(exit_token=exit_token)
else:
# Mark the pending command as modified.
self._textbox_pending_command_changed = True
# Invalidate active command history.
self._command_pointer = 0
self._active_command_history = []
return x
def _screen_getmouse(self):
return curses.getmouse()
def _redraw_output(self):
if self._curr_unwrapped_output is not None:
self._display_main_menu(self._curr_unwrapped_output)
self._display_output(self._curr_unwrapped_output, is_refresh=True)
def _fetch_hyperlink_command(self, mouse_x, mouse_y):
output_top = self._output_top_row
if self._main_menu_pad:
output_top += 1
if mouse_y == self._output_top_row and self._main_menu_pad:
# Click was in the menu bar.
return _get_command_from_line_attr_segs(mouse_x,
self._main_menu.font_attr_segs[0])
else:
absolute_mouse_y = mouse_y + self._output_pad_row - output_top
if absolute_mouse_y in self._curr_wrapped_output.font_attr_segs:
return _get_command_from_line_attr_segs(
mouse_x, self._curr_wrapped_output.font_attr_segs[absolute_mouse_y])
def _title(self, title, title_color=None):
"""Display title.
Args:
title: (str) The title to display.
title_color: (str) Color of the title, e.g., "yellow".
"""
# Pad input title str with "-" and space characters to make it pretty.
self._title_line = "--- %s " % title
if len(self._title_line) < self._max_x:
self._title_line += "-" * (self._max_x - len(self._title_line))
self._screen_draw_text_line(
self._title_row, self._title_line, color=title_color)
def _auto_key_in(self, command, erase_existing=False):
"""Automatically key in a command to the command Textbox.
Args:
command: The command, as a string.
erase_existing: (bool) whether existing text (if any) is to be erased
first.
"""
if erase_existing:
self._erase_existing_command()
for c in command:
self._command_textbox.do_command(ord(c))
def _erase_existing_command(self):
"""Erase existing text in command textpad."""
existing_len = len(self._command_textbox.gather())
for _ in xrange(existing_len):
self._command_textbox.do_command(self.BACKSPACE_KEY)
def _screen_draw_text_line(self, row, line, attr=curses.A_NORMAL, color=None):
"""Render a line of text on the screen.
Args:
row: (int) Row index.
line: (str) The line content.
attr: curses font attribute.
color: (str) font foreground color name.
Raises:
TypeError: If row is not of type int.
"""
if not isinstance(row, int):
raise TypeError("Invalid type in row")
if len(line) > self._max_x:
line = line[:self._max_x]
color_pair = (self._default_color_pair if color is None else
self._color_pairs[color])
self._stdscr.addstr(row, 0, line, color_pair | attr)
self._screen_refresh()
def _screen_new_output_pad(self, rows, cols):
"""Generate a new pad on the screen.
Args:
rows: (int) Number of rows the pad will have: not limited to screen size.
cols: (int) Number of columns the pad will have: not limited to screen
size.
Returns:
A curses textpad object.
"""
return curses.newpad(rows, cols)
def _screen_display_output(self, output):
"""Actually render text output on the screen.
Wraps the lines according to screen width. Pad lines below according to
screen height so that the user can scroll the output to a state where
the last non-empty line is on the top of the screen. Then renders the
lines on the screen.
Args:
output: (RichTextLines) text lines to display on the screen. These lines
may have widths exceeding the screen width. This method will take care
of the wrapping.
Returns:
(List of int) A list of line indices, in the wrapped output, where there
are regex matches.
"""
# Wrap the output lines according to screen width.
self._curr_wrapped_output, wrapped_line_indices = (
debugger_cli_common.wrap_rich_text_lines(output, self._max_x - 2))
# Append lines to curr_wrapped_output so that the user can scroll to a
# state where the last text line is on the top of the output area.
self._curr_wrapped_output.lines.extend([""] * (self._output_num_rows - 1))
# Limit number of lines displayed to avoid curses overflow problems.
if self._curr_wrapped_output.num_lines() > self.max_output_lines:
self._curr_wrapped_output = self._curr_wrapped_output.slice(
0, self.max_output_lines)
self._curr_wrapped_output.lines.append("Output cut off at %d lines!" %
self.max_output_lines)
self._curr_wrapped_output.font_attr_segs[self.max_output_lines] = [
(0, len(output.lines[-1]), "magenta")
]
self._display_main_menu(self._curr_wrapped_output)
(self._output_pad, self._output_pad_height,
self._output_pad_width) = self._display_lines(self._curr_wrapped_output,
self._output_num_rows)
# The indices of lines with regex matches (if any) need to be mapped to
# indices of wrapped lines.
return [
wrapped_line_indices[line]
for line in self._unwrapped_regex_match_lines
]
def _display_output(self, output, is_refresh=False, highlight_regex=None):
"""Display text output in a scrollable text pad.
This method does some preprocessing on the text lines, render them on the
screen and scroll to the appropriate line. These are done according to regex
highlighting requests (if any), scroll-to-next-match requests (if any),
and screen refresh requests (if any).
    TODO(cais): Separate these unrelated requests to increase clarity and
maintainability.
Args:
output: A RichTextLines object that is the screen output text.
is_refresh: (bool) Is this a refreshing display with existing output.
highlight_regex: (str) Optional string representing the regex used to
search and highlight in the current screen output.
"""
if not output:
return
if highlight_regex:
try:
output = debugger_cli_common.regex_find(
output, highlight_regex, font_attr=self._SEARCH_HIGHLIGHT_FONT_ATTR)
except ValueError as e:
self._error_toast(str(e))
return
if not is_refresh:
# Perform new regex search on the current output.
self._unwrapped_regex_match_lines = output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY]
else:
# Continue scrolling down.
self._output_pad_row += 1
else:
self._curr_unwrapped_output = output
self._unwrapped_regex_match_lines = []
# Display output on the screen.
wrapped_regex_match_lines = self._screen_display_output(output)
    # Now that the text lines are displayed on the screen, scroll to the
# appropriate line according to previous scrolling state and regex search
# and highlighting state.
if highlight_regex:
next_match_line = -1
for match_line in wrapped_regex_match_lines:
if match_line >= self._output_pad_row:
next_match_line = match_line
break
if next_match_line >= 0:
self._scroll_output(
_SCROLL_TO_LINE_INDEX, line_index=next_match_line)
else:
# Regex search found no match >= current line number. Display message
# stating as such.
self._toast("Pattern not found", color=self._ERROR_TOAST_COLOR_PAIR)
elif is_refresh:
self._scroll_output(_SCROLL_REFRESH)
else:
self._output_pad_row = 0
self._scroll_output(_SCROLL_HOME)
def _display_lines(self, output, min_num_rows):
"""Display RichTextLines object on screen.
Args:
output: A RichTextLines object.
min_num_rows: (int) Minimum number of output rows.
Returns:
1) The text pad object used to display the main text body.
2) (int) number of rows of the text pad, which may exceed screen size.
3) (int) number of columns of the text pad.
Raises:
ValueError: If input argument "output" is invalid.
"""
if not isinstance(output, debugger_cli_common.RichTextLines):
raise ValueError(
"Output is required to be an instance of RichTextLines, but is not.")
self._screen_refresh()
# Number of rows the output area will have.
rows = max(min_num_rows, len(output.lines))
# Size of the output pad, which may exceed screen size and require
# scrolling.
cols = self._max_x - 2
# Create new output pad.
pad = self._screen_new_output_pad(rows, cols)
for i in xrange(len(output.lines)):
if i in output.font_attr_segs:
self._screen_add_line_to_output_pad(
pad, i, output.lines[i], color_segments=output.font_attr_segs[i])
else:
self._screen_add_line_to_output_pad(pad, i, output.lines[i])
return pad, rows, cols
def _display_main_menu(self, output):
"""Display main menu associated with screen output, if the menu exists.
Args:
output: (debugger_cli_common.RichTextLines) The RichTextLines output from
the annotations field of which the menu will be extracted and used (if
the menu exists).
"""
if debugger_cli_common.MAIN_MENU_KEY in output.annotations:
self._main_menu = output.annotations[
debugger_cli_common.MAIN_MENU_KEY].format_as_single_line(
prefix="| ", divider=" | ", enabled_item_attrs=["underline"])
self._main_menu_pad = self._screen_new_output_pad(1, self._max_x - 2)
# The unwrapped menu line may exceed screen width, in which case it needs
# to be cut off.
wrapped_menu, _ = debugger_cli_common.wrap_rich_text_lines(
self._main_menu, self._max_x - 3)
self._screen_add_line_to_output_pad(
self._main_menu_pad,
0,
wrapped_menu.lines[0],
color_segments=(wrapped_menu.font_attr_segs[0]
if 0 in wrapped_menu.font_attr_segs else None))
else:
self._main_menu = None
self._main_menu_pad = None
def _screen_add_line_to_output_pad(self, pad, row, txt, color_segments=None):
"""Render a line in a text pad.
Assumes: segments in color_segments are sorted in ascending order of the
beginning index.
    Note: Gaps between the segments are allowed and will be filled in with a
default color.
Args:
pad: The text pad to render the line in.
row: Row index, as an int.
txt: The text to be displayed on the specified row, as a str.
color_segments: A list of 3-tuples. Each tuple represents the beginning
and the end of a color segment, in the form of a right-open interval:
[start, end). The last element of the tuple is a color string, e.g.,
"red".
    Raises:
TypeError: If color_segments is not of type list.
"""
if not color_segments:
pad.addstr(row, 0, txt, self._default_color_pair)
return
if not isinstance(color_segments, list):
raise TypeError("Input color_segments needs to be a list, but is not.")
all_segments = []
all_color_pairs = []
# Process the beginning.
if color_segments[0][0] == 0:
pass
else:
all_segments.append((0, color_segments[0][0]))
all_color_pairs.append(self._default_color_pair)
for (curr_start, curr_end, curr_attrs), (next_start, _, _) in zip(
color_segments, color_segments[1:] + [(len(txt), None, None)]):
all_segments.append((curr_start, curr_end))
if not isinstance(curr_attrs, list):
curr_attrs = [curr_attrs]
curses_attr = curses.A_NORMAL
for attr in curr_attrs:
if (self._mouse_enabled and
isinstance(attr, debugger_cli_common.MenuItem)):
curses_attr |= curses.A_UNDERLINE
else:
curses_attr |= self._color_pairs.get(attr, self._default_color_pair)
all_color_pairs.append(curses_attr)
if curr_end < next_start:
# Fill in the gap with the default color.
all_segments.append((curr_end, next_start))
all_color_pairs.append(self._default_color_pair)
# Finally, draw all the segments.
for segment, color_pair in zip(all_segments, all_color_pairs):
if segment[1] < self._max_x:
pad.addstr(row, segment[0], txt[segment[0]:segment[1]], color_pair)
def _screen_scroll_output_pad(self, pad, viewport_top, viewport_left,
screen_location_top, screen_location_left,
screen_location_bottom, screen_location_right):
pad.refresh(viewport_top, viewport_left, screen_location_top,
screen_location_left, screen_location_bottom,
screen_location_right)
self._scroll_bar = ScrollBar(
self._max_x - 2,
2,
self._max_x - 1,
self._output_num_rows,
self._output_pad_row,
self._output_pad_height - self._output_pad_screen_height)
(scroll_pad, _, _) = self._display_lines(
self._scroll_bar.layout(), self._output_num_rows - 1)
scroll_pad.refresh(
0, 0, 2, self._max_x - 2, self._output_num_rows, self._max_x - 1)
def _scroll_output(self, direction, line_index=None):
"""Scroll the output pad.
Args:
direction: _SCROLL_REFRESH, _SCROLL_UP, _SCROLL_DOWN, _SCROLL_UP_A_LINE,
_SCROLL_DOWN_A_LINE, _SCROLL_HOME, _SCROLL_END, _SCROLL_TO_LINE_INDEX
line_index: (int) Specifies the zero-based line index to scroll to.
Applicable only if direction is _SCROLL_TO_LINE_INDEX.
Raises:
ValueError: On invalid scroll direction.
TypeError: If line_index is not int and direction is
_SCROLL_TO_LINE_INDEX.
"""
if not self._output_pad:
# No output pad is present. Do nothing.
return
if direction == _SCROLL_REFRESH:
pass
elif direction == _SCROLL_UP:
# Scroll up.
self._output_pad_row -= int(self._output_num_rows / 3)
if self._output_pad_row < 0:
self._output_pad_row = 0
elif direction == _SCROLL_DOWN:
# Scroll down.
self._output_pad_row += int(self._output_num_rows / 3)
if (self._output_pad_row >
self._output_pad_height - self._output_pad_screen_height - 1):
self._output_pad_row = (
self._output_pad_height - self._output_pad_screen_height - 1)
elif direction == _SCROLL_UP_A_LINE:
# Scroll up a line
if self._output_pad_row - 1 >= 0:
self._output_pad_row -= 1
elif direction == _SCROLL_DOWN_A_LINE:
# Scroll down a line
if self._output_pad_row + 1 < (
self._output_pad_height - self._output_pad_screen_height):
self._output_pad_row += 1
elif direction == _SCROLL_HOME:
# Scroll to top
self._output_pad_row = 0
elif direction == _SCROLL_END:
# Scroll to bottom
self._output_pad_row = (
self._output_pad_height - self._output_pad_screen_height - 1)
elif direction == _SCROLL_TO_LINE_INDEX:
if not isinstance(line_index, int):
raise TypeError("Invalid line_index type (%s) under mode %s" %
(type(line_index), _SCROLL_TO_LINE_INDEX))
self._output_pad_row = line_index
else:
raise ValueError("Unsupported scroll mode: %s" % direction)
# Actually scroll the output pad: refresh with new location.
output_pad_top = self._output_pad_screen_location.top
if self._main_menu_pad:
output_pad_top += 1
self._screen_scroll_output_pad(self._output_pad, self._output_pad_row, 0,
output_pad_top,
self._output_pad_screen_location.left,
self._output_pad_screen_location.bottom,
self._output_pad_screen_location.right)
self._screen_render_menu_pad()
self._scroll_info = self._compile_ui_status_summary()
self._screen_draw_text_line(
self._output_scroll_row,
self._scroll_info,
color=self._STATUS_BAR_COLOR_PAIR)
def _screen_render_menu_pad(self):
if self._main_menu_pad:
self._main_menu_pad.refresh(0, 0, self._output_pad_screen_location.top, 0,
self._output_pad_screen_location.top,
self._max_x)
def _compile_ui_status_summary(self):
"""Compile status summary about this Curses UI instance.
The information includes: scroll status and mouse ON/OFF status.
Returns:
(str) A single text line summarizing the UI status, adapted to the
current screen width.
"""
info = ""
if self._output_pad_height > self._output_pad_screen_height + 1:
# Display information about the scrolling of tall screen output.
scroll_percentage = 100.0 * (min(
1.0,
float(self._output_pad_row) /
(self._output_pad_height - self._output_pad_screen_height - 1)))
if self._output_pad_row == 0:
scroll_directions = " (PgDn)"
elif self._output_pad_row >= (
self._output_pad_height - self._output_pad_screen_height - 1):
scroll_directions = " (PgUp)"
else:
scroll_directions = " (PgDn/PgUp)"
info += "--- Scroll%s: %.2f%% " % (scroll_directions, scroll_percentage)
self._output_array_pointer_indices = self._show_array_indices()
# Add array indices information to scroll message.
if self._output_array_pointer_indices:
if self._output_array_pointer_indices[0]:
info += self._format_indices(self._output_array_pointer_indices[0])
info += "-"
if self._output_array_pointer_indices[-1]:
info += self._format_indices(self._output_array_pointer_indices[-1])
info += " "
# Add mouse mode information.
mouse_mode_str = "Mouse: "
mouse_mode_str += "ON" if self._mouse_enabled else "OFF"
if len(info) + len(mouse_mode_str) + 5 < self._max_x:
info += "-" * (self._max_x - len(info) - len(mouse_mode_str) - 4)
info += " "
info += mouse_mode_str
info += " ---"
else:
info += "-" * (self._max_x - len(info))
return info
def _format_indices(self, indices):
# Remove the spaces to make it compact.
return repr(indices).replace(" ", "")
def _show_array_indices(self):
"""Show array indices for the lines at the top and bottom of the output.
For the top line and bottom line of the output display area, show the
element indices of the array being displayed.
Returns:
      If either the top or the bottom row has any matching array indices,
a dict from line index (0 being the top of the display area, -1
being the bottom of the display area) to array element indices. For
example:
{0: [0, 0], -1: [10, 0]}
Otherwise, None.
"""
indices_top = self._show_array_index_at_line(0)
output_top = self._output_top_row
if self._main_menu_pad:
output_top += 1
bottom_line_index = (
self._output_pad_screen_location.bottom - output_top - 1)
indices_bottom = self._show_array_index_at_line(bottom_line_index)
if indices_top or indices_bottom:
return {0: indices_top, -1: indices_bottom}
else:
return None
def _show_array_index_at_line(self, line_index):
"""Show array indices for the specified line in the display area.
Uses the line number to array indices map in the annotations field of the
RichTextLines object being displayed.
If the displayed RichTextLines object does not contain such a mapping,
will do nothing.
Args:
line_index: (int) 0-based line index from the top of the display area.
        For example, if line_index == 0, this method will display the array
indices for the line currently at the top of the display area.
Returns:
(list) The array indices at the specified line, if available. None, if
not available.
"""
# Examine whether the index information is available for the specified line
# number.
pointer = self._output_pad_row + line_index
if (pointer in self._curr_wrapped_output.annotations and
"i0" in self._curr_wrapped_output.annotations[pointer]):
indices = self._curr_wrapped_output.annotations[pointer]["i0"]
array_indices_str = self._format_indices(indices)
array_indices_info = "@" + array_indices_str
# TODO(cais): Determine line_index properly given menu pad status.
# Test coverage?
output_top = self._output_top_row
if self._main_menu_pad:
output_top += 1
self._toast(
array_indices_info,
color=self._ARRAY_INDICES_COLOR_PAIR,
line_index=output_top + line_index)
return indices
else:
return None
def _tab_complete(self, command_str):
"""Perform tab completion.
Obtains tab completion candidates.
If there are no candidates, return command_str and take no other actions.
If there are candidates, display the candidates on screen and return
command_str + (common prefix of the candidates).
Args:
command_str: (str) The str in the command input textbox when Tab key is
hit.
Returns:
(str) Completed string. Could be the same as command_str if no completion
candidate is available. If candidate(s) are available, return command_str
appended by the common prefix of the candidates.
"""
context, prefix, except_last_word = self._analyze_tab_complete_input(
command_str)
candidates, common_prefix = self._tab_completion_registry.get_completions(
context, prefix)
if candidates and len(candidates) > 1:
self._display_candidates(candidates)
else:
# In the case of len(candidates) == 1, the single completion will be
# entered to the textbox automatically. So there is no need to show any
# candidates.
self._display_candidates([])
if common_prefix:
# Common prefix is not None and non-empty. The completed string will
# incorporate the common prefix.
return except_last_word + common_prefix
else:
return except_last_word + prefix
def _display_candidates(self, candidates):
"""Show candidates (e.g., tab-completion candidates) on multiple lines.
Args:
candidates: (list of str) candidates.
"""
if self._curr_unwrapped_output:
# Force refresh screen output.
self._scroll_output(_SCROLL_REFRESH)
if not candidates:
return
candidates_prefix = "Candidates: "
candidates_line = candidates_prefix + " ".join(candidates)
candidates_output = debugger_cli_common.RichTextLines(
candidates_line,
font_attr_segs={
0: [(len(candidates_prefix), len(candidates_line), "yellow")]
})
candidates_output, _ = debugger_cli_common.wrap_rich_text_lines(
candidates_output, self._max_x - 3)
# Calculate how many lines the candidate text should occupy. Limit it to
# a maximum value.
candidates_num_rows = min(
len(candidates_output.lines), self._candidates_max_lines)
self._candidates_top_row = (
self._candidates_bottom_row - candidates_num_rows + 1)
# Render the candidate text on screen.
pad, _, _ = self._display_lines(candidates_output, 0)
self._screen_scroll_output_pad(
pad, 0, 0, self._candidates_top_row, 0,
self._candidates_top_row + candidates_num_rows - 1, self._max_x - 2)
def _toast(self, message, color=None, line_index=None):
"""Display a one-line message on the screen.
By default, the toast is displayed in the line right above the scroll bar.
But the line location can be overridden with the line_index arg.
Args:
message: (str) the message to display.
color: (str) optional color attribute for the message.
line_index: (int) line index.
"""
pad, _, _ = self._display_lines(
debugger_cli_common.RichTextLines(
message, font_attr_segs={0: [(0, len(message), color or "white")]}),
0)
right_end = min(len(message), self._max_x - 2)
if line_index is None:
line_index = self._output_scroll_row - 1
self._screen_scroll_output_pad(pad, 0, 0, line_index, 0, line_index,
right_end)
def _error_toast(self, message):
"""Display a one-line error message on screen.
Args:
message: The error message, without the preceding "ERROR: " substring.
"""
self._toast(
self.ERROR_MESSAGE_PREFIX + message, color=self._ERROR_TOAST_COLOR_PAIR)
def _info_toast(self, message):
"""Display a one-line informational message on screen.
Args:
message: The informational message.
"""
self._toast(
self.INFO_MESSAGE_PREFIX + message, color=self._INFO_TOAST_COLOR_PAIR)
def _interrupt_handler(self, signal_num, frame):
_ = signal_num # Unused.
_ = frame # Unused.
self._screen_terminate()
print("\ntfdbg: caught SIGINT; calling sys.exit(1).", file=sys.stderr)
sys.exit(1)
def _mouse_mode_command_handler(self, args, screen_info=None):
"""Handler for the command prefix 'mouse'.
Args:
args: (list of str) Arguments to the command prefix 'mouse'.
screen_info: (dict) Information about the screen, unused by this handler.
Returns:
None, as this command handler does not generate any screen outputs other
than toasts.
"""
del screen_info
if not args or len(args) == 1:
if args:
if args[0].lower() == "on":
enabled = True
elif args[0].lower() == "off":
enabled = False
else:
self._error_toast("Invalid mouse mode: %s" % args[0])
return None
self._set_mouse_enabled(enabled)
mode_str = "on" if self._mouse_enabled else "off"
self._info_toast("Mouse mode: %s" % mode_str)
else:
self._error_toast("mouse_mode: syntax error")
return None
def _set_mouse_enabled(self, enabled):
if self._mouse_enabled != enabled:
self._mouse_enabled = enabled
self._screen_set_mousemask()
self._redraw_output()
def _screen_set_mousemask(self):
curses.mousemask(self._mouse_enabled)
|
majora2007/plexpy | refs/heads/master | lib/mako/ext/turbogears.py | 60 | # ext/turbogears.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import inspect
from mako import compat
from mako.lookup import TemplateLookup
from mako.template import Template
class TGPlugin(object):
"""TurboGears compatible Template Plugin."""
def __init__(self, extra_vars_func=None, options=None, extension='mak'):
self.extra_vars_func = extra_vars_func
self.extension = extension
if not options:
options = {}
# Pull the options out and initialize the lookup
lookup_options = {}
for k, v in options.items():
if k.startswith('mako.'):
lookup_options[k[5:]] = v
elif k in ['directories', 'filesystem_checks', 'module_directory']:
lookup_options[k] = v
self.lookup = TemplateLookup(**lookup_options)
self.tmpl_options = {}
# transfer lookup args to template args, based on those available
# in getargspec
for kw in inspect.getargspec(Template.__init__)[0]:
if kw in lookup_options:
self.tmpl_options[kw] = lookup_options[kw]
def load_template(self, templatename, template_string=None):
"""Loads a template from a file or a string"""
if template_string is not None:
return Template(template_string, **self.tmpl_options)
# Translate TG dot notation to normal / template path
if '/' not in templatename:
templatename = '/' + templatename.replace('.', '/') + '.' +\
self.extension
# Lookup template
return self.lookup.get_template(templatename)
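    # Example for load_template (hypothetical template name):
    # load_template('myapp.templates.index') resolves to the lookup path
    # '/myapp/templates/index.mak' under the default 'mak' extension.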
def render(self, info, format="html", fragment=False, template=None):
if isinstance(template, compat.string_types):
template = self.load_template(template)
# Load extra vars func if provided
if self.extra_vars_func:
info.update(self.extra_vars_func())
return template.render(**info)
|
plasma-disassembler/plasma | refs/heads/master | plasma/lib/ui/widget.py | 4 | #!/usr/bin/env python3
#
# PLASMA : Generate an indented asm code (pseudo-C) with colored syntax.
# Copyright (C) 2015 Joel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import curses
from curses import color_pair, A_REVERSE
from time import time
from plasma.lib.custom_colors import COLOR_COMMENT
class Widget():
def __init__(self, x, y, w, h):
self.mapping = {}
self.x = x
self.y = y
self.height = h
self.width = w
self.has_focus = False
self.screen = curses.newwin(h, w, y, x)
self.should_stop = False
self.value_selected = False
self.is_passive = False
def draw(self):
raise NotImplementedError
def draw_cursor(self):
raise NotImplementedError
def callback_mouse_up(self):
raise NotImplementedError
def callback_mouse_down(self):
raise NotImplementedError
def callback_mouse_left(self):
raise NotImplementedError
def callback_mouse_double_left(self):
raise NotImplementedError
class VertivalSep(Widget):
def __init__(self, x, y, h):
w = 2
Widget.__init__(self, x, y, w, h)
self.is_passive = True
self.mapping = {}
def draw(self):
c = color_pair(COLOR_COMMENT.val) #| A_REVERSE
for i in range(self.height):
self.screen.addstr(i, 0, "▕", c)
|
twinpa/virtualeco | refs/heads/master | script/loginevent.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib import env
from lib import script
ID = 30
def main(pc):
script.msg(pc, "-"*30)
script.msg(pc, "%s %s"%(env.NAME, env.LAST_UPDATE))
script.msg(pc, "%s"%env.RUNTIME_VERSION_ALL)
script.msg(pc, "-"*30) |
squisher/stella | refs/heads/master | stella/test/conftest.py | 1 | # Copyright 2013-2015 David Mohr
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from collections import defaultdict
def pytest_addoption(parser):
parser.addoption('-B', "--bench", action="store",
type=str, default=False,
help="run benchmark tests: veryshort, short, or long")
parser.addoption('-E', "--extended-bench", action="count",
default=False,
help="run also extended benchmark tests: in Python, and with clang")
results = defaultdict(dict)
@pytest.fixture(scope="module")
def bench_result():
return results
def pytest_runtest_setup(item):
if 'bench' in item.keywords and not item.config.getoption("--bench"):
pytest.skip("need --bench option to run")
def pytest_configure(config):
bench = config.getoption("--bench")
if bench not in (False, 'short', 'long', 'veryshort', 's', 'l', 'v'):
raise Exception("Invalid --bench option: " + bench)
def save_results():
import pickle
with open('timings.pickle', 'wb') as f:
pickle.dump(results, f)
def pytest_terminal_summary(terminalreporter):
tr = terminalreporter
if not tr.config.getoption("--bench"):
return
lines = []
if results:
name_width = max(map(len, results.keys())) + 2
save_results()
else:
# TODO we were aborted, display a notice?
name_width = 2
for benchmark, type_times in sorted(results.items()):
type_width = max(map(len, type_times.keys())) + 2
for b_type, times in sorted(type_times.items()):
r = []
s = []
for impl, t in times.items():
r.append('{}={:0.3f}s'.format(impl, t))
if not impl.startswith('stella'):
s.append('{}={:0.2f}x '.format('f'.rjust(len(impl)), t /
times['stella']))
else:
s.append(' ' * len(r[-1]))
lines.append("{} {} {}".format(benchmark.ljust(name_width),
b_type.ljust(type_width), ' '.join(r)))
lines.append("{} {} {}".format(' '.ljust(name_width),
' '.ljust(type_width), ' '.join(s)))
if len(lines) > 0:
tr.write_line('-'*len(lines[0]), yellow=True)
for line in lines:
tr.write_line(line)
|
kurli/blink-crosswalk | refs/heads/master | Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py | 26 | # Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is an implementation of the Port interface that overrides other
ports and changes the Driver binary to "MockDRT".
The MockDRT objects emulate what a real DRT would do. In particular, they
return the output a real DRT would return for a given test, assuming that
test actually passes (except for reftests, which currently cause the
MockDRT to crash).
"""
import base64
import logging
import optparse
import os
import sys
import types
# Since we execute this script directly as part of the unit tests, we need to ensure
# that Tools/Scripts is in sys.path for the next imports to work correctly.
script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
if script_dir not in sys.path:
sys.path.append(script_dir)
from webkitpy.common import read_checksum_from_png
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.port.factory import PortFactory
_log = logging.getLogger(__name__)
class MockDRTPort(object):
port_name = 'mock'
@classmethod
def determine_full_port_name(cls, host, options, port_name):
return port_name
def __init__(self, host, port_name, **kwargs):
self.__delegate = PortFactory(host).get(port_name.replace('mock-', ''), **kwargs)
self.__delegate_driver_class = self.__delegate._driver_class
self.__delegate._driver_class = types.MethodType(self._driver_class, self.__delegate)
def __getattr__(self, name):
return getattr(self.__delegate, name)
def check_build(self, needs_http, printer):
return True
def check_sys_deps(self, needs_http):
return True
def _driver_class(self, delegate):
return self._mocked_driver_maker
def _mocked_driver_maker(self, port, worker_number, pixel_tests, no_timeout=False):
path_to_this_file = self.host.filesystem.abspath(__file__.replace('.pyc', '.py'))
driver = self.__delegate_driver_class()(self, worker_number, pixel_tests, no_timeout)
driver.cmd_line = self._overriding_cmd_line(driver.cmd_line,
self.__delegate._path_to_driver(),
sys.executable,
path_to_this_file,
self.__delegate.name())
return driver
@staticmethod
def _overriding_cmd_line(original_cmd_line, driver_path, python_exe, this_file, port_name):
def new_cmd_line(pixel_tests, per_test_args):
cmd_line = original_cmd_line(pixel_tests, per_test_args)
index = cmd_line.index(driver_path)
cmd_line[index:index + 1] = [python_exe, this_file, '--platform', port_name]
return cmd_line
return new_cmd_line
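    # Example for _overriding_cmd_line (paths illustrative): a delegate
    # command line like ['/build/content_shell', '--foo'] becomes
    # [sys.executable, '/path/to/mock_drt.py', '--platform', 'mock-mac',
    # '--foo'], i.e. the real driver binary is swapped for this script.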
def start_helper(self):
pass
def start_http_server(self, additional_dirs, number_of_servers):
pass
def start_websocket_server(self):
pass
def acquire_http_lock(self):
pass
def stop_helper(self):
pass
def stop_http_server(self):
pass
def stop_websocket_server(self):
pass
def release_http_lock(self):
pass
def _make_wdiff_available(self):
self.__delegate._wdiff_available = True
def setup_environ_for_server(self, server_name):
env = self.__delegate.setup_environ_for_server()
# We need to propagate PATH down so the python code can find the checkout.
env['PATH'] = os.environ['PATH']
return env
def lookup_virtual_test_args(self, test_name):
suite = self.__delegate.lookup_virtual_suite(test_name)
return suite.args + ['--virtual-test-suite-name', suite.name, '--virtual-test-suite-base', suite.base]
def lookup_virtual_reference_args(self, test_name):
suite = self.__delegate.lookup_virtual_suite(test_name)
return suite.reference_args + ['--virtual-test-suite-name', suite.name, '--virtual-test-suite-base', suite.base]
def main(argv, host, stdin, stdout, stderr):
"""Run the tests."""
options, args = parse_options(argv)
drt = MockDRT(options, args, host, stdin, stdout, stderr)
return drt.run()
def parse_options(argv):
# We do custom arg parsing instead of using the optparse module
# because we don't want to have to list every command line flag DRT
# accepts, and optparse complains about unrecognized flags.
def get_arg(arg_name):
if arg_name in argv:
index = argv.index(arg_name)
return argv[index + 1]
return None
options = optparse.Values({
'actual_directory': get_arg('--actual-directory'),
'platform': get_arg('--platform'),
'virtual_test_suite_base': get_arg('--virtual-test-suite-base'),
'virtual_test_suite_name': get_arg('--virtual-test-suite-name'),
})
return (options, argv)
class MockDRT(object):
def __init__(self, options, args, host, stdin, stdout, stderr):
self._options = options
self._args = args
self._host = host
self._stdout = stdout
self._stdin = stdin
self._stderr = stderr
port_name = None
if options.platform:
port_name = options.platform
self._port = PortFactory(host).get(port_name=port_name, options=options)
self._driver = self._port.create_driver(0)
def run(self):
while True:
line = self._stdin.readline()
if not line:
return 0
driver_input = self.input_from_line(line)
dirname, basename = self._port.split_test(driver_input.test_name)
is_reftest = (self._port.reference_files(driver_input.test_name) or
self._port.is_reference_html_file(self._port._filesystem, dirname, basename))
output = self.output_for_test(driver_input, is_reftest)
self.write_test_output(driver_input, output, is_reftest)
def input_from_line(self, line):
vals = line.strip().split("'")
uri = vals[0]
checksum = None
should_run_pixel_tests = False
if len(vals) == 2 and vals[1] == '--pixel-test':
should_run_pixel_tests = True
elif len(vals) == 3 and vals[1] == '--pixel-test':
should_run_pixel_tests = True
checksum = vals[2]
elif len(vals) != 1:
raise NotImplementedError
if uri.startswith('http://') or uri.startswith('https://'):
test_name = self._driver.uri_to_test(uri)
else:
test_name = self._port.relative_test_filename(uri)
return DriverInput(test_name, 0, checksum, should_run_pixel_tests, args=[])
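    # Example inputs (illustrative): "fast/a.html" yields a text-only test,
    # while "fast/a.html'--pixel-test'0123abcd" enables pixel testing with
    # the expected checksum "0123abcd".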
def output_for_test(self, test_input, is_reftest):
port = self._port
if self._options.virtual_test_suite_name:
test_input.test_name = test_input.test_name.replace(self._options.virtual_test_suite_base, self._options.virtual_test_suite_name)
actual_text = port.expected_text(test_input.test_name)
actual_audio = port.expected_audio(test_input.test_name)
actual_image = None
actual_checksum = None
if is_reftest:
# Make up some output for reftests.
actual_text = 'reference text\n'
actual_checksum = 'mock-checksum'
actual_image = 'blank'
if test_input.test_name.endswith('-mismatch.html'):
actual_text = 'not reference text\n'
actual_checksum = 'not-mock-checksum'
actual_image = 'not blank'
elif test_input.should_run_pixel_test and test_input.image_hash:
actual_checksum = port.expected_checksum(test_input.test_name)
actual_image = port.expected_image(test_input.test_name)
if self._options.actual_directory:
actual_path = port._filesystem.join(self._options.actual_directory, test_input.test_name)
root, _ = port._filesystem.splitext(actual_path)
text_path = root + '-actual.txt'
if port._filesystem.exists(text_path):
actual_text = port._filesystem.read_binary_file(text_path)
audio_path = root + '-actual.wav'
if port._filesystem.exists(audio_path):
actual_audio = port._filesystem.read_binary_file(audio_path)
image_path = root + '-actual.png'
if port._filesystem.exists(image_path):
actual_image = port._filesystem.read_binary_file(image_path)
with port._filesystem.open_binary_file_for_reading(image_path) as filehandle:
actual_checksum = read_checksum_from_png.read_checksum(filehandle)
return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)
def write_test_output(self, test_input, output, is_reftest):
if output.audio:
self._stdout.write('Content-Type: audio/wav\n')
self._stdout.write('Content-Transfer-Encoding: base64\n')
self._stdout.write(base64.b64encode(output.audio))
self._stdout.write('\n')
else:
self._stdout.write('Content-Type: text/plain\n')
# FIXME: Note that we don't ensure there is a trailing newline!
# This mirrors actual (Mac) DRT behavior but is a bug.
if output.text:
self._stdout.write(output.text)
self._stdout.write('#EOF\n')
if test_input.should_run_pixel_test and output.image_hash:
self._stdout.write('\n')
self._stdout.write('ActualHash: %s\n' % output.image_hash)
self._stdout.write('ExpectedHash: %s\n' % test_input.image_hash)
if output.image_hash != test_input.image_hash:
self._stdout.write('Content-Type: image/png\n')
self._stdout.write('Content-Length: %s\n' % len(output.image))
self._stdout.write(output.image)
self._stdout.write('#EOF\n')
self._stdout.flush()
self._stderr.write('#EOF\n')
self._stderr.flush()
if __name__ == '__main__':
# Note that the Mock in MockDRT refers to the fact that it is emulating a
# real DRT, and as such, it needs access to a real SystemHost, not a MockSystemHost.
sys.exit(main(sys.argv[1:], SystemHost(), sys.stdin, sys.stdout, sys.stderr))
|
iSECPartners/opinel | refs/heads/master | opinel/__init__.py | 3 | __author__ = 'l01cd3v'
__version__ = '3.3.4'
|
whuaegeanse/mapnik | refs/heads/master | scons/scons-local-2.3.6/SCons/Tool/rpm.py | 4 | """SCons.Tool.rpm
Tool-specific initialization for rpm.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The rpm tool calls the rpmbuild command. The first and only argument should be
a tar.gz consisting of the source file and a specfile.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/rpm.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import os
import re
import shutil
import subprocess
import SCons.Builder
import SCons.Node.FS
import SCons.Util
import SCons.Action
import SCons.Defaults
def get_cmd(source, env):
tar_file_with_included_specfile = source
if SCons.Util.is_List(source):
tar_file_with_included_specfile = source[0]
return "%s %s %s"%(env['RPM'], env['RPMFLAGS'],
tar_file_with_included_specfile.abspath )
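# With the defaults set in generate() below, get_cmd() produces a string such
# as "LC_ALL=C rpmbuild -ta /path/to/pkg.tar.gz" (path illustrative).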
def build_rpm(target, source, env):
# create a temporary rpm build root.
tmpdir = os.path.join( os.path.dirname( target[0].abspath ), 'rpmtemp' )
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
# now create the mandatory rpm directory structure.
for d in ['RPMS', 'SRPMS', 'SPECS', 'BUILD']:
os.makedirs( os.path.join( tmpdir, d ) )
# set the topdir as an rpmflag.
env.Prepend( RPMFLAGS = '--define \'_topdir %s\'' % tmpdir )
# now call rpmbuild to create the rpm package.
handle = subprocess.Popen(get_cmd(source, env),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
output = handle.stdout.read()
status = handle.wait()
if status:
raise SCons.Errors.BuildError( node=target[0],
errstr=output,
filename=str(target[0]) )
else:
# XXX: assume that LC_ALL=C is set while running rpmbuild
output_files = re.compile( 'Wrote: (.*)' ).findall( output )
for output, input in zip( output_files, target ):
rpm_output = os.path.basename(output)
expected = os.path.basename(input.get_path())
assert expected == rpm_output, "got %s but expected %s" % (rpm_output, expected)
shutil.copy( output, input.abspath )
# cleanup before leaving.
shutil.rmtree(tmpdir)
return status
def string_rpm(target, source, env):
try:
return env['RPMCOMSTR']
except KeyError:
return get_cmd(source, env)
rpmAction = SCons.Action.Action(build_rpm, string_rpm)
RpmBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$RPMCOM', '$RPMCOMSTR'),
source_scanner = SCons.Defaults.DirScanner,
suffix = '$RPMSUFFIX')
def generate(env):
"""Add Builders and construction variables for rpm to an Environment."""
try:
bld = env['BUILDERS']['Rpm']
except KeyError:
bld = RpmBuilder
env['BUILDERS']['Rpm'] = bld
env.SetDefault(RPM = 'LC_ALL=C rpmbuild')
env.SetDefault(RPMFLAGS = SCons.Util.CLVar('-ta'))
env.SetDefault(RPMCOM = rpmAction)
env.SetDefault(RPMSUFFIX = '.rpm')
def exists(env):
return env.Detect('rpmbuild')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bittner/django-allauth | refs/heads/master | allauth/socialaccount/providers/jupyterhub/urls.py | 5 | from allauth.socialaccount.providers.jupyterhub.provider import (
JupyterHubProvider,
)
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
urlpatterns = default_urlpatterns(JupyterHubProvider)
|
mcdaniel67/sympy | refs/heads/master | sympy/printing/tests/test_ccode.py | 13 | from sympy.core import (pi, oo, symbols, Rational, Integer,
GoldenRatio, EulerGamma, Catalan, Lambda, Dummy, Eq)
from sympy.functions import (Piecewise, sin, cos, Abs, exp, ceiling, sqrt,
gamma, sign)
from sympy.logic import ITE
from sympy.utilities.pytest import raises
from sympy.printing.ccode import CCodePrinter
from sympy.utilities.lambdify import implemented_function
from sympy.tensor import IndexedBase, Idx
from sympy.matrices import Matrix, MatrixSymbol
from sympy import ccode
x, y, z = symbols('x,y,z')
def test_printmethod():
class fabs(Abs):
def _ccode(self, printer):
return "fabs(%s)" % printer._print(self.args[0])
assert ccode(fabs(x)) == "fabs(x)"
def test_ccode_sqrt():
assert ccode(sqrt(x)) == "sqrt(x)"
assert ccode(x**0.5) == "sqrt(x)"
assert ccode(sqrt(x)) == "sqrt(x)"
def test_ccode_Pow():
assert ccode(x**3) == "pow(x, 3)"
assert ccode(x**(y**3)) == "pow(x, pow(y, 3))"
g = implemented_function('g', Lambda(x, 2*x))
assert ccode(1/(g(x)*3.5)**(x - y**x)/(x**2 + y)) == \
"pow(3.5*2*x, -x + pow(y, x))/(pow(x, 2) + y)"
assert ccode(x**-1.0) == '1.0/x'
assert ccode(x**Rational(2, 3)) == 'pow(x, 2.0L/3.0L)'
_cond_cfunc = [(lambda base, exp: exp.is_integer, "dpowi"),
(lambda base, exp: not exp.is_integer, "pow")]
assert ccode(x**3, user_functions={'Pow': _cond_cfunc}) == 'dpowi(x, 3)'
assert ccode(x**3.2, user_functions={'Pow': _cond_cfunc}) == 'pow(x, 3.2)'
def test_ccode_constants_mathh():
assert ccode(exp(1)) == "M_E"
assert ccode(pi) == "M_PI"
assert ccode(oo) == "HUGE_VAL"
assert ccode(-oo) == "-HUGE_VAL"
def test_ccode_constants_other():
assert ccode(2*GoldenRatio) == "double const GoldenRatio = 1.61803398874989;\n2*GoldenRatio"
assert ccode(
2*Catalan) == "double const Catalan = 0.915965594177219;\n2*Catalan"
assert ccode(2*EulerGamma) == "double const EulerGamma = 0.577215664901533;\n2*EulerGamma"
def test_ccode_Rational():
assert ccode(Rational(3, 7)) == "3.0L/7.0L"
assert ccode(Rational(18, 9)) == "2"
assert ccode(Rational(3, -7)) == "-3.0L/7.0L"
assert ccode(Rational(-3, -7)) == "3.0L/7.0L"
assert ccode(x + Rational(3, 7)) == "x + 3.0L/7.0L"
assert ccode(Rational(3, 7)*x) == "(3.0L/7.0L)*x"
def test_ccode_Integer():
assert ccode(Integer(67)) == "67"
assert ccode(Integer(-1)) == "-1"
def test_ccode_functions():
assert ccode(sin(x) ** cos(x)) == "pow(sin(x), cos(x))"
def test_ccode_inline_function():
x = symbols('x')
g = implemented_function('g', Lambda(x, 2*x))
assert ccode(g(x)) == "2*x"
g = implemented_function('g', Lambda(x, 2*x/Catalan))
assert ccode(
g(x)) == "double const Catalan = %s;\n2*x/Catalan" % Catalan.n()
A = IndexedBase('A')
i = Idx('i', symbols('n', integer=True))
g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
assert ccode(g(A[i]), assign_to=A[i]) == (
"for (int i=0; i<n; i++){\n"
" A[i] = (A[i] + 1)*(A[i] + 2)*A[i];\n"
"}"
)
def test_ccode_exceptions():
assert ccode(ceiling(x)) == "ceil(x)"
assert ccode(Abs(x)) == "fabs(x)"
assert ccode(gamma(x)) == "tgamma(x)"
def test_ccode_user_functions():
x = symbols('x', integer=False)
n = symbols('n', integer=True)
custom_functions = {
"ceiling": "ceil",
"Abs": [(lambda x: not x.is_integer, "fabs"), (lambda x: x.is_integer, "abs")],
}
assert ccode(ceiling(x), user_functions=custom_functions) == "ceil(x)"
assert ccode(Abs(x), user_functions=custom_functions) == "fabs(x)"
assert ccode(Abs(n), user_functions=custom_functions) == "abs(n)"
def test_ccode_boolean():
assert ccode(x & y) == "x && y"
assert ccode(x | y) == "x || y"
assert ccode(~x) == "!x"
assert ccode(x & y & z) == "x && y && z"
assert ccode(x | y | z) == "x || y || z"
assert ccode((x & y) | z) == "z || x && y"
assert ccode((x | y) & z) == "z && (x || y)"
def test_ccode_Piecewise():
expr = Piecewise((x, x < 1), (x**2, True))
assert ccode(expr) == (
"((x < 1) ? (\n"
" x\n"
")\n"
": (\n"
" pow(x, 2)\n"
"))")
assert ccode(expr, assign_to="c") == (
"if (x < 1) {\n"
" c = x;\n"
"}\n"
"else {\n"
" c = pow(x, 2);\n"
"}")
expr = Piecewise((x, x < 1), (x + 1, x < 2), (x**2, True))
assert ccode(expr) == (
"((x < 1) ? (\n"
" x\n"
")\n"
": ((x < 2) ? (\n"
" x + 1\n"
")\n"
": (\n"
" pow(x, 2)\n"
")))")
assert ccode(expr, assign_to='c') == (
"if (x < 1) {\n"
" c = x;\n"
"}\n"
"else if (x < 2) {\n"
" c = x + 1;\n"
"}\n"
"else {\n"
" c = pow(x, 2);\n"
"}")
    # Check that a Piecewise without a True (default) condition raises an error
expr = Piecewise((x, x < 1), (x**2, x > 1), (sin(x), x > 0))
raises(ValueError, lambda: ccode(expr))
def test_ccode_Piecewise_deep():
p = ccode(2*Piecewise((x, x < 1), (x + 1, x < 2), (x**2, True)))
assert p == (
"2*((x < 1) ? (\n"
" x\n"
")\n"
": ((x < 2) ? (\n"
" x + 1\n"
")\n"
": (\n"
" pow(x, 2)\n"
")))")
expr = x*y*z + x**2 + y**2 + Piecewise((0, x < 0.5), (1, True)) + cos(z) - 1
assert ccode(expr) == (
"pow(x, 2) + x*y*z + pow(y, 2) + ((x < 0.5) ? (\n"
" 0\n"
")\n"
": (\n"
" 1\n"
")) + cos(z) - 1")
assert ccode(expr, assign_to='c') == (
"c = pow(x, 2) + x*y*z + pow(y, 2) + ((x < 0.5) ? (\n"
" 0\n"
")\n"
": (\n"
" 1\n"
")) + cos(z) - 1;")
def test_ccode_ITE():
expr = ITE(x < 1, x, x**2)
assert ccode(expr) == (
"((x < 1) ? (\n"
" x\n"
")\n"
": (\n"
" pow(x, 2)\n"
"))")
def test_ccode_settings():
raises(TypeError, lambda: ccode(sin(x), method="garbage"))
def test_ccode_Indexed():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o = symbols('n m o', integer=True)
i, j, k = Idx('i', n), Idx('j', m), Idx('k', o)
p = CCodePrinter()
p._not_c = set()
x = IndexedBase('x')[j]
assert p._print_Indexed(x) == 'x[j]'
A = IndexedBase('A')[i, j]
assert p._print_Indexed(A) == 'A[%s]' % (m*i+j)
B = IndexedBase('B')[i, j, k]
assert p._print_Indexed(B) == 'B[%s]' % (i*o*m+j*o+k)
assert p._not_c == set()
def test_ccode_Indexed_without_looking_for_contraction():
len_y = 5
y = IndexedBase('y', shape=(len_y,))
x = IndexedBase('x', shape=(len_y,))
Dy = IndexedBase('Dy', shape=(len_y-1,))
i = Idx('i', len_y-1)
e=Eq(Dy[i], (y[i+1]-y[i])/(x[i+1]-x[i]))
code0 = ccode(e.rhs, assign_to=e.lhs, contract=False)
assert code0 == 'Dy[i] = (y[%s] - y[i])/(x[%s] - x[i]);' % (i + 1, i + 1)
def test_ccode_loops_matrix_vector():
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = x[j]*A[%s] + y[i];\n' % (i*n + j) +\
' }\n'
'}'
)
c = ccode(A[i, j]*x[j], assign_to=y[i])
assert c == s
def test_dummy_loops():
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'for (int i_%(icount)i=0; i_%(icount)i<m_%(mcount)i; i_%(icount)i++){\n'
' y[i_%(icount)i] = x[i_%(icount)i];\n'
'}'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
code = ccode(x[i], assign_to=y[i])
assert code == expected
def test_ccode_loops_add():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
z = IndexedBase('z')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (int i=0; i<m; i++){\n'
' y[i] = x[i] + z[i];\n'
'}\n'
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = x[j]*A[%s] + y[i];\n' % (i*n + j) +\
' }\n'
'}'
)
c = ccode(A[i, j]*x[j] + x[i] + z[i], assign_to=y[i])
assert c == s
def test_ccode_loops_multiple_contractions():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' for (int k=0; k<o; k++){\n'
' for (int l=0; l<p; l++){\n'
' y[i] = y[i] + b[%s]*a[%s];\n' % (j*o*p + k*p + l, i*n*o*p + j*o*p + k*p + l) +\
' }\n'
' }\n'
' }\n'
'}'
)
c = ccode(b[j, k, l]*a[i, j, k, l], assign_to=y[i])
assert c == s
def test_ccode_loops_addfactor():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' for (int k=0; k<o; k++){\n'
' for (int l=0; l<p; l++){\n'
' y[i] = (a[%s] + b[%s])*c[%s] + y[i];\n' % (i*n*o*p + j*o*p + k*p + l, i*n*o*p + j*o*p + k*p + l, j*o*p + k*p + l) +\
' }\n'
' }\n'
' }\n'
'}'
)
c = ccode((a[i, j, k, l] + b[i, j, k, l])*c[j, k, l], assign_to=y[i])
assert c == s
def test_ccode_loops_multiple_terms():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
s0 = (
'for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
)
s1 = (
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' for (int k=0; k<o; k++){\n'
' y[i] = b[j]*b[k]*c[%s] + y[i];\n' % (i*n*o + j*o + k) +\
' }\n'
' }\n'
'}\n'
)
s2 = (
'for (int i=0; i<m; i++){\n'
' for (int k=0; k<o; k++){\n'
' y[i] = b[k]*a[%s] + y[i];\n' % (i*o + k) +\
' }\n'
'}\n'
)
s3 = (
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = b[j]*a[%s] + y[i];\n' % (i*n + j) +\
' }\n'
'}\n'
)
c = ccode(
b[j]*a[i, j] + b[k]*a[i, k] + b[j]*b[k]*c[i, j, k], assign_to=y[i])
assert (c == s0 + s1 + s2 + s3[:-1] or
c == s0 + s1 + s3 + s2[:-1] or
c == s0 + s2 + s1 + s3[:-1] or
c == s0 + s2 + s3 + s1[:-1] or
c == s0 + s3 + s1 + s2[:-1] or
c == s0 + s3 + s2 + s1[:-1])
def test_dereference_printing():
expr = x + y + sin(z) + z
assert ccode(expr, dereference=[z]) == "x + y + (*z) + sin((*z))"
def test_Matrix_printing():
# Test returning a Matrix
mat = Matrix([x*y, Piecewise((2 + x, y>0), (y, True)), sin(z)])
A = MatrixSymbol('A', 3, 1)
assert ccode(mat, A) == (
"A[0] = x*y;\n"
"if (y > 0) {\n"
" A[1] = x + 2;\n"
"}\n"
"else {\n"
" A[1] = y;\n"
"}\n"
"A[2] = sin(z);")
# Test using MatrixElements in expressions
expr = Piecewise((2*A[2, 0], x > 0), (A[2, 0], True)) + sin(A[1, 0]) + A[0, 0]
assert ccode(expr) == (
"((x > 0) ? (\n"
" 2*A[2]\n"
")\n"
": (\n"
" A[2]\n"
")) + sin(A[1]) + A[0]")
# Test using MatrixElements in a Matrix
q = MatrixSymbol('q', 5, 1)
M = MatrixSymbol('M', 3, 3)
m = Matrix([[sin(q[1,0]), 0, cos(q[2,0])],
[q[1,0] + q[2,0], q[3, 0], 5],
[2*q[4, 0]/q[1,0], sqrt(q[0,0]) + 4, 0]])
assert ccode(m, M) == (
"M[0] = sin(q[1]);\n"
"M[1] = 0;\n"
"M[2] = cos(q[2]);\n"
"M[3] = q[1] + q[2];\n"
"M[4] = q[3];\n"
"M[5] = 5;\n"
"M[6] = 2*q[4]*1.0/q[1];\n"
"M[7] = 4 + sqrt(q[0]);\n"
"M[8] = 0;")
def test_ccode_reserved_words():
x, y = symbols('x, if')
assert ccode(y**2) == 'pow(if_, 2)'
assert ccode(x * y**2, dereference=[y]) == 'pow((*if_), 2)*x'
expected = 'pow(if_unreserved, 2)'
assert ccode(y**2, reserved_word_suffix='_unreserved') == expected
with raises(ValueError):
ccode(y**2, error_on_reserved=True)
def test_ccode_sign():
expr = sign(x) * y
assert ccode(expr) == 'y*(((x) > 0) - ((x) < 0))'
assert ccode(expr, 'z') == 'z = y*(((x) > 0) - ((x) < 0));'
assert ccode(sign(2 * x + x**2) * x + x**2) == \
'pow(x, 2) + x*(((pow(x, 2) + 2*x) > 0) - ((pow(x, 2) + 2*x) < 0))'
expr = sign(cos(x))
assert ccode(expr) == '(((cos(x)) > 0) - ((cos(x)) < 0))'
|
ybellavance/python-for-android | refs/heads/master | python-build/python-libs/gdata/src/gdata/tlslite/utils/cryptomath.py | 172 | """cryptomath module
This module has basic math/crypto code."""
import os
import math
import base64
import binascii
import sha
from compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
# Try to load cryptlib
try:
import cryptlib_py
try:
cryptlib_py.cryptInit()
except cryptlib_py.CryptException, e:
#If tlslite and cryptoIDlib are both present,
#they might each try to re-initialize this,
#so we're tolerant of that.
if e[0] != cryptlib_py.CRYPT_ERROR_INITED:
raise
cryptlibpyLoaded = True
except ImportError:
cryptlibpyLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Get os.urandom PRNG
try:
os.urandom(1)
def getRandomBytes(howMany):
return stringToBytes(os.urandom(howMany))
prngName = "os.urandom"
except:
# Else get cryptlib PRNG
if cryptlibpyLoaded:
def getRandomBytes(howMany):
randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED,
cryptlib_py.CRYPT_ALGO_AES)
cryptlib_py.cryptSetAttribute(randomKey,
cryptlib_py.CRYPT_CTXINFO_MODE,
cryptlib_py.CRYPT_MODE_OFB)
cryptlib_py.cryptGenerateKey(randomKey)
bytes = createByteArrayZeros(howMany)
cryptlib_py.cryptEncrypt(randomKey, bytes)
return bytes
prngName = "cryptlib"
else:
#Else get UNIX /dev/urandom PRNG
try:
devRandomFile = open("/dev/urandom", "rb")
def getRandomBytes(howMany):
return stringToBytes(devRandomFile.read(howMany))
prngName = "/dev/urandom"
except IOError:
#Else get Win32 CryptoAPI PRNG
try:
import win32prng
def getRandomBytes(howMany):
s = win32prng.getRandomBytes(howMany)
if len(s) != howMany:
raise AssertionError()
return stringToBytes(s)
prngName ="CryptoAPI"
except ImportError:
#Else no PRNG :-(
def getRandomBytes(howMany):
raise NotImplementedError("No Random Number Generator "\
"available.")
prngName = "None"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(bytes):
total = 0L
multiplier = 1L
for count in range(len(bytes)-1, -1, -1):
byte = bytes[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToBytes(n):
howManyBytes = numBytes(n)
bytes = createByteArrayZeros(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
bytes[count] = int(n % 256)
n >>= 8
return bytes
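# Round-trip example: bytesToNumber([1, 0]) == 256L, and numberToBytes(256)
# yields the big-endian bytes 0x01 0x00.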
def bytesToBase64(bytes):
s = bytesToString(bytes)
return stringToBase64(s)
def base64ToBytes(s):
s = base64ToString(s)
return stringToBytes(s)
def numberToBase64(n):
bytes = numberToBytes(n)
return bytesToBase64(bytes)
def base64ToNumber(s):
bytes = base64ToBytes(s)
return bytesToNumber(bytes)
def stringToNumber(s):
bytes = stringToBytes(s)
return bytesToNumber(bytes)
def numberToString(s):
bytes = numberToBytes(s)
return bytesToString(bytes)
def base64ToString(s):
try:
return base64.decodestring(s)
except binascii.Error, e:
raise SyntaxError(e)
except binascii.Incomplete, e:
raise SyntaxError(e)
def stringToBase64(s):
return base64.encodestring(s).replace("\n", "")
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
bytes = stringToBytes(mpi[4:])
return bytesToNumber(bytes)
def numberToMPI(n):
bytes = numberToBytes(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
bytes = concatArrays(createByteArrayZeros(4+ext), bytes)
bytes[0] = (length >> 24) & 0xFF
bytes[1] = (length >> 16) & 0xFF
bytes[2] = (length >> 8) & 0xFF
bytes[3] = length & 0xFF
return bytesToString(bytes)
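# Worked example: numberToMPI(255) emits a 4-byte big-endian length (2)
# followed by 0x00 0xff; the extra zero byte keeps the sign bit clear,
# because numBits(255) is a multiple of 8.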
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
def hashAndBase64(s):
return stringToBase64(sha.sha(s).digest())
def getBase64Nonce(numChars=22): #defaults to a 132-bit nonce
bytes = getRandomBytes(numChars)
bytesStr = "".join([chr(b) for b in bytes])
return stringToBase64(bytesStr)[:numChars]
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
    #This will break when python division changes, but we can't use //
    #because of Jython
return (a * b) / gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
#This will break when python division changes, but we can't use //
        #because of Jython
q = d / c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
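#Worked example: invMod(3, 7) == 5 since 3*5 == 15 == 1 (mod 7), while
#invMod(2, 4) == 0 because gcd(2, 4) != 1 and no inverse exists.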
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return long(result)
else:
#Copied from Bryan G. Olson's post to comp.lang.python
#Does left-to-right instead of pow()'s right-to-left,
#thus about 30% faster than the python built-in with small bases
def powMod(base, power, modulus):
        nBitScan = 5
        """ Return base**power mod modulus, using multi-bit scanning
        with nBitScan bits at a time."""
#TREV - Added support for negative exponents
negativeResult = False
if (power < 0):
power *= -1
negativeResult = True
exp2 = 2**nBitScan
mask = exp2 - 1
# Break power into a list of digits of nBitScan bits.
        # The list is recursive, so it is easy to read in reverse direction.
nibbles = None
while power:
nibbles = int(power & mask), nibbles
power = power >> nBitScan
# Make a table of powers of base up to 2**nBitScan - 1
lowPowers = [1]
for i in xrange(1, exp2):
lowPowers.append((lowPowers[i-1] * base) % modulus)
# To exponentiate by the first nibble, look it up in the table
nib, nibbles = nibbles
prod = lowPowers[nib]
# For the rest, square nBitScan times, then multiply by
# base^nibble
while nibbles:
nib, nibbles = nibbles
for i in xrange(nBitScan):
prod = (prod * prod) % modulus
if nib: prod = (prod * lowPowers[nib]) % modulus
#TREV - Added support for negative exponents
if negativeResult:
prodInv = invMod(prod, modulus)
#Check to make sure the inverse is correct
if (prod * prodInv) % modulus != 1:
raise AssertionError()
return prodInv
return prod
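#Sanity check for either implementation: powMod(2, 10, 1000) == 24, and
#powMod(3, -1, 7) == 5 (negative exponents go through invMod).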
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
sieve = range(n)
for count in range(2, int(math.sqrt(n))):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
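#sieve now holds the 168 primes below 1000: [2, 3, 5, ..., 997].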
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print "*",
s, t = n-1, 0
while s % 2 == 0:
s, t = s/2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
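#e.g. isPrime(7919) is True (7919 is the 1000th prime), while isPrime(7917)
#is False: trial division against the sieve finds the factor 3.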
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2L ** (bits-1)) * 3/2
high = 2L ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print ".",
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3/2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print ".",
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
|
andnovar/kivy | refs/heads/master | kivy/tests/test_issue_1084.py | 54 | #
# Bugs fixed:
# - put utf-8 in string, and validate -> no more crash due to str() encoding
# - put utf-8 in string, validate, close, open the app and edit the value -> no
# more weird space due to ascii->utf8 encoding.
# - create an unicode directory, and select it with Path. -> no more crash at
# validation.
# - create an unicode directory, and select it with Path and restart -> the path
# is still correct.
from kivy.app import App
from kivy.uix.settings import Settings
data = '''
[
{
"type": "string",
"title": "String",
"desc": "-",
"section": "test",
"key": "string"
},
{
"type": "path",
"title": "Path",
"desc": "-",
"section": "test",
"key": "path"
}
]
'''
class UnicodeIssueSetting(App):
def build_config(self, config):
config.add_section('test')
config.setdefault('test', 'string', 'Hello world')
config.setdefault('test', 'path', '/')
def build(self):
s = Settings()
s.add_json_panel('Test Panel', self.config, data=data)
return s
if __name__ == '__main__':
UnicodeIssueSetting().run()
|
chokribr/invenioold | refs/heads/master | modules/bibformat/lib/elements/bfe_plots.py | 18 | # -*- coding: utf-8 -*-
##
## $Id: bfe_CERN_plots.py,v 1.3 2009/03/17 10:55:15 jerome Exp $
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Display images of the plots if the record is in a selected plots collection
"""
from invenio.bibdocfile import BibRecDocs
from invenio.urlutils import create_html_link
from invenio.config import CFG_SITE_RECORD
from invenio.messages import gettext_set_language
try:
from invenio.config import CFG_BASE_URL
except ImportError:
from invenio.config import CFG_SITE_URL
CFG_BASE_URL = CFG_SITE_URL
def format_element(bfo, width="", caption="yes", max_plots="3"):
"""
    Display images of the plots if the record is in a selected plots collection.
    To achieve this, we take the PNGs associated with this document.
    @param width: the width of the returned image (e.g. '100px')
@param caption: display the captions or not?
@param max_plots: the maximum number of plots to display (-1 is all plots)
"""
_ = gettext_set_language(bfo.lang)
img_files = []
try:
max_plots = int(max_plots)
except ValueError:
# Someone tried to squeeze in something non-numerical. Hah!
max_plots = 3
link = ""
bibarchive = BibRecDocs(bfo.recID)
if width != "":
width = 'width="%s"' % width
for doc in bibarchive.list_bibdocs(doctype="Plot"):
for _file in doc.list_latest_files():
if _file.subformat == "context":
# Ignore context files
continue
caption_text = _file.get_description()[5:]
index = int(_file.get_description()[:5])
img_location = _file.get_url()
img = '<img style="vertical-align:middle;" src="%s" title="%s" %s/>' % \
(img_location, caption_text, width)
plotlink = create_html_link(urlbase='%s/%s/%s/plots#%d' %
(CFG_BASE_URL,
CFG_SITE_RECORD,
bfo.recID,
index),
urlargd={},
link_label=img)
img_files.append((index, plotlink))
img_files = sorted(img_files, key=lambda x: x[0])
if max_plots > 0:
img_files = img_files[:max_plots]
if len(img_files) >= max_plots:
link = "<a href='/%s/%s/plots'>%s</a>" % \
(CFG_SITE_RECORD, bfo.recID, _("Show more plots"))
for index in range(len(img_files)):
img_files[index] = img_files[index][1]
if len(img_files) == 0:
return ''
return '<div style="overflow-x:auto;display:inline;width:100%;">' +\
" ".join(img_files) + ' ' + link + '</div>'
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
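# Illustrative use inside a BibFormat output template (tag name follows the
# usual BibFormat convention for this element's file name):
# <BFE_PLOTS width="150px" caption="yes" max_plots="3" />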
|
endlessm/chromium-browser | refs/heads/master | third_party/catapult/third_party/gsutil/gslib/vendored/boto/boto/rds2/__init__.py | 22 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import get_regions
from boto.regioninfo import connect
def regions():
"""
Get all available regions for the RDS service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.rds2.layer1 import RDSConnection
return get_regions('rds', connection_cls=RDSConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.rds2.layer1.RDSConnection`.
Any additional parameters after the region_name are passed on to
the connect method of the region object.
:type: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.rds2.layer1.RDSConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
from boto.rds2.layer1 import RDSConnection
return connect('rds', region_name, connection_cls=RDSConnection,
**kw_params)
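# Minimal usage sketch (assumes AWS credentials are configured):
#
# conn = connect_to_region('us-east-1')
# if conn:
#     instances = conn.describe_db_instances()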
|
CurryBoy/ProtoML-Deprecated | refs/heads/master | protoml/experimental/__init__.py | 1 | from .dialup import DialupRandomForestRegressor, DialupExtraTreesRegressor
from .proto_col import proto_col
from .utils import get_date_dataframe
__all__ = ["DialupRandomForestRegressor",
"DialupExtraTreesRegressor",
"proto_col",
"get_date_dataframe"
]
|
danilito19/django | refs/heads/master | django/conf/locale/de/formats.py | 504 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
mxOBS/deb-pkg_trusty_chromium-browser | refs/heads/master | v8/tools/testrunner/network/endpoint.py | 84 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing
import os
import Queue
import threading
import time
from ..local import execution
from ..local import progress
from ..local import testsuite
from ..local import utils
from ..server import compression
class EndpointProgress(progress.ProgressIndicator):
def __init__(self, sock, server, ctx):
super(EndpointProgress, self).__init__()
self.sock = sock
self.server = server
self.context = ctx
self.results_queue = [] # Accessors must synchronize themselves.
self.sender_lock = threading.Lock()
self.senderthread = threading.Thread(target=self._SenderThread)
self.senderthread.start()
def HasRun(self, test, has_unexpected_output):
# The runners that call this have a lock anyway, so this is safe.
self.results_queue.append(test)
def _SenderThread(self):
keep_running = True
tests = []
self.sender_lock.acquire()
while keep_running:
time.sleep(0.1)
# This should be "atomic enough" without locking :-)
# (We don't care which list any new elements get appended to, as long
# as we don't lose any and the last one comes last.)
current = self.results_queue
self.results_queue = []
for c in current:
if c is None:
keep_running = False
else:
tests.append(c)
if keep_running and len(tests) < 1:
continue # Wait for more results.
if len(tests) < 1: break # We're done here.
result = []
for t in tests:
result.append(t.PackResult())
try:
compression.Send(result, self.sock)
except:
self.runner.terminate = True
for t in tests:
self.server.CompareOwnPerf(t, self.context.arch, self.context.mode)
tests = []
self.sender_lock.release()
def Execute(workspace, ctx, tests, sock, server):
suite_paths = utils.GetSuitePaths(os.path.join(workspace, "test"))
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
suites.append(suite)
suites_dict = {}
for s in suites:
suites_dict[s.name] = s
s.tests = []
for t in tests:
suite = suites_dict[t.suite]
t.suite = suite
suite.tests.append(t)
suites = [ s for s in suites if len(s.tests) > 0 ]
for s in suites:
s.DownloadData()
progress_indicator = EndpointProgress(sock, server, ctx)
runner = execution.Runner(suites, progress_indicator, ctx)
try:
runner.Run(server.jobs)
except IOError, e:
if e.errno == 2:
message = ("File not found: %s, maybe you forgot to 'git add' it?" %
e.filename)
else:
message = "%s" % e
compression.Send([[-1, message]], sock)
progress_indicator.HasRun(None, None) # Sentinel to signal the end.
progress_indicator.sender_lock.acquire() # Released when sending is done.
progress_indicator.sender_lock.release()
|
kenwang815/KodiPlugins | refs/heads/master | script.module.oceanktv/lib/youtube_dl/extractor/fivemin.py | 11 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..utils import (
ExtractorError,
parse_duration,
replace_extension,
)
class FiveMinIE(InfoExtractor):
IE_NAME = '5min'
_VALID_URL = r'(?:5min:(?P<id>\d+)(?::(?P<sid>\d+))?|https?://[^/]*?5min\.com/Scripts/PlayerSeed\.js\?(?P<query>.*))'
_TESTS = [
{
# From http://www.engadget.com/2013/11/15/ipad-mini-retina-display-review/
'url': 'http://pshared.5min.com/Scripts/PlayerSeed.js?sid=281&width=560&height=345&playList=518013791',
'md5': '4f7b0b79bf1a470e5004f7112385941d',
'info_dict': {
'id': '518013791',
'ext': 'mp4',
'title': 'iPad Mini with Retina Display Review',
'duration': 177,
},
},
{
# From http://on.aol.com/video/how-to-make-a-next-level-fruit-salad-518086247
'url': '5min:518086247',
'md5': 'e539a9dd682c288ef5a498898009f69e',
'info_dict': {
'id': '518086247',
'ext': 'mp4',
'title': 'How to Make a Next-Level Fruit Salad',
'duration': 184,
},
'skip': 'no longer available',
},
]
_ERRORS = {
'ErrorVideoNotExist': 'We\'re sorry, but the video you are trying to watch does not exist.',
'ErrorVideoNoLongerAvailable': 'We\'re sorry, but the video you are trying to watch is no longer available.',
'ErrorVideoRejected': 'We\'re sorry, but the video you are trying to watch has been removed.',
'ErrorVideoUserNotGeo': 'We\'re sorry, but the video you are trying to watch cannot be viewed from your current location.',
'ErrorVideoLibraryRestriction': 'We\'re sorry, but the video you are trying to watch is currently unavailable for viewing at this domain.',
'ErrorExposurePermission': 'We\'re sorry, but the video you are trying to watch is currently unavailable for viewing at this domain.',
}
_QUALITIES = {
1: {
'width': 640,
'height': 360,
},
2: {
'width': 854,
'height': 480,
},
4: {
'width': 1280,
'height': 720,
},
8: {
'width': 1920,
'height': 1080,
},
16: {
'width': 640,
'height': 360,
},
32: {
'width': 854,
'height': 480,
},
64: {
'width': 1280,
'height': 720,
},
128: {
'width': 640,
'height': 360,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
sid = mobj.group('sid')
if mobj.group('query'):
qs = compat_parse_qs(mobj.group('query'))
if not qs.get('playList'):
raise ExtractorError('Invalid URL', expected=True)
video_id = qs['playList'][0]
if qs.get('sid'):
sid = qs['sid'][0]
embed_url = 'https://embed.5min.com/playerseed/?playList=%s' % video_id
if not sid:
embed_page = self._download_webpage(embed_url, video_id,
'Downloading embed page')
sid = self._search_regex(r'sid=(\d+)', embed_page, 'sid')
response = self._download_json(
'https://syn.5min.com/handlers/SenseHandler.ashx?' +
compat_urllib_parse_urlencode({
'func': 'GetResults',
'playlist': video_id,
'sid': sid,
'isPlayerSeed': 'true',
'url': embed_url,
}),
video_id)
if not response['success']:
raise ExtractorError(
'%s said: %s' % (
self.IE_NAME,
self._ERRORS.get(response['errorMessage'], response['errorMessage'])),
expected=True)
info = response['binding'][0]
formats = []
parsed_video_url = compat_urllib_parse_urlparse(compat_parse_qs(
compat_urllib_parse_urlparse(info['EmbededURL']).query)['videoUrl'][0])
        for rendition in info['Renditions']:
            if rendition['RenditionType'] in ('aac', 'm3u8'):
                continue
            rendition_url = compat_urlparse.urlunparse(parsed_video_url._replace(
                path=replace_extension(
                    parsed_video_url.path.replace('//', '/%s/' % rendition['ID']),
                    rendition['RenditionType'])))
            quality = self._QUALITIES.get(rendition['ID'], {})
            formats.append({
                'format_id': '%s-%d' % (rendition['RenditionType'], rendition['ID']),
                'url': rendition_url,
                'width': quality.get('width'),
                'height': quality.get('height'),
            })
self._sort_formats(formats)
return {
'id': video_id,
'title': info['Title'],
'thumbnail': info.get('ThumbURL'),
'duration': parse_duration(info.get('Duration')),
'formats': formats,
}
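# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original extractor: how the rendition
# URL is derived above. The base URL's path carries a '//' placeholder; the
# rendition ID is spliced into it and the extension is swapped for the
# rendition type. The host and IDs below are hypothetical.
def _rendition_url_sketch():
    parsed = compat_urllib_parse_urlparse(
        'http://cdn.example.com/vids//518013791.flv')
    path = parsed.path.replace('//', '/%s/' % 4)  # '/vids/4/518013791.flv'
    path = replace_extension(path, 'mp4')         # '/vids/4/518013791.mp4'
    return compat_urlparse.urlunparse(parsed._replace(path=path))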
|
elainenaomi/sciwonc-dataflow-examples | refs/heads/master | dissertation2017/Experiment 2/instances/3_wikiflow_1sh_1s_annot/work/ubuntu/pegasus/example_workflow/20170117T184445+0000/ConfigDB_SessionCompute_0.py | 1 | HOST = "172.31.21.247"
PORT = "5432"
USER = "postgres"
PASSWORD = "enw1989"
DATABASE = "wiki"
READ_PREFERENCE = "primary"
COLLECTION_INPUT = "sessions"
COLLECTION_OUTPUT = "user_sessions"
PREFIX_COLUMN = "w_"
ATTRIBUTES = ["timestamp", "contributor_username"]
SORT = ["timestamp"]
OPERATION_TYPE = "GROUP_BY_COLUMN"
COLUMN = "contributor_username"
VALUE = [u"Sharathcshekhar", u"Trowbridge", u"FunkyFox", u"WRCurtis", u"All in all is all we are", u"Mbourgon", u"Tania667", u"Cassie574", u"Dankness", u"Jmdma", u"Davelhumpers", u"Redhatbert", u"YellabinaHabibi", u"Fratley", u"26-Amanda-26", u"Greenbeanstalk", u"HWETTs", u"Syrcatbot", u"Carlitosrosario", u"Jasonpfinch", u"Kennyluck", u"Countyfan52", u"[email protected]", u"Gruftma", u"J9c91234", u"Fashionable.politics", u"Benramm", u"Carlooos", u"Whitley11", u"Wxstorm", u"Cowcow", u"Capeteco", u"Lenin13", u"Plasma Facts", u"Mbartos", u"Bairdrew6", u"Brent274", u"Autoguru", u"Mohammed 'Blingz' Saadeddin", u"Americanwolf", u"The Polyphonics", u"KO6U", u"Asadist469", u"Alpha for knowledge", u"Danny aus", u"Bozosean1", u"Stipidstid", u"Cats15", u"Asalon12", u"TMoneyTravisL", u"Crutecruizer", u"Tapdancing Tiger", u"Shiffy10", u"Chimpgurl", u"Emma Dashwood", u"MK 133", u"Yorkie777", u"Andrewmc91", u"Morrie", u"Johntonx", u"Saulatlone", u"Gerardo cabero", u"GregVolk", u"Uffling", u"Sopaanmukesh", u"Ziroby", u"Greekafella", u"Soccer libero", u"Dhgreen84", u"NFenn1", u"Jvidoret", u"Frgttndragon8", u"TomeBrown", u"Akbarwasbald", u"Metalgodz", u"Philvsyou", u"Hippieeit", u"Transformationdestruction", u"BMBTHC", u"0987654321abc", u"AgainstV!", u"News51", u"Dmazingo", u"Rowing", u"Omm3", u"Storiesfact", u"Joetews", u"Katie1341", u"Wookie planet", u"Kazzandra", u"EvNEU", u"Jeffb0", u"Belasted", u"Posttool", u"Smilodon 06", u"JaKwan", u"Tthayer", u"Fruitceller", u"Ino", u"Bouncingbeen", u"Gurumaneogranth", u"Sinitive", u"Lojalomi", u"Gettheturthout", u"Shuyin7432", u"Kcharm", u"Kodiaksnacktime", u"Anaspudel", u"Bull god van der iser", u"Bplewe", u"Avestaconsulting", u"Adopp", u"Jazela", u"\u10ec\u10d9\u10d0\u10de\u10dd", u"Cycles", u"Arkestra", u"Oni wan", u"Stocker323", u"Apes2gogo", u"Dr.Violate", u"Acabashi", u"Bwneh", u"JustPhil", u"LabratCunt", u"Mwundram", u"Snakedogman", u"Bhoova", u"Tlowell", u"Saumoarush", u"BlueWarrior23", u"Lucius01x", u"Deepee4321", u"Dayimmm", u"Scoopert2000", u"Colonelfistertaketwo", u"BotAlfred", u"Indiver", u"Doremipaso", u"MasterGracey", u"Ngreenbe", u"Sanjiv2006", u"Gunmuny", u"Daryou", u"Black Swan01", u"Eriklover", u"Prophetscotland", u"Mvasseur", u"Riso", u"LLeaW36", u"Aaft dog", u"Jimmy333", u"Ochsenfrosch", u"Bunkowske", u"Goph42", u"Nathaniel Christopher", u"Sj26", u"Rcredit", u"Nightgurl", u"\u0e19\u0e04\u0e40\u0e23\u0e28", u"Schoopr", u"Leebo4", u"Ba2han", u"2011kirkand", u"Iowaweezer", u"Ilanwix", u"RhoDaZZ", u"Jp347", u"Conmaleta", u"Tommo1957", u"Questum", u"Starrica", u"Carlo & ashloh", u"Priyadarshic", u"Tex KT", u"Sullivanesque", u"Yeshedorje", u"IRJR", u"Neuringer", u"Errarel", u"Ultragarrison", u"Excalabyte", u"Seesot", u"Smruss2000", u"Theorodriguez", u"Nima101", u"Vshadr", u"Nightslash", u"AwesomeVidMan", u"Evansdb", u"GSwarthout", u"Tonusperegrinus", u"Beeltrystig", u"HedCR", u"PoohBerry", u"Revged", u"Cogizu", u"Rtv", u"Miclos15", u"Stephen Stylus", u"Alternat", u"HamsterKing", u"Corpsedust", u"Scion of Reason", u"Mirandamir2", u"Jda", u"Lasvegasflash", u"Peter.thejackos", u"-Lord-92", u"ANZLitLovers", u"Ismellnicotine", u"Petru Dimitriu", u"Jmf17", u"Musmanriaz", u"Dschepp", u"Wikipoonia", u"GeorgetownUniversity", u"Ifuufi", u"Zixen1", u"Efayazzadeh", u"Starchildap", u"Fx21", u"Reggaedelgado", u"Txjo", u"Stone613", u"Xenophobehater", u"Jodieb93", u"Sansevieria202", u"Classicalmusicfan1", u"Sariain", u"Jakeville", u"Dondore", u"ABlockedUser", u"Rayware", u"Alejandroh", u"Manzoorthalib", u"Oeropium", u"Pigypigy", u"3D Confidence", u"Hemustincrease", 
u"JoselDadea11", u"Sumnderd2", u"Hymn1984", u"Fidget2006", u"MarmotteNZ", u"Alvonruff", u"Zecured", u"Zizibo", u"Reds5fan", u"MistressMoon619", u"Crashaire", u"Michael Farris", u"93Rockgirl", u"Nacor", u"Sharethemanna", u"ErikHK", u"FormWorldOut", u"Rorschak", u"Ad2002", u"J.hkp", u"MiraclexxxBlazer", u"Aayan1", u"\"The Hurricane\" Gustav", u"Eclipse512", u"WateryHill", u"Clray1", u"ChrisQZAP", u"SampLevy", u"Juelk55", u"Compexample", u"Glenn HongKiat", u"Bboyce17", u"Paradiseparadise", u"Adler1", u"Brokenlove", u"Mrcomputerwiz", u"Citator", u"Fillapack", u"Silvag", u"Drkeyboard", u"Akmaltk96", u"Lgvanegas", u"Juanfo05", u"Dlemberger", u"Big-tom-84", u"HighSchoolSportsNet", u"Coordnate", u"Brentman87", u"Martin253", u"Kentaki", u"Weareallone", u"Gilgigamesh", u"Viddy24", u"Germanquetzal", u"Abelincolnwasaturtle", u"OHyena", u"Shortstop13", u"Ricardo257", u"WelshBloke", u"Cbonnardhk", u"Proxer", u"Lilwezzy29", u"PoorTom", u"FreeMorpheme", u"Slyron", u"Jessn050", u"Dev Bliss", u"SirFluffington", u"Jrbaw", u"Yves Lopez", u"Junulo", u"Manylevel", u"Jinesh JK", u"AMD797", u"Serein", u"Someshkirar", u"Tomazzz", u"Newmoonnight", u"Vrrayman1989", u"Tallmaninsuffolk", u"Lipstickandletdown", u"Lelov", u"Oxygen you can believe in.", u"Jcfanatic", u"Camille4444", u"Turnit0ut", u"Johnnybusboy82", u"Jan Henry S. Fosse", u"AndrewAnorak", u"Rockhead126", u"Pdrlps", u"Amicalmant", u"Ashkara sands", u"MeandWhite", u"SkippyUK", u"Kbatra", u"Lindaige", u"Shineonyoutoo", u"Kaclock", u"Inspiredstuff", u"Mpcooke3", u"Sherwoodbm", u"Gritchka", u"Ulla Gabrielsson", u"Srebri", u"Wikipedia administrators is in love with Kate McAuliffe", u"Umdreslifesuc", u"Psyk0", u"Harisin 47", u"Xvflamevx", u"IEatChildren", u"Phatbenny", u"Dharshana", u"Lonbordin", u"The701man", u"Mightychieftain", u"Pculbert", u"Abraham Lincolin", u"Qaztree69", u"SusanneLane", u"Dbratslavsky", u"Tiossi", u"Athrion", u"Littleperson", u"2parkway", u"Scottzar", u"Pkinnaird", u"DarkWarriorSRB2", u"SuperPalomitas", u"Aurogallus", u"Atulsvasu", u"Mzansiafrika", u"Socgrrrl42", u"Gloriaoriggi", u"Tanyenkheng", u"Nfarrow", u"Ginsfella", u"Naufana", u"SaturnineMind", u"Joetroll", u"Localfoodhero", u"Zenbob", u"Natasia", u"Bergmana17", u"Mugglegeog", u"McAtee08", u"IRYFS", u"Oshaberi", u"Larrry2", u"TuckerUofR", u"Tjenkins8", u"Cemendtaur", u"Frshrbts", u"Dessar", u"Titan4mmb", u"Financialmodel", u"Cancandance", u"Psifi", u"Gayside", u"Laccysap", u"Kcebnalyd", u"Specklesthetortoise", u"Allythebrat", u"Takemyex", u"Cvenom", u"DarkFire", u"Cipresso", u"Genpgk", u"Devilbaby2435", u"ZiaraatDotCom", u"CormanoSanchez", u"Eagleye1585", u"Duesh", u"EgoSanus", u"Zip1010", u"Yodaki", u"Graumail", u"Sanofi1976", u"SoFDMC", u"Pc12345", u"NamHyHoangPhong", u"Skippy2705", u"Muhammad.arman", u"Styx2881", u"Sandeep p19", u"Rosecivil", u"Hashomer", u"Dhago", u"Reader2010", u"Wasps FC", u"Frothy70", u"Stylin10", u"Bedel23", u"Healthfax", u"ProlactinPSTAT", u"Nikoshock", u"Yugure", u"Fuzzytek", u"Agrama", u"Adamamo", u"Immortalwolfx", u"Canonrockhero", u"Terry Bollinger", u"Homig", u"Andreasegde", u"Mlscdi", u"Matt friedman", u"Mandyjackson", u"RKlassen", u"Mikebevibevi", u"Dowj", u"Topiarydan", u"Mbiraman", u"Kiqass", u"Emgrrr", u"Alozano", u"Kamelot", u"Philopateer", u"Maestro91", u"Ndrwmls10", u"CaptainBlueEye", u"Vinalover", u"Kmilling", u"Do it do it!!", u"Natanaelr", u"Garrettgsell", u"Viswa sn", u"Raj6", u"Shull22", u"Toddbu", u"Greytownman", u"DeimosTheOdd", u"Sasham43", u"Symi81", u"JohnJ80", u"Bananawanker", u"Ezarate", u"Shawn b15", u"Danreitz", u"THED.B.C.(tm)", 
u"Pfhorrest", u"Vivers", u"Sameth2", u"Cgs4151", u"Sodafunk", u"Panzergoob", u"Bfwe", u"Llamabr", u"Sim card17", u"Vijayta", u"Gerry2009", u"Bbristol", u"Gbilliter", u"Barnabas Gunn", u"Julianneh", u"Tisdawg", u"Gav 2000", u"Yoyo34863", u"Greg Tyler", u"Jcarlock", u"Tsaturo", u"Loadmaster", u"Ckbailey76", u"Rpalin", u"Yaquncheveu", u"Cksniffen", u"Kaloskagatos", u"Vrangforestillinger", u"Sage Callahan", u"Anthonymendoza", u"Autotrophithecus", u"JONASLOVER00", u"Tink8888", u"AusTerrapin", u"Sleep pilot", u"Magic x50", u"Dirgemobile", u"Kashirin", u"Malbanez", u"TridentGumster", u"ScottyXS", u"NathanZ", u"Old Redneck Jokes", u"Bill Bei", u"Werner Berger", u"S.R.A.L.L.kid", u"Weejock", u"Nivekev", u"Janschween", u"Theradioguy", u"PositivA7", u"242424s", u"PDSpicer", u"Awtawerawet", u"Yelsaw", u"MarkRoberts", u"Ckomnick", u"Hahaloser103", u"Reap\u042dr", u"Wayne InSane (Of RTC)", u"Mwoodward3", u"Bourko", u"Emdobrowolski", u"Gmalcott", u"Kunzangyabyum", u"Ksnow", u"Eggeggeggegg", u"GB12", u"Aditreeslime", u"Joshamey1990", u"Create00", u"Wordboy3nc", u"Racethecat24", u"Squid99word", u"Bakery2k", u"Bne2", u"Aoikumo", u"PakkaPunekar", u"Goldkingtut5", u"Giordano Giordani", u"Gettyhouse", u"Wikiport", u"BONNUIT", u"Zacornelius", u"Rgsmoker", u"CerveloGuy", u"Adhewoo", u"Dropzone", u"Stephan735i", u"C12H22O11", u"TheBambooForest", u"LandonJaeger", u"Advait srinivas", u"GoTaS", u"Superman9875605", u"12345kat6789natv03", u"Rbertsche", u"Genstab", u"Shonedeep", u"\u6f58\u4f69\u73e0\u662f\u5049\u5927\u7684\u8d8a\u5357\u570b\u7236", u"Roto13", u"Cuberhobo", u"Yakob", u"Btg911", u"Zetko", u"Omnibus progression", u"Chriskrowan", u"Ketanof92", u"Moneymike111", u"Cozolins", u"Foula", u"Foscoe", u"Wibes91", u"Kenmckinley", u"Rejoyv", u"Shiffa", u"MikeYates", u"Joff", u"DracoMalfoy", u"Nowyouseeit", u"Clu3", u"Gabriel7", u"Titanmyth88", u"Tomginzedbu16", u"Zblog123", u"Michaeljbroderick", u"Poizond13", u"Shadowdemon99999", u"Mls1212", u"High School Musical909", u"Yewenyi", u"Aftertaste", u"Kwakob", u"Frontera2", u"Gandalf360", u"Mersmith", u"Salcey", u"Jimmyfresno", u"Escobar.bs", u"DetlevSchm", u"Drentu", u"Cinephiliac", u"MrSzAcEfRoN12321", u"SharIndie", u"RoadieDalton", u"Wesorshoski", u"Chaladoor", u"Finchbook01", u"Unormal", u"Malak5300", u"Andreas DE", u"JDLarsen", u"Jasonloban", u"Sarahgal", u"Dantheman0056", u"Jonnny", u"Kunalkohli2006", u"Andreb bent", u"Esiegel76", u"Maddog Battie", u"Sigurd72", u"EdogawaKND", u"Betterthanyouare", u"Chrisvatter", u"Beatroot&vegetables", u"Khanartist", u"Zhinker", u"Surabhi12", u"Kronik overkill", u"Partwraith", u"Toadette Fan", u"Benjamin Lowe", u"Major weeks", u"DigitalDaiquiri", u"Baxter0", u"Megedeath", u"LietKynes", u"Sarah.online", u"Bayankaran", u"Jokesaside", u"Cyberstrike2000x", u"Dleonard", u"Elf-friend", u"MageKing17", u"Swapons", u"Donkdaegu", u"Ajay30", u"AlmightyZoidberg", u"Phinfan84c", u"Qweqwewert", u"Dcabirac", u"Bdcurrier", u"Gilnyc", u"V-B 9", u"Cu1138", u"Lronhubbard234", u"Stupideuan", u"Psa0", u"Starvinho", u"Sutdog", u"Jimic be", u"Geosammie", u"Tunguska.", u"Anish22 03", u"Dedalus111", u"Pat Silver-Lasky", u"Varwen", u"Wrcase", u"Jimmyjames507", u"Chaoschickenfoo", u"Lemuel.africa", u"Koi", u"Cmirescu", u"Adazka", u"TFunk", u"Mjpresson", u"Tadmichael", u"Six One Seven", u"Khoonirobo", u"Jabishriki", u"Jo Juan", u"Novemberfront", u"Spektralex", u"Jawshoeaw", u"Al3191", u"Nietecza", u"Charney", u"Fatoil2000", u"BrianRecchia", u"Kevinmcgill", u"Kazjako", u"DianaLeeG", u"Peplz", u"Geezer's Den", u"Daveyb420", u"TylerMcBride", u"Placerinrome", 
u"Pmm103", u"Para45", u"Creativetoo", u"Kapric3", u"Precis", u"Snoroof58", u"ProperlyRaised", u"WandererNoLonger", u"KarateKickin", u"Destard", u"Hephaestos le Bancal", u"Herbertdeborba", u"Wetlab", u"Blondjamesblond", u"Monk3", u"Modi", u"TamariskBooks", u"Redskinsfan1", u"Klugg", u"Charliehall", u"Aron123456789", u"Mel Sharples", u"Darren Mulligan", u"Tuntable", u"Petr Pakandl", u"Crs2117", u"DaveHepler08", u"Maloseri", u"Zemindar", u"Oopz i did it agin", u"KLSSLK", u"Maddi8", u"Ogger13", u"Killthemusic", u"Khamfphiogne", u"Hockeyguy08", u"Clearessence", u"Adude9000", u"Ewinchel", u"Cyrus1978", u"Popesco Rosenfeld", u"EverybodyFits Studio", u"JasonBourne2007", u"Alearnedperson", u"KristinaR", u"Undoubtedly0", u"KyleKlaasen", u"Doug22123", u"Trecypascual", u"Flip135", u"Undercoverblackman", u"Aattwood", u"Muisee", u"Rentwa", u"Ictmanager", u"Reggieblue", u"Dhbt", u"Michaelzhao", u"Nityalila", u"Livid125", u"E-manuhell", u"Nemle22", u"Faye Marina", u"Aethelflad", u"Kenasuta", u"Meowmeow123y321", u"Signifier", u"PeterBarrett420", u"Guitarist56", u"Matonen", u"Mj2035", u"Luckybeaches", u"Marccc", u"Perjean27", u"Bsnyder02", u"Vkalog", u"Kairishot", u"Jyanong", u"Crazy-yak", u"Bigpeteb", u"Lotty111", u"Tabuuownsall132", u"Dzaal", u"Carrie Myers", u"John Wilder", u"Jpzank", u"Ae6rt", u"Monetarycrank", u"Vitilsky", u"Keith.West", u"Telebreaker", u"Rockasurfer", u"Eac83", u"CronScript", u"Cubidoo", u"Cascade2009", u"Seduncan", u"OSU Tulsa", u"Starboardkiss", u"Pieter Kuiper", u"Algabatz", u"Disfiguredfrolife", u"Makeyourself", u"Hentrain", u"Drivera", u"Dutchartlover", u"Pierson's Puppeteer", u"Mrtharrison", u"Markere06", u"WordsExpert", u"Lancashirelad58", u"Maheshbmw", u"Aboroumand", u"Dolgoruky", u"Willy-os", u"Roman12345", u"Crumbsucker", u"Tararagone", u"Xdemonhunterx", u"Itllbealrightinthemornin", u"Adrger", u"Manatee123", u"Poshfc", u"Usang", u"Mndz85", u"Manux", u"Sammaine89", u"Mjlarochelle", u"Podzach", u"Wow88", u"Isabella21", u"Djy9302", u"Fathead55", u"Reyortsed", u"Lw96jph", u"Pcy623", u"Fff123", u"IgarTheTerrible", u"Navid Abbas", u"Outlaws831", u"Jamesmv1921", u"Kniper", u"Samykitty", u"Wootini", u"Prateek.d.reddy", u"Ahmeto", u"Dara", u"Nhl1988", u"Shubhajeet roy", u"Fmjennif", u"Xuxo", u"Msusc", u"Brian Milan", u"Zicoswitek", u"Ksoligon", u"Creax", u"Paloian", u"Regman101", u"Omarthesecound", u"Qhist121jc", u"Suanla", u"Phoenix mo", u"Rez1001", u"Muzza", u"The info seeker", u"ScarabEpic22", u"Odyssey2001ACC", u"GWEN ROBINS", u"HumpyNoRegard24", u"Eagerpee", u"Elicenter", u"Theroachyjay", u"Captain Proton", u"John DakotaKhan Reid", u"Agusta74", u"Sr-rolleyes", u"Skywards", u"Jerde1", u"Sillychild", u"Saturdays when that happened", u"Lilyhammer", u"Maczka0", u"Behomeny", u"Cchipman", u"Pants of Destiny", u"Blionheart", u"Lynneoconnor", u"Cheesypea", u"Paulw99", u"Kwn007", u"GMkyle", u"Hwfun", u"R Barker", u"Gr8moldy", u"Kill123", u"Carmelorocks", u"Prashant Solomon", u"8doczzz1", u"5b3TnY", u"Astion", u"Merabharatmahan", u"Ginger3393", u"Rdmtimp", u"LWist", u"Soviet Guy", u"Peter.kretschmar", u"Jean-Francois Brouttier", u"Tony Clifton", u"Pgerrish", u"User998", u"Ehuru", u"Pmunyuki", u"Waltwik", u"Qwerty1395", u"Thomasss999", u"Marthasvinyard", u"Dartman", u"Culurciello", u"Battlewiz88", u"Jsalims80", u"Zope410", u"Romper", u"Vercing", u"Ziatonic", u"Mrhat557", u"Maritsa m", u"Azeemipedia", u"Arwhfli", u"SiLeNc3", u"WSmeyers", u"Gothere", u"Chshoaib", u"Rhyslightning", u"KrisW6", u"Nuutti", u"Carricko", u"KoreanIan", u"Johndvincent", u"Xxxjackass03", u"Chameleon1991", 
u"Statlearn", u"Davisedit", u"CCGS", u"Ybact", u"Seanty", u"Slipknot Fan", u"Ibrahim Abu Iqsa", u"Taofiknasan", u"MattDell29", u"Rckstr4lfe", u"Jgamekeeper", u"Stellar Loussier", u"Magillacuttyov", u"Schofieldman", u"Jtda", u"JohnFrancis718", u"Geb80", u"Jord84", u"Thestudier", u"Eliebellie", u"Efanning", u"Helpmecomeon", u"Sammym1997", u"Ozcammo", u"Jesse Magana", u"Umbertotf", u"Ladislav the Posthumous", u"Nilsemann", u"Deb0824", u"Ownyzall", u"GT7Bassman4JC", u"Mokele", u"Assassin of Joy", u"Writingace", u"Youstolemykill", u"Xkmail", u"Tachy", u"Btl36", u"Popcornkid2", u"MichalKotowski", u"Agustingarcia1992", u"Campesino", u"JohnDavidson", u"ShockerHelp", u"Ong saluri", u"Anon-Asset", u"ACTfilms", u"Nifflerbottom", u"Savior1974", u"Palermo1114", u"Renegate", u"Eazye85", u"Mirughaz", u"Marshall395", u"Peter adamsonp", u"Manny!-Freakin-Fresh!", u"Ezsmartads", u"Jrad91", u"Shamuhoe", u"Osbalde", u"CENTAUREAMONTANA", u"Joshuas88", u"Mab57", u"Semio7", u"Nevermind2", u"Henz1771", u"Barstool prof", u"Frances4skurdt", u"Susan White", u"Pauljg", u"PieMan.EXE", u"Sabra36", u"Shelluz", u"Janewellsmier", u"Cmureddu", u"Mlamme", u"BruceAlexAdams", u"Sweeper tamonten", u"Ulipop", u"Pramodi", u"Tit1912", u"Renbelcher", u"SAS87", u"Slaytanicslaughter", u"Brittkillian", u"Charlie fng", u"WernerVau", u"Jammin2285", u"SSA Bbhandari", u"Nygirl410027", u"Clavering", u"Kingreka", u"Mrdk", u"Natisoto15", u"Djcl", u"Mdavis6589", u"Gladtohelp", u"BWC56", u"Bigbluefish", u"Curtains99", u"Amitdotchauhan", u"Jsalem", u"Curtis J. Christianson", u"Anonlurker", u"NupeWD1993", u"Iftikhariqbal", u"Bogey156", u"Fason", u"Cmm33", u"Knevas", u"Smionhudson", u"Hiram J. Hackenbacker", u"Mr.icebreaker", u"Mzmadmike", u"Mikeartis1984", u"Gobookhotels", u"Azmildman", u"Kay4087", u"Tachytank", u"Murdoctor", u"Mr Majik", u"Nicholas0", u"-Cerberus", u"Newportsurfer", u"Samplane123", u"Mfkoxal", u"Ilink72", u"Marqurn", u"Ship69", u"Sophiecook", u"Mewmewpower", u"Eponce007", u"BondMaster", u"Ps0", u"Brainiac36", u"Hainzey", u"Handsonline", u"SMBriscoe", u"Jamesstoker", u"Suprokash", u"Haris Obaid", u"Futbaldc2", u"Glodenox", u"Zinidinezadane", u"Andhraking1", u"Drawnmeets3333", u"Pooresd", u"Furries r retarded", u"Coutinho", u"Petgamer", u"Jonathaneo", u"AlmightyCC", u"Souris-modeste", u"Wertwert", u"Ffac06", u"Kyouteki", u"Itheodbi", u"Berrie Leigh", u"Unit0918", u"LukeBlueFive", u"Arvs", u"KipHansen", u"Mrwhy2k", u"H-aze Mc", u"Leeinslc", u"Moonlight01", u"Scumbum", u"Benroberts", u"Gauravsjbrana", u"Valus reborn", u"KuruPooPoo", u"Melody444", u"Haider Al-Shaikley", u"Wedster", u"Natsugumo", u"Incubus30", u"O00wl", u"Swordfish2", u"Jeked5", u"KWTalk", u"Fryguy1209", u"Ghwatts", u"Jltwom", u"Chm33", u"Chrdit", u"Dennisvillegas", u"Lachlanfahy", u"Spainhower", u"Stewharris", u"Aogouguo", u"Sluox", u"Biophase", u"Escriva", u"Hearts101", u"Pccestudent", u"Demandatoo", u"Comedymail", u"Javafueled", u"Moxp", u"Calvination", u"Pierre Bailey", u"Thelliyan5", u"Lregelson", u"JMPowers", u"Rockstartrev", u"Butapest", u"Reasteh", u"Canuklehead", u"Mogul761", u"Joako60", u"Bearcat", u"Ginger123", u"Ben101", u"Lindisfarnelibrary", u"Rocket10", u"JoshuaMusic69", u"Vrac", u"ElLinuxPenguin", u"Jamie1743", u"Bloodratespeno is", u"Lollipop Lady", u"Akrazyrunner", u"Peterrivington", u"Gnietojr", u"Knotsfalcon", u"RudeGal Ent", u"Sod frostrup", u"Tzpnfl", u"VanyaElda", u"Crispydonuts", u"Pokemong", u"VideoPhonic", u"Logan074", u"Lfrumkin", u"JAFFA", u"Boyscout61", u"SMP0328", u"Aaron weaver", u"1108569a", u"Masello", u"Nick tchaikovsky", 
u"Davekpeck", u"Grebstad", u"Smarker", u"SWeber", u"Culhanem", u"Ale-brivio", u"OK1900", u"Korvergirl", u"Epsteinj", u"Marjory-Stuart-Baxtor", u"Ahaw1983", u"MajorRogers", u"Somoo", u"Shaunakg", u"Aldo25983", u"Leukaemiaresearch", u"Acceleration0", u"Jelly scorpio", u"Apintofbitterplease", u"Yamavu", u"Mikemartine", u"JMurdoch2", u"Funnytug", u"Jrapoport", u"Sofarkingfast", u"Unknowntwelfth", u"Furlock", u"Tisbonus", u"Mashoo", u"RobertsonR9", u"SPQR58", u"Wangjing13", u"Mpalais", u"MonicaM", u"Bhiz07", u"Billiiiul", u"Steven91", u"Kirklennon", u"Under22Entreprenuer", u"Rhys245", u"BSacknoosin", u"0702034", u"Weirdlookindude", u"Rubendenunes", u"Swisstony101", u"NewRisingSun", u"Kennedy78", u"Jkuhner", u"Aldrinkevin", u"Felixstrange", u"Hikoya", u"Farmer88", u"Hartonoilham", u"MEDCA", u"Blakkatt11", u"Tpilkati", u"Krcrook", u"Stasi2", u"JBOSU2008", u"RemeberTitans", u"The Saturday Boy", u"H2d2", u"Yousaf.john", u"Keenan Pepper", u"Earthmonkey69", u"Slipzen", u"Ellienugget7", u"Azbassist74", u"Arced", u"Rooroo22", u"Terrestrium", u"BendoverRWDB", u"Bdewaele", u"Shadowfaxes", u"Redlock", u"Darilisgm", u"Laberzs", u"Mokoniki", u"Livinginhaidian", u"\u5de8\u4eba", u"GoOdCoNtEnT", u"Yasirhussain", u"Funnyguy88", u"Lou mann", u"LaBohemienne", u"Omgtrident", u"Letsdrum", u"Supergunslinger", u"Zxombie", u"Elvis25", u"Gaslucky", u"Benslippy", u"Darkman042", u"Jamie2k9", u"Rpap", u"Rekabis", u"Shaunj4", u"Typogr", u"Robingeorge", u"Love Kiensvay", u"Alchemillamollis", u"Bjijbji", u"Rfede2", u"Jazzbay", u"Irguywhogont33chul3sson", u"Rahulgbe", u"Rami25canada", u"Beadbs", u"Gaggarkartik", u"Jimtaip", u"Ngod23", u"Qatari 88", u"Do the pop", u"Dev d13", u"Beldenben", u"Uterus 77", u"Deany12345", u"Andymcglynn", u"Wargirl", u"Whysoseriouss", u"The Source", u"Help4IT", u"Sttvibio", u"Proof32", u"Pozzix", u"Edward 75", u"DeviantMan", u"Tanya Hawthorne", u"Qala000", u"Feezo", u"Mightyboosh", u"Pgvolff", u"Nakedbatman", u"Ahava-tova", u"Sbacle", u"Pietre-stones", u"RaeBigBoy13", u"SnakeDerek25", u"Glennpagalan", u"Abdul Muntaqim", u"Jinlian", u"Boxter77", u"Suman malepati", u"Carl580", u"MEOW", u"Zach Oldenburg", u"Miles316", u"Batman66", u"Depressed bubblez", u"Coryp", u"Nathan Curley", u"Silenttunes", u"Kheg k", u"EgbertW", u"Sprint76", u"Versasovantare", u"Lafayettespring", u"KittyHawker", u"Deleonhectorm", u"Outofthebox", u"Cjm5150", u"Bignasty2013", u"Comrade Svensk", u"DirtY iCE", u"Kylelovesyou", u"Chbook", u"Kareem297", u"Son of Cicero", u"SleepysInc", u"Darrenhf", u"Cowboy456", u"Sahaskatta", u"Sujan39", u"Minesweeper8383", u"NintendoWii99", u"General ED", u"Baller4life2468", u"Defchris", u"KumarFilo", u"Bruabf", u"Oxfordoaks", u"Urythmic", u"Weaselboy246", u"Prettyinpink2121", u"Scorpe51", u"Pacas", u"Joeldaalv", u"Watermeloniris", u"Leglicker", u"Moshimoshi52", u"John7000", u"Bandit2001", u"Rapid Fire222", u"Mungey", u"Malabar7", u"Asmkillr323", u"**mech**", u"Ahsanejaz", u"Dabiiigtimers", u"Abuisz", u"JenkemMaker", u"StopCallingMeSurely", u"Guitarherochristopher", u"Geoffdesoye", u"Orbiting89", u"Indian Chronicles", u"Tomboot", u"Leasnam", u"Akhaze", u"Twins312", u"Pbluer", u"Fpsooner", u"Ichigo06", u"KL 01 24 10", u"RaaGoneFishing", u"Mrmonkeyyboi", u"Ferozebabu", u"Racingfanq", u"Oswaldnovember", u"XDanthony", u"Pramodgorur", u"L kubo designer", u"Rynokins", u"ZakBioligy2", u"Dorko", u"Jasonpietz", u"Johnnykeung", u"E-century", u"PockBot", u"Sandboxer", u"Felippino", u"Sean1027", u"Lakelodge", u"Pooh princess", u"Cstavinoha", u"Manlady1114", u"Air2blaze", u"BhavanaLover", u"Ridgeroy", 
u"Preijnen", u"Autoweaver", u"Davidjohnson314", u"Thomastankboys", u"Mike westin", u"Cpt.JohnSheridan", u"Mctxn", u"Cucklebuckle", u"Lcox09", u"Rajbirbisht1", u"Ar57", u"Adrenocort33", u"OK-RD", u"Rakkianji", u"Prince Rhubarb", u"Horoshi1820", u"12ticeb", u"Connorpistons", u"Mazi.ahmadi", u"Rashanir", u"Supermeshi", u"Ryanhough", u"Sweetman", u"Adamccl", u"Janey monroe", u"Heatherhaynes", u"RonJon1", u"NoNameIst", u"Rumel1988", u"DaveB205", u"Heckmanxc", u"Rlewkowski", u"Salamipete", u"Everettvlai", u"Dandastar", u"Khuntien Ngin", u"Ponkatron", u"Juliemcfarlandisgay", u"A305w", u"Maketablebetter", u"Weirdo81622", u"Cubesquared", u"Jamiesus", u"[email protected]", u"YouRang?", u"Dr.JDPB", u"HBC archive builderbot", u"From Venice", u"Mrgainesville", u"Andydmorrissey", u"El Macaracachimba", u"Kaijan", u"Eldaverino", u"Morganommer", u"Thornder", u"Dmyhrer", u"UMLG", u"Sam K Mirth", u"NMS", u"Theforager", u"Rodingaugh", u"Op27no1", u"Ken shoryuken", u"Youfightthepowertoparty", u"L.Wadsworth", u"Darbiedoll78", u"KannD86", u"Revlob", u"Coolidge1982", u"Fawenna mia", u"Chicagotom31", u"Mike3007", u"Dieter 3", u"Ehiga", u"AyKayKay47", u"Yangd88", u"Lucky meryl", u"PureJadeKid", u"JJx2", u"FilmFan69", u"Cloudguitar", u"Ruannabanana", u"Malik2day", u"Noderose", u"Kchase02vold", u"Blueeyedjedi1", u"Mi9cal", u"Yellow Element", u"Natanc", u"Jnobbs21", u"Lwtaylor", u"Qorilla", u"Special Agent", u"Nbonaddio", u"Acontrada", u"Subcultureuk", u"Wartytoad", u"SotirisVa", u"Nat ons", u"Skykh", u"Ahrgoms", u"TowelieSaysBringATowel", u"SeaTacWikier2009", u"Jlgparker", u"Esqa", u"Goleafs", u"JCTribe", u"Anubis2591", u"Ayataf", u"AdamG-PhotoShare", u"Mandyvtz", u"Robert cruickshank", u"Lukehounsome", u"Apilok", u"HJensen", u"Flatbush52-1", u"Jsw110mb", u"LyleBarrere", u"NegativeNed", u"Arjes", u"Siggsterations", u"Ejwma", u"Punning", u"Hellohowareyoudoing", u"BrightGreenGoth", u"\u30a6\u30a3\u30ad\u30d4\u30fc\u30c7\u30a3\u30a2\u30f3", u"Saioazubi", u"Finavon", u"Charmaingel", u"Picori56", u"Oxryly", u"Kobasiukg7", u"Scott1221", u"Typedelay", u"Vaxine19", u"Gauthamgod", u"Coldforgedfabrications", u"Sinaerem", u"Papaya maya1234567890987654321", u"Bobfiasco", u"Armand85", u"Tbst186", u"Jvijji", u"Koala man", u"LER223", u"Thomas-evl", u"AnaniasKun", u"Shallerking", u"Martc123", u"Plafargue", u"Darulz4me", u"Ketsuo", u"Goosebumptheooze", u"Jaimealejandro", u"BoboboNarutofangirl", u"Dpindustries", u"Dnklu", u"49oxen", u"JB1956", u"Ratakat", u"Ajifocus", u"Mkbean", u"Elcidia", u"Ufc99", u"Oshawapilot", u"Jmofro", u"Jbk12385", u"Zhanglj", u"Tumos9423", u"Libent", u"Dancecaster", u"Nishisuzu", u"Ybakos", u"Kukka jp", u"Crookedzero", u"WSpaceport", u"Lakashvin", u"Marshallbrown", u"Jeffwang16", u"ShowNoMercy", u"Nanucki", u"Randomhero78", u"Joelolympics", u"Pabloramirezjdp", u"Secretmist", u"IsWayneBradygonnahavetosmackabitch115", u"Qak10", u"Ronson Avilar", u"FreelandC", u"Oldbull210008", u"Jryan86", u"Calilover", u"96byrnemo1", u"Wfbor", u"Vineethcm", u"FrozenTundraBlizzard", u"Balardyblah", u"Jchriscampbell", u"Syntheticalconnections", u"Rolf-Peter Wille", u"Wigy", u"The Punk", u"Nana Higuchi", u"The smilodon", u"Jsb321", u"Mattijsvandelden", u"Deselms", u"Adanmartinezmjc", u"Stephenwood", u"Urbanzimbabwe", u"Ameerpaul", u"Metal link16", u"Jamestown28", u"Deutsch Fetisch", u"Qabala", u"Charlesaperrone", u"Jayzswifey", u"Sai3711", u"Garetgarrett", u"Rock810", u"Backwalker", u"Voske", u"Geobranding", u"Kimmysia", u"BigFatFartInTheFace", u"Crispytreat", u"Kewllewk", u"NMUfinance", u"Gabular", u"Pietrosperoni", u"Mingle324", 
u"Komisaroff", u"Surajbhatt 343", u"Chapter1", u"ThomasR", u"The Beatles Fan", u"CerberusAlpha", u"Flowingwords", u"KenGirard", u"Enterthesound", u"Noske", u"Fangfufu", u"Jhaiduce", u"KhaTzek", u"Dargen", u"Nico santoso", u"Paparai", u"Tashan Adams (Tay)", u"Masanook", u"Wyclif", u"HTHS1981", u"Uptonatom", u"Suhmeister", u"Prsitter", u"Antique cuckhoo clock", u"Nexxia", u"Allthewayaround13332", u"A sexy twat", u"Blacksteel1234", u"Eekernohan", u"Bcrao 17", u"SuLeimanAlhAkim", u"DaveWalley", u"Af reno911", u"Oreolove", u"Sprungl", u"Sid1138", u"Homie C", u"Mrflibble", u"Harlem225", u"Amendedj", u"GoodwinC", u"Nelsonljohnson", u"Judithsq", u"Index1", u"Nctalkinghead", u"Cloak3000", u"Frghbynju5ki7gtrhyju7i6t5grf4g56u7r5gtf4e24", u"Drakon09", u"DanDrage", u"Gdlpcp", u"ShowWeb", u"Athens45701", u"Larrybird789", u"Muchosbadgieros", u"Helen 2806", u"Runner404", u"Tankmac", u"Caruperto", u"Checko", u"Omhafeieio", u"Jordan Xord", u"Kinghoodia", u"Flausch", u"Lprideux", u"Kaos 42", u"Tokba88", u"Markal", u"Spudd1892", u"Scornfield", u"Hooderdak", u"Django5", u"Hermione12", u"Frank8999", u"Prothro", u"KORAD", u"Context Media", u"Monty-the-montster", u"Couperman", u"TequilaShot", u"Potatis invalido", u"Grabo112", u"Spikespeigel42", u"Hayabusa future", u"AndrewCrown", u"Cunnety", u"Projectmayhem44", u"Dhanu1612", u"Jayp710", u"Doug20r", u"Dadan121", u"TSR-eye", u"Jaguara", u"Xggi", u"Ashlar", u"HPotterLover7687", u"Wikiterry", u"Gaedheal", u"Medovina", u"Boeseben", u"Kangorilaphant", u"Brad the Producer", u"Warbler271", u"Jammurph", u"Doctorbri", u"Got more milk inc", u"Aellin123", u"MarianKroy", u"Csaw", u"Blue-Forest", u"Herreros10000", u"Squashstring", u"Itayrosenfeld", u"Kfk5927", u"SouthParkGirl", u"Cosmopolitancats", u"Wtfhaxnao", u"John Peter Hall", u"GypsyMorrison", u"Sautrelle", u"Daystarcap", u"Jesse.chia", u"Svetka19", u"Finishim", u"D34thsp4wn", u"Piotr433", u"Babs12345", u"Pubkjre", u"Rogersanchez", u"MusicGirl2132", u"ShogunMaximus", u"Tofukungfu", u"Strcprst", u"Kyle020493", u"Niraj dwdi", u"Peeter.joot", u"Storet", u"Doubleplusjeff", u"Slayher", u"Berrdatherrd", u"Adhem", u"Ceotty", u"QuetschJL", u"Doorshear", u"ToughLuckMissouri", u"TrueSquall", u"Phresno", u"Jumpinjahoda", u"AlasdairJohnstone", u"Lctngo0227402", u"Natinsley", u"Campy456", u"TheVoiceWr", u"Amichels", u"Arturo Rodas", u"Sutechan", u"Strike1866", u"SHadowkillaz", u"Genojin", u"Boxofapes", u"Illyassuttar", u"Kurkoe", u"PetrosGreek", u"Carmean", u"CooLpro", u"BillyBobPedant", u"Orfdorf", u"Johnkershaw", u"GUN SLIGGER", u"Elpasobooks", u"Jfhoup", u"LotusXP", u"Monkey191", u"MisterBootles", u"Balajilx", u"1Truth111", u"The Crunchy Nutter", u"Philip.wittamore", u"Letthegoodtimesroll", u"Smartinson", u"Greenvoid", u"Vanderdutch", u"07andrew", u"Zyanwk789", u"GeekAngel", u"Herbertsun", u"Airelon", u"Wonchop", u"Merseysidebuses", u"KDebelak", u"Mrjamieball", u"Sundayrain", u"Weridfairy", u"Flickts", u"Burnman", u"Brendon5", u"Tumbleweedjoe", u"Mayhemizer", u"St Cyrill", u"Ts.prasanna", u"Wiwoc.info", u"ShowShow69", u"Wduohfeof", u"Lucas0483", u"Apostrophe", u"Matt9311", u"Ramanvirk", u"Dj Breaker", u"Georgenguyen92", u"Gereby", u"Snipefreak", u"GustoBLSJP", u"ShapurAriani", u"V-Man737", u"Skylark of Space", u"Jrad92", u"Pie Man", u"Joost.b", u"Woofydog", u"Axlaiden", u"ManishR", u"Pipster2", u"Ecis", u"MajorHazord", u"Itsezas123", u"TexasHardOak", u"Imarksmith", u"Cane01", u"Howardroarke", u"Arameus", u"Jason S. 
Chang", u"NashCarey", u"Urbanative", u"Ahmni", u"Breath33", u"Jamie002", u"HonestArry", u"DrAlvi", u"Bjklein", u"Wphamilton", u"Klaas van Aarsen", u"Shambhushivananda", u"DaveWFarthing", u"Argus Panoptes", u"Ckhandy", u"Yonesuck", u"Wharrison", u"Ricemom2010", u"Montysdouble", u"Rectalhealth", u"13thchild", u"Geo2geo", u"Robinsuz", u"Razza123", u"HibeeJibee", u"Tuvas", u"Jomion1", u"SwissM", u"Ls110858", u"Sansa512", u"MikeAltieri", u"Galen Marek", u"Sploonie", u"Texas93", u"MentalHealthProf", u"Shatenjager", u"Skarican", u"Izenn", u"Ipwidgely", u"SchoobieDoobieDo", u"Drrahil", u"Palmtree81", u"WMA33", u"Czaplowski", u"Ballaban", u"SPORE\u2122", u"Ishna", u"Maruf1", u"JusticeForICANNsVictims", u"Redsub", u"Stphen treat", u"FrugalFooker", u"Roguescribner", u"Racerx0027", u"Joel7687", u"Kthoma3", u"Hello55", u"Efair", u"Drosev", u"Qurozon", u"Bamberio", u"Jeremski", u"Dmerrill", u"Johnnigel", u"Geropod", u"47timmy", u"Pimpachu", u"Imstillhere", u"Toniroxxx", u"D! Studio", u"Shawnbranch", u"Megaconvention", u"Mleder", u"Spuddy 17", u"Guy.with.the.hat", u"Roguemaster83", u"Terence75", u"Entropy9", u"Sarmadhassan", u"Amx390", u"Bombardier PTI", u"Wayfool", u"Saxsa", u"Eloc Jcg", u"Chege waiyego", u"Fullmaster", u"Tranchera", u"Curtis.Stiles", u"Progressdetroit", u"Fokken 21 sasco", u"Duesentrieb", u"Eli77e", u"Independent victorian schools", u"Angelesguavi", u"Roberthilley", u"Ucrc multimedia", u"Emba02", u"RedIsaac", u"Winmussa", u"Tritrime", u"Zbaloca", u"Topspin87", u"Postlewaight", u"Zero705", u"Acg768", u"Depaultivo", u"Voremaster", u"Naep1", u"Mark im wiki", u"Pete173", u"Brooklyn5", u"Matth i", u"Brogan50", u"Td1racer", u"Eventuallyparallel", u"MusicianOutletNetwork", u"Smishek", u"Fathersoc", u"Croatianpimp15", u"Verainia", u"Hffman", u"Vogels", u"Tylerramos", u"TwilightDusk", u"Danteorange2012", u"Bkwatson23", u"Naveenkumarcm", u"Kitty katt woman", u"Carter88", u"Xxrogerxx", u"Akaiiribbon", u"JimCorrigan", u"Epsilonsa", u"JLThorpe", u"Ellenbrown", u"Toolingu", u"Finchef", u"MugenSeiRyuu", u"Marko Levy", u"Gregiscool14", u"ChristopherCaufield", u"Peteycannon", u"Megalon152", u"AlexaHughes", u"Loresayer", u"Spammy41", u"Spt8719", u"ITPerforms", u"Rajpoot.vivek", u"Justcop3", u"Xycl0ne", u"Lemmio", u"Tucansam420", u"Rfelten", u"Silver95280", u"Laoper", u"Ricksy", u"Mjunnior", u"Takeshimcaphee", u"Pholower", u"WikiAdminTheOneAndOnly", u"Deffydeffydeffy", u"Chuckstudios", u"Dshallard", u"Ampermc", u"Vsenderov", u"MayaSimFan", u"Leahcim512", u"Galista", u"Trose58", u"Neojeran", u"Erbille", u"Hanrahan", u"Zogi91", u"Deadlink", u"Dark droid", u"Ciaran500", u"BAPHughes", u"CorkenutPorter", u"GeoTe", u"Yocsilva", u"Joshnaylor", u"Ramiromasters", u"Holisticreligion", u"Aniruddha joshi83", u"XpsiGunslingerx", u"Glerf", u"Nhoangton", u"Heraldic", u"Phorner87", u"Kushiban", u"1 Cent In Mind", u"Hossein.ir", u"Dr. 
Who", u"Jermy", u"Bandco", u"Omicron84", u"Congi77", u"Bballer21212121", u"CJJC", u"ForzaJuve4ever", u"Piotrek00", u"Timmytime69", u"Rocio Nadat", u"MarzieK", u"Seventimesseven", u"Achirpertieu", u"Mailman9", u"Roger Hui", u"Colin", u"Neumann", u"Arunrajsekhar", u"Oleg Kikta", u"Marmar98", u"Isak48", u"Ustad24", u"A300st", u"MrLeeloo", u"Wallerina", u"Hurleyman", u"LizCorpse", u"Rimbanna", u"Hugh Kenrick", u"Lastexpofan", u"KAdler", u"RPD12", u"Gergivt", u"Freedomtroll", u"Ibusis1", u"Spewin", u"Horsewithnoname", u"Migssant19", u"Mark114", u"Apex Rising", u"Boney dude56", u"Lemaymd", u"THE ACT FRANCiS!", u"Yeokaiwei", u"Chhoro", u"DrHot", u"Skabbo", u"Dkram3", u"Fenlick", u"Bottles98", u"Mmukav", u"Barrington Womble", u"Boalt2761", u"Ollieman123", u"Lviv1256", u"Alex.K.NY", u"Amithaba10", u"Y Ville", u"Nicklovesgold", u"King of gold", u"SaveThePoint", u"Zoixatlua", u"Gazer", u"Pitt the younger", u"PrisonBreakguy", u"Swimbball14", u"Jill Hemphill", u"Lvbd", u"Potatoscone", u"DreadPirateBloom", u"Morebanter", u"Udipta", u"Reaper Eternal", u"Nicholas Ian B", u"Henmor", u"Koujin Sato", u"Rook3000", u"JohnTramm", u"VCA", u"Annoythefish", u"TeamZissou", u"Shilpavarma15", u"Vuvox", u"MrFawwaz", u"Benwd", u"It3986", u"Sescaugust", u"StephenHudson", u"Sir John Richardson", u"Suzaku Medli", u"Nickellis", u"Waffly", u"Andy1876", u"Justinduff", u"Bellisan", u"Okayka44", u"Gooper78", u"Jdemillo", u"Anonz3", u"CaliLincoln", u"Aminneal", u"The oldtimer", u"Sapphicqueen", u"Rubberpi1415", u"Dr V", u"Ryiinn", u"Mxcnidiot625", u"Track1200", u"Avpmechman", u"Nrsinn", u"Seawatch", u"WarpstarRider", u"Pagecreator11", u"Allen222", u"Pinkchaddis", u"Fasttrax", u"RamBow", u"WinBish40", u"Lithiumflower", u"Mover85", u"Agnarb", u"EliasRizkallah", u"Nambitiger", u"Armymp", u"William Mostmans", u"Curiousinsc", u"Newzild", u"Witzulu", u"AlenWatters", u"Wesino", u"Jugga", u"Tarun88", u"Vachelist", u"Squid tamer", u"Aks khandelwal", u"General Lighting", u"Jacekmw", u"Vulturejoe", u"Z-man888", u"DHR", u"Celebrei", u"Cus.moritz", u"Ubikman", u"Petesamson", u"Tessavee", u"Innov8tor", u"Micah hawkins", u"Kronermark", u"Cassie1944", u"Chamness", u"Adalger", u"Kiki360", u"Mvanderw", u"Dark10prince", u"Knight418", u"BaconFat", u"Twiggaland", u"Mental monk", u"Armilos", u"William Wollis", u"D.wine", u"Stewart222", u"Thedazinator91", u"Shadyville ent", u"Miyawaki", u"Failure, Duke Of Hull", u"Emasengil", u"PhysRevB", u"Monkeykillzbanana", u"Mattyboi2k7", u"Trisdee", u"Zozoa", u"Jj2k9", u"Ouch", u"Dukefan73", u"Morris the PIank", u"Bwilliamson54", u"UltraCaution", u"Faerra", u"Bob2222", u"Front of House Gary Howard", u"JoaoRicardo", u"On the block", u"TobiasK", u"Liquidcat", u"MSanchez252", u"Miomiomio", u"Kashwaa", u"Primal400", u"Cashcraft", u"Baritone10", u"Andi1235", u"Thazzy", u"Balti kisserfish", u"Majordomo41", u"Sweeney09", u"Renatab1977", u"TLPA2004", u"Nasch", u"Fireantman", u"Kozzz", u"M.Levieva", u"Mlhooten", u"Tonybarrese", u"Pnerger", u"TechsArkana", u"PoeticX", u"Paf92", u"Blather", u"Camtang", u"XN", u"Jesper Carlstrom", u"Oyeguru", u"Ana Hilinsky", u"Capsfan100", u"Shinobi558", u"Ipso99", u"Leestark2630", u"Apexirons", u"HunterST420", u"My opinion 9", u"TV Tony", u"Octi", u"Jemsan", u"Palestinianolives", u"Quadra", u"Felix 12 22", u"MnoruS", u"Kittysmith123", u"Sniderman11", u"Travelbed", u"RussPorter", u"Wizzair", u"Earlyoptions", u"Klutzulmaniack", u"Pookiekook", u"ThG", u"R2w08", u"TimQuinn", u"Albert Einstien's ghost", u"Andrea Ambrosio", u"Ianlucraft", u"Andyroo316", u"Monkeycheetah", u"Ianml", u"The Lesser 
Merlin", u"Zeinounawwad", u"Amcd79", u"Basvb", u"Philturdus", u"BenGGriffin", u"Zrusilla", u"[email protected]", u"Vanbelle.t", u"Heffa105", u"Colonel Bask", u"Ddddp", u"Keysersose", u"Markkellner", u"NorseStar", u"Lalapoem", u"Tanbitch", u"Oyster Head", u"Wimimei", u"Johnhenney", u"Maxwoods", u"Diorboi", u"SPUJ", u"Scewing", u"Cberth", u"Bobwhitney", u"Rizzss", u"Piletavacia", u"Sfajacks", u"PoolboyXReturns", u"Yoxbox", u"Shadesofgrey", u"Sterlingjones", u"Dgritzer", u"Mr.Z-man.sock", u"Imashomer", u"Lc3869-X2481187", u"Tube-dweller", u"Herbs505", u"LeighBCD", u"Venticius", u"Vukeidge", u"Wickman08", u"Trefork", u"Hawkeye1066", u"Nab82ba", u"Thefe2", u"Himoki", u"Bigphancy", u"Niallio93", u"Oraclewiki", u"Hatman22", u"Salehara", u"Alcyone LS-L", u"Mattockswinga", u"CaitlinTheMiller", u"T271", u"Lostguy", u"Masvingonetara", u"Verdecito", u"Peachcakemike", u"Nickfamesrich", u"Fdonck", u"Jzlevine", u"Pandathatpwns", u"KatieNugent", u"Hollisterlover90210", u"Jcub50", u"Manosij.m", u"Kaitoein", u"\u05d0\u05d7\u05d3 \u05e9\u05d7\u05d5\u05e9\u05d1", u"Tony Hill's Fiction", u"Rohanrailkar", u"Ujeanx", u"Solonggoodfaith", u"Steveodwyer", u"Erik E VestBot", u"Wako2", u"Damnbutter", u"Skaderali", u"Cultcinemacritic", u"Lee7733", u"Rankles", u"Almarq", u"Mcgalas", u"Balfabio", u"Jaimecaro", u"Doodle48", u"Brightwife", u"Sarahcola", u"Andynapso", u"Sketchmoose", u"Blakkrussian", u"Clone 9", u"Phonecloth 554", u"CR7", u"The Elders", u"Ampedasia.com", u"Mattebers", u"Patternplay", u"Rezondeck", u"Ed.vasquez", u"Anteojito", u"Johnsonooijunsheng", u"Photomajor", u"Polotet", u"Lucarelli", u"Harley124", u"Marblewonder", u"Yoursvivek", u"Peterandersson", u"Eduardolegato", u"Blessed123", u"XXxBridget KaguyaxXx", u"Jchardcore68", u"KtrROCKS", u"Parthrb", u"XRaySpecks", u"EsonLinji", u"Xander271", u"Wikibsas", u"AdamReed", u"CrispinBurke", u"Palavdin", u"City stoke", u"Jakegn", u"Torridsoup", u"Sashi1215", u"Plis", u"ESF KRIS", u"Alex6219", u"Roman Spinner", u"Sam Chase", u"Davgrig04", u"Say Sal", u"Polin4eto", u"Truthseekwhere", u"Heri Joensen", u"Crouchingtigershiningdragon", u"Duncan A Wood", u"TrumpetManLA", u"Soksan", u"Jblockman", u"Cyberhippy13", u"Oudeis2002", u"Wasserlasser", u"Wishnav", u"Chwixz", u"Hello981", u"Ccc999ccc999", u"Tjuk09", u"4321carla", u"Waterloo86", u"XieChengnuo", u"NursingAJJ", u"Risteristevski", u"Celis", u"Fcbcn", u"Tothemaxxx", u"Wolfiez", u"Wolfgangvenkman", u"Mammu wiki", u"Warchdr7906a", u"Mostfamiliar", u"Thewesker", u"Cypher3c", u"Bobover1", u"Ewholman", u"Indiatoday", u"Grady123", u"Bob123478", u"Mykost", u"Rantanners", u"Barnikel", u"Josephgwilson", u"Rebeldiver334", u"Shutsal", u"Cllrjhumphries", u"Yosupdawg97", u"Nicktate", u"Johnsonjeff2462", u"775500rr", u"Shipluhoque", u"GLstyle", u"Angelofdeath275", u"Sfmustang", u"Invasorfunk", u"Griffithscp", u"LehighValley", u"Wrightbus", u"Romzombie", u"Jcarr99", u"Jim drury", u"ScienceBoffin", u"Guido del Confuso", u"Little bush", u"Yakbasser", u"Lotta Lindgard", u"Constablejohn", u"WikiRedSox", u"Ubaldop", u"Pulsadinura", u"PlasmaRadio", u"Immctony", u"Simen 88", u"Pdyako1", u"Bel.Alena", u"Shawnmark", u"Coolbloke94", u"Allstardude874", u"Shearer99", u"Andrew D White", u"Cominoverdahill", u"AuntFlo", u"Lasweeney", u"Aamair", u"SharylM", u"Uspastpresentwatch09", u"Peter Kaz", u"Heather Denarski", u"Sir Teh", u"Yezzor", u"Publius52", u"JimmydaWorm", u"Karlwinterling", u"Omegin", u"Satyenkb", u"Kaisando", u"Weird Al", u"Ydd2112", u"Mcdonald mdhluli", u"Pennykayetr34", u"Tauwarrior", u"ConservativeBlogger", u"Frobitz", 
u"Filmlabpotato", u"Matthew.Bohrer", u"Gill Vachon", u"Roflmao2", u"Ostwald", u"Mang01", u"JulieFlute", u"Videoismylife", u"109Eastside", u"3manu3l1lov3", u"Frase344", u"Steven crayn", u"Orobert", u"ROBoFox64", u"Unias", u"Of7271", u"Random Tree", u"Stormfairy64", u"Meerkatlll", u"Arenarax", u"Carpetburn4", u"Archaic d00d", u"Inverse Tiger", u"Idahobeef", u"Elekimante", u"Pongo101", u"Earliac", u"Momotcm13", u"XxGreenDayzxX", u"Calledto", u"Ggjacobsen", u"RRRRowcliff", u"Urallpoofs", u"Seanlanefuller", u"Flops83", u"Alkalada", u"Ahnenerbe", u"Brymor", u"Tom one", u"Irazfan", u"Billhpike", u"LordSneazy", u"Inkki Bookman", u"Glenellyn", u"Lazried57", u"TennFlamecaster", u"RP9", u"Aish2kar", u"Mprulez", u"Arnelgarcesa", u"The Dopefish", u"Tikkiofheart", u"WhispertoTheWind", u"David Clifford Taylor", u"Bookster", u"WWETNA4Life", u"Treefox", u"Jaquilong269", u"Bmimages", u"Camoq", u"Mringkeith", u"Timmurphyjd", u"Tred85", u"Paeisi", u"Cocode", u"Didier Marie", u"Vudujava", u"Charls Andre", u"Poizunus", u"Jamed930", u"JonasLove120", u"Dgm3333", u"Umrain", u"Beachie2k", u"Stephen Thompson", u"Professorteebrook", u"Paolostrat", u"HAL175", u"Kidtaekwondo", u"Sanilsweb", u"Cheerygirl02", u"Sunascythe", u"Xitalianrobotx", u"Oxfaminternational", u"Brynhilde", u"KingReidy", u"Kittycat0143", u"Normanthegnome", u"Simon Webb", u"Aux1496", u"BristolRuss", u"Edelwei\u00dfpiraten", u"Bcm1309", u"Selenagomezfan67", u"Deafcuer", u"One2one", u"J-melz", u"Unwritten 1", u"Kanonkas", u"Cgario", u"Globulinapr1966", u"Devchander", u"Parkywiki", u"Idk1955701485", u"Mayachanbeach", u"Seogeek", u"Glenford", u"Ciprian Dorin Craciun", u"Brad's playground", u"YuLin", u"Manderson71", u"StudsvikUK", u"Computerperseon", u"Beauknows30", u"Irksomekitty", u"Etao77", u"Eastbeverly nsh", u"Elibart51", u"AveOK", u"John Bentley", u"Dunneknow", u"Iamuurme", u"Adrian lopez", u"Alexander 007", u"Carcilinie1005", u"James possamai", u"Kkstrong0", u"Phcheck", u"Epona4", u"Paradox4600", u"AdvancedGamer", u"Colonyrice", u"The13thMan", u"Johlin", u"Rya Min", u"JohnofFDA", u"CatNoir", u"Pranav1833", u"Mayhem80", u"Watchtower2323", u"Jzaltman", u"Billxoxo", u"Geedubber", u"Leedude", u"Onelittleindianrecords", u"Samavastata", u"Yinon", u"Kamensk", u"Tyler10M", u"XMomiji Bunnyx", u"Mjkirk12", u"Davygrvy", u"Ruble3", u"CGCD", u"Fleish", u"MungVacume", u"Clockwork3", u"AcidDJDennis", u"Jedgeco", u"Bearje", u"Ryovercash", u"Shadizilla", u"Mina7", u"Tenorcnj", u"Amareshchandel", u"Jhump5", u"Stop.spam", u"Weapon 10", u"Craftyminion", u"Phoenixwiki", u"Spanmandoo", u"S4119292", u"Andreiacute", u"Harrylister", u"Cocoaverification", u"Jagoffdeluxe", u"Jack Forest", u"Avars", u"Peck123", u"Elijahzz", u"Silverthief2", u"Rappoozko", u"ForeverTemptin", u"A hm156", u"Ferdinand12", u"Darktorres", u"Knuddel8", u"Soccer30", u"Al razibaba7511", u"Rusbuldtra", u"Skeaton", u"Jeffersle", u"Luecaz", u"Banson", u"Stellalala", u"Ronald11", u"Undeundeunde", u"Ic0n0klast", u"Bardagh", u"ChristoferNelson", u"Abrookins2000", u"Davewrites", u"CasBak", u"Ahajali", u"Strwbrrylemon4de", u"Thegiblins", u"MyOcean AFX", u"Quanuum", u"Niemeyerstein en", u"Richardmw", u"Jaguta", u"Pacsafe", u"Haardt", u"Vikes93", u"Krayner89uk", u"Trevelyaninc", u"Ajb4567", u"Shoshanadh", u"Keinlan", u"Cappio", u"Archibald Leitch", u"TobiasMar", u"B.Z.", u"EDDIE.A.WHITE", u"Raynedzel", u"Waterplace08", u"Muccassassina", u"John thompson 456", u"Rahsalumni", u"Indium", u"He Takes A Whiskey Drink He Takes A Cider Drink", u"Jomacajo", u"Chellywonga", u"RLM2007", u"PointedEars", u"RhoBeau86", 
u"M1j2c3s4", u"Markisevil", u"Hahathemoose", u"Darklightning1", u"Niklas berg", u"Ballinderry", u"MisTQ", u"Feanor Eldarian", u"Kc227", u"Xaxxon", u"Broilerman", u"Jaymanlb", u"Bearhunter1", u"Bumbumjal", u"Sagarvk", u"Jptrash", u"Alb28", u"Sm ashiq", u"2Nimprov", u"Chris66", u"MAKevin", u"Iamnothuman", u"Chubbyfunster", u"Inevitablefate", u"Universaladdress", u"Mnomy88", u"Lichtenauer", u"Keltron", u"Danielbennis", u"Muijzo", u"Gcaw", u"Purple Sprite", u"Judgemug", u"Fanager", u"Cookman", u"PCE", u"CryptographerX", u"Andouble", u"Asianent", u"Rosi Crane", u"Srk1999", u"Tonyharp", u"Slusk", u"Peterbruce01", u"Morselpix", u"Edifei", u"Realaudio2007", u"Robtj966", u"Conroy2007", u"AcademicResearch", u"Brett hunter", u"Rab sb", u"Mmundik", u"Jgateau", u"Mazzard", u"Tuyenvo", u"Desert Prodigy", u"Mray", u"Little Baby King", u"Jason Hughes", u"Tonytml4life", u"The Sceptical Chymist", u"Gaimer555", u"RobofLeeds", u"Duhos", u"OMDxAdept", u"Zoffdino", u"Goatlegs", u"3MP", u"Kimbokat", u"Jpedia", u"Mr.24SevenCrashHolly", u"Satanic tica", u"Andbur", u"Rolo Tamasi", u"Jgunaratne", u"Lmaia", u"JPeach35", u"Dunemonkey", u"Hughveal", u"PopNews1", u"Akk2261", u"Jomaine23", u"JuhoP", u"Ssb9", u"Jauers", u"Vout25", u"Wccaccamise", u"Louie4sublime", u"Onekennedy", u"Scottu87", u"Frank0508", u"Rooner623", u"April victoria", u"Runolfurh", u"Zabadinho", u"Atsac1", u"CapitalLetterBeginning", u"Bibliophile319", u"Emylonas", u"Xchickenx", u"Maj0r Tom", u"Seb-moore", u"Topfacts", u"High Suey Fam", u"Lazloholifeld", u"Whatyouchating", u"RicardoFachada", u"Aerillious", u"Vasileiosg", u"DailyGuy", u"Brian.fsm", u"Fsharpflatmajor", u"Homeworkmachine 415", u"NuNinja", u"Nextinline2009", u"Alfierise", u"Achilles852", u"Gregolma", u"Julien300", u"Valeriavillas", u"Ymlc", u"Bpradhan wiki", u"RpgN", u"Mtoddy", u"Mz.3xclus1ve", u"DavidMichie", u"Vineetparolia", u"Arvinz0", u"Prasannatb", u"SafwatChoudhury", u"Chandman", u"Willster3", u"Yellowroto", u"Dogbate", u"Audiocream", u"Joshwest1", u"Bobby120609", u"Grinchsmate", u"Chickenroyal", u"MIKE300488", u"Ksaraf", u"Janizary", u"GWWalters", u"Wolflairviper", u"Hurmata", u"Mgn3000", u"Mrthemike", u"Roger balmer switzerland", u"TunaEduardo", u"Danzero", u"SheepdogSteve", u"Cricket3536", u"Lerele45", u"Keightlynn", u"Marchelo88", u"Solidkevinx", u"Tim Nicklin", u"Butt50", u"Tarja Lawless", u"Gdavidp1", u"DavidFairbrother", u"A-strange", u"Rv011995", u"Lincolnshire81", u"Margaretilogan", u"Steviemcmanaman", u"Dutchman Schultz", u"Englishcompo", u"Vinnygribbin", u"Rainbowfootie", u"Chetanr", u"Arsareth", u"Stephenjohnson", u"Tom Gardner", u"Nicole8675309", u"Thortrac", u"Pcleary1960", u"Roctobercpt", u"Georgebaily", u"Guido Dimicelli", u"Pitxulin1", u"Adrienzo", u"Dr. 
Rondart", u"TCHJ3K", u"Wzeqiri", u"Xcomaxgirlx", u"Stormydawn", u"Angah hfz", u"Oakey47", u"Jon-jon-07", u"Danielgem", u"Painterjoe4you", u"Zorn169", u"Spiritflare", u"Lazzaking", u"Cazzyk", u"Jamie Townes", u"Nathanflaig", u"PGIB", u"Ravelinks", u"Jondich", u"Pierre2012", u"JorgeFiora", u"Xx.Simplicity", u"Robert Hansler", u"Watchmasters", u"Ojasb", u"Eastoflabrea", u"Oxalate", u"Nixda", u"Coolo52", u"Hand of Bjesomar", u"Kevinjohnsen", u"Ahmed dawod", u"Seelbach", u"Dextercioby", u"Edwin Hamilton", u"TomYems", u"Justtheinformation", u"Dwees", u"Pdiamond", u"Dellelce", u"Open poppyseed", u"Afg96", u"Shadowfox887", u"Foragenius", u"Jag362343", u"Chris neilson", u"Joelol75", u"Tarpy", u"ToddGamblin", u"Zariane", u"Nickoftime9398", u"Cassius282", u"Emorej75", u"Magiczhang93", u"HumbleAdviser", u"Bhig3", u"Mylesp77", u"Tomatetom", u"ForzaAzzurri75", u"Mwsletten", u"Dpickrel", u"Extra bases", u"Fisher654", u"Miralayan", u"PowerRanger101", u"Layoneil", u"Mdovell", u"Sukhjinder101", u"Partigenary", u"Benjata", u"Pygmypony", u"Pimpindawg", u"Skylark414", u"Robertlee87", u"Satchel Pug", u"Eanc", u"ChargingEve", u"Dafuzzy", u"Sfalkman", u"Naif", u"Jogvanj", u"Haffkineknowsbest", u"Metalignition", u"Quakegod667", u"Theresaduplessis", u"Aivy vu", u"Richsadams", u"Sh000gun", u"Pasci", u"Mirallwood", u"Kurtitski", u"MissB1000", u"Dcb", u"Bongwarrior666", u"Crowhunter", u"Bdorminy", u"Kimchi.sg", u"Blogmum", u"Just1Page", u"Danemaricich", u"Leadpipevigilante", u"Sun Chips", u"Medico Dimamico", u"Yahyayahya", u"Sochitalong1994", u"Wikigod01", u"Lia Star", u"Lordleonusa", u"Jaker561", u"BondedByBlood", u"Andymatthews91", u"RedRobin89", u"Halotroop", u"Eddl", u"Chaoshorrorlord", u"Bldolson", u"FallenAngelsandDemons", u"Tshtob", u"Asdofindia", u"Don-golione", u"Joseph Dwayne", u"Jimmysdms", u"JJN Samuel", u"I fuck cows, moo", u"Wilfred Day", u"Antiochus21", u"Ryan Higgitt", u"Shootingstar", u"Benak007", u"Zaiise", u"Imperfectdan", u"Fallout76", u"Ochkivodki", u"Ibrahim.noor", u"Sung715", u"Moleman 9001", u"SkippyTheWonder", u"Norwhales's tusk", u"Shinji12345678", u"Pseudonym 101", u"Identityandconsulting", u"Jahanna80", u"Pirateman69", u"HectorVK", u"Melcsw", u"Karimi cae", u"Noodlezman", u"Lemi4", u"Jerod209", u"Anung Mwka", u"Joejitsumd", u"Southsidegb", u"James Crippen", u"Mariano.iglesias", u"Tommytom812", u"Abalcar", u"Amfsdfgfyxf", u"Sasukelololol", u"Namsn76", u"Peacham", u"Ashleymacdonald", u"PrivateSniper", u"Ryanblock", u"Musicfan15", u"Owenmarriott", u"Razih", u"Andytopia", u"Richardpaulhall", u"Nandan uk", u"Gunsnroses65", u"HUJINTAOSUCKS", u"Galaxysong", u"Ratz7", u"Wizardboyniga2", u"ChelseaFru", u"Singelik", u"Antmatt", u"Roger C Smart", u"Jagged Fel", u"Fritzdsouza", u"Edwardo69", u"Corinthians12", u"HaloFreak2116", u"MysticGem15", u"Hg6996", u"Vyndel", u"Jkay747", u"Ksandvik", u"Dell970", u"Tlelkins", u"Broken Kid", u"Kwells54", u"Titostacos", u"Hthtoy", u"I am a tree", u"Adrian1171", u"Gruveloper", u"Alexhustle", u"Shrpa01", u"Basket-ball-15", u"CanadianTyro", u"Flaadrom", u"Flane555", u"Andersman", u"Donshanks", u"Maximus Nova", u"Now2blue", u"Mdineenwob", u"Yukisealive", u"FalSkyWolf", u"Vilmos", u"Boxflux", u"Cathyland", u"Sanjayakumarsahu", u"Brody's Ghost", u"Erix15", u"Johnnysast", u"Emixpearl", u"Manchurian Tiger", u"Kriss22", u"Sageerrant", u"Samzo999", u"Chrones00", u"Jimmy70", u"Joshualeefreeman", u"Ilayaraja plk", u"Davidnason", u"NCNH-DAD", u"Kdickson", u"HappyGod", u"Ddr1234", u"Pfenzer", u"Valhalla08", u"Dbuffington", u"Buzzygo", u"Mike Hackney", u"Schvan", 
u"\u0104\u017euolas", u"Wineboy3", u"Davegreen200", u"N.Noel", u"Oxpaulo", u"Nostradamus1984", u"Yunie971", u"Jasumi", u"Dominatus", u"Mightyafro", u"Toyota6291", u"GaStormy", u"Tommyt", u"Vaividhya", u"Cambridgeartgallery", u"Ahmadbaig2002", u"Firebomb Fritz", u"Athart7", u"Greattits", u"Jcsham", u"JFM110", u"Obelia", u"Jmax-", u"Brcg", u"Bbrute", u"Ptomato", u"Navi83act", u"Zepheus", u"YourNewCEO", u"Whodidyouexpect", u"OriginalPiMan", u"NRZarrugh", u"Mnuber", u"10x10x10", u"Morton dill", u"Mazim82", u"Onesimos", u"TehPhil", u"Ryn823", u"Ellentveit", u"Francespeabody", u"Jmr323", u"Transistorapparat995", u"Birajghosh", u"Riyaz Ahamed", u"Akatie", u"Windspy", u"Gmacbrown", u"Ridersbydelta", u"Keerabl", u"AeomMai", u"Tiggs000", u"Gnomeliberation front", u"Dillonjm1217", u"Arashiology", u"Palm9999", u"ErinHicks", u"Mf123", u"Vodkamad", u"Julienls", u"Jpmora", u"Marshmellow16", u"Gregorthebigmac", u"Neurosciencelogin", u"DJ BatWave", u"Dontmesswittx18", u"Bowie152", u"Omniposcent", u"Runesm", u"Donnanna", u"Theonlycase", u"DoubleSidedTape", u"Qwerty The 3rd", u"Jess010", u"Hairylip", u"Britball", u"D'Anconia'sChosen", u"Metallicca", u"Manjuap", u"Psmeers", u"Paulypaul", u"Camerataducale", u"Mikael07", u"Ysen", u"Baaaa", u"Pete.rain", u"Mistrydude", u"Ralphiemalphie", u"Andrew leto", u"Tripleh7092", u"Jasminko", u"Sidereal2", u"Erager", u"Bubba318", u"Dubedjammy", u"Htrappeniers", u"Hamidb", u"Heliostellar", u"Nanahuatzin", u"Ability and agility", u"JPEsperanca&FernandaCorreia", u"Ken Estabrook", u"NecroAsthar", u"Giovannitomaro", u"Nangeroni", u"Pithchneed", u"MGhaith", u"Popa01"]
INPUT_FILE = "session"
OUTPUT_FILE = "user_0" |
airbnb/airflow | refs/heads/master | airflow/providers/amazon/aws/sensors/cloud_formation.py | 5 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains sensors for AWS CloudFormation."""
from typing import Optional
from airflow.providers.amazon.aws.hooks.cloud_formation import AWSCloudFormationHook
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class CloudFormationCreateStackSensor(BaseSensorOperator):
"""
Waits for a stack to be created successfully on AWS CloudFormation.
:param stack_name: The name of the stack to wait for (templated)
:type stack_name: str
:param aws_conn_id: ID of the Airflow connection where credentials and extra configuration are
stored
:type aws_conn_id: str
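    :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
    :type region_name: str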
:param poke_interval: Time in seconds that the job should wait between each try
:type poke_interval: int
"""
template_fields = ['stack_name']
ui_color = '#C5CAE9'
@apply_defaults
def __init__(self, *, stack_name, aws_conn_id='aws_default', region_name=None, **kwargs):
super().__init__(**kwargs)
self.stack_name = stack_name
self.hook = AWSCloudFormationHook(aws_conn_id=aws_conn_id, region_name=region_name)
def poke(self, context):
stack_status = self.hook.get_stack_status(self.stack_name)
if stack_status == 'CREATE_COMPLETE':
return True
if stack_status in ('CREATE_IN_PROGRESS', None):
return False
raise ValueError(f'Stack {self.stack_name} in bad state: {stack_status}')
class CloudFormationDeleteStackSensor(BaseSensorOperator):
"""
Waits for a stack to be deleted successfully on AWS CloudFormation.
:param stack_name: The name of the stack to wait for (templated)
:type stack_name: str
:param aws_conn_id: ID of the Airflow connection where credentials and extra configuration are
stored
:type aws_conn_id: str
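    :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
    :type region_name: str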
:param poke_interval: Time in seconds that the job should wait between each try
:type poke_interval: int
"""
template_fields = ['stack_name']
ui_color = '#C5CAE9'
@apply_defaults
def __init__(
self,
*,
stack_name: str,
aws_conn_id: str = 'aws_default',
region_name: Optional[str] = None,
**kwargs,
):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.stack_name = stack_name
self.hook: Optional[AWSCloudFormationHook] = None
def poke(self, context):
stack_status = self.get_hook().get_stack_status(self.stack_name)
if stack_status in ('DELETE_COMPLETE', None):
return True
if stack_status == 'DELETE_IN_PROGRESS':
return False
raise ValueError(f'Stack {self.stack_name} in bad state: {stack_status}')
def get_hook(self) -> AWSCloudFormationHook:
"""Create and return an AWSCloudFormationHook"""
if self.hook:
return self.hook
self.hook = AWSCloudFormationHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
return self.hook
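# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the provider source): a minimal DAG wiring
# both sensors, kept as a comment so the module's import-time behavior is
# unchanged. The DAG id, stack name, schedule, and poke interval below are
# illustrative assumptions, not values taken from this file.
#
#   from datetime import datetime
#   from airflow import DAG
#
#   with DAG(dag_id="cfn_watch", start_date=datetime(2021, 1, 1),
#            schedule_interval=None) as dag:
#       wait_create = CloudFormationCreateStackSensor(
#           task_id="wait_for_create",
#           stack_name="my-stack",   # templated field
#           poke_interval=60,
#       )
#       wait_delete = CloudFormationDeleteStackSensor(
#           task_id="wait_for_delete",
#           stack_name="my-stack",
#           poke_interval=60,
#       )
#       wait_create >> wait_delete
# ---------------------------------------------------------------------------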
|
ULHPC/modules | refs/heads/devel | easybuild/easybuild-framework/easybuild/tools/repository/gitrepo.py | 2 | # #
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Repository tools
Git repository
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Toon Willems (Ghent University)
@author: Ward Poelmans (Ghent University)
@author: Fotis Georgatos (Uni.Lu, NTUA)
"""
import getpass
import os
import socket
import tempfile
import time
from vsc.utils import fancylogger
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import rmtree2
from easybuild.tools.repository.filerepo import FileRepository
from easybuild.tools.version import VERSION
_log = fancylogger.getLogger('gitrepo', fname=False)
# optional Python packages, these might be missing
# failing imports are just ignored
# a NameError should be caught where these are used
# GitPython
try:
import git
from git import GitCommandError
HAVE_GIT = True
except ImportError:
_log.debug('Failed to import git module')
HAVE_GIT = False
class GitRepository(FileRepository):
"""
Class for git repositories.
"""
DESCRIPTION = ("A non-empty bare git repository (created with 'git init --bare' or 'git clone --bare'). "
"The 1st argument contains the git repository location, which can be a directory or an URL. "
"The 2nd argument is a path inside the repository where to save the files.")
USABLE = HAVE_GIT
def __init__(self, *args):
"""
Initialize git client to None (will be set later)
        All the real logic is in the setup_repo and create_working_copy methods
"""
self.client = None
FileRepository.__init__(self, *args)
def setup_repo(self):
"""
Set up git repository.
"""
try:
git.GitCommandError
except NameError, err:
raise EasyBuildError("It seems like GitPython is not available: %s", err)
self.wc = tempfile.mkdtemp(prefix='git-wc-')
def create_working_copy(self):
"""
Create git working copy.
"""
reponame = 'UNKNOWN'
        # try to get a local clone of the remote repository
try:
client = git.Git(self.wc)
client.clone(self.repo)
reponame = os.listdir(self.wc)[0]
self.log.debug("rep name is %s" % reponame)
except git.GitCommandError, err:
# it might already have existed
self.log.warning("Git local repo initialization failed, it might already exist: %s", err)
# local repo should now exist, let's connect to it again
try:
self.wc = os.path.join(self.wc, reponame)
self.log.debug("connectiong to git repo in %s" % self.wc)
self.client = git.Git(self.wc)
except (git.GitCommandError, OSError), err:
raise EasyBuildError("Could not create a local git repo in wc %s: %s", self.wc, err)
# try to get the remote data in the local repo
try:
res = self.client.pull()
self.log.debug("pulled succesfully to %s in %s" % (res, self.wc))
except (git.GitCommandError, OSError), err:
raise EasyBuildError("pull in working copy %s went wrong: %s", self.wc, err)
def add_easyconfig(self, cfg, name, version, stats, append):
"""
Add easyconfig to git repository.
"""
dest = FileRepository.add_easyconfig(self, cfg, name, version, stats, append)
# add it to version control
if dest:
try:
self.client.add(dest)
except GitCommandError, err:
self.log.warning("adding %s to git failed: %s" % (dest, err))
def commit(self, msg=None):
"""
Commit working copy to git repository
"""
host = socket.gethostname()
timestamp = time.strftime("%Y-%m-%d_%H-%M-%S")
user = getpass.getuser()
completemsg = "%s with EasyBuild v%s @ %s (time: %s, user: %s)" % (msg, VERSION, host, timestamp, user)
self.log.debug("committing in git with message: %s" % msg)
self.log.debug("git status: %s" % self.client.status())
try:
self.client.commit('-am %s' % completemsg)
self.log.debug("succesfull commit: %s", self.client.log('HEAD^!'))
except GitCommandError, err:
self.log.warning("Commit from working copy %s failed, empty commit? (msg: %s): %s", self.wc, msg, err)
try:
info = self.client.push()
self.log.debug("push info: %s ", info)
except GitCommandError, err:
self.log.warning("Push from working copy %s to remote %s failed (msg: %s): %s",
self.wc, self.repo, msg, err)
def cleanup(self):
"""
Clean up git working copy.
"""
try:
self.wc = os.path.dirname(self.wc)
rmtree2(self.wc)
except IOError, err:
raise EasyBuildError("Can't remove working copy %s: %s", self.wc, err)
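# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of EasyBuild): the call sequence implied by the
# methods above, kept as a comment so module behavior is unchanged. The
# repository URL and subdirectory are illustrative, and `cfg` / `stats` stand
# in for a parsed easyconfig file and its build statistics.
#
#   repo = GitRepository('git@gitserver:easybuild-easyconfigs.git', 'ebfiles_repo')
#   repo.setup_repo()
#   repo.create_working_copy()
#   repo.add_easyconfig(cfg, 'GCC', '4.8.2', stats, append=True)
#   repo.commit('built GCC 4.8.2')
#   repo.cleanup()
# ---------------------------------------------------------------------------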
|
brianjgeiger/osf.io | refs/heads/develop | api/chronos/serializers.py | 4 | from rest_framework import serializers as ser
from rest_framework.exceptions import NotFound
from api.base.exceptions import Conflict
from api.base.serializers import JSONAPISerializer, RelationshipField, LinksField, ConditionalField
from api.base.utils import absolute_reverse, get_user_auth
from osf.external.chronos import ChronosClient
from osf.models import ChronosJournal
from osf.utils.workflows import ChronosSubmissionStatus
class ShowIfChronosSubmitter(ConditionalField):
"""
If the ChronosSubmission instance's submitter is not the current user, hide this field.
"""
def should_show(self, instance):
request = self.context.get('request')
auth = get_user_auth(request)
if auth.logged_in:
if instance.submitter == auth.user:
return True
return False
class ChronosJournalRelationshipField(RelationshipField):
def to_internal_value(self, journal_id):
try:
journal = ChronosJournal.objects.get(journal_id=journal_id)
except ChronosJournal.DoesNotExist:
raise NotFound('Unable to find specified journal.')
return {'journal': journal}
class ChronosJournalSerializer(JSONAPISerializer):
class Meta:
type_ = 'chronos-journals'
filterable_fields = frozenset(['title', 'name', 'id'])
id = ser.CharField(source='journal_id', read_only=True)
name = ser.CharField(read_only=True)
title = ser.CharField(read_only=True)
links = LinksField({'self': 'get_absolute_url'})
def get_absolute_url(self, obj):
return absolute_reverse('chronos:chronos-journal-detail', kwargs={'journal_id': obj.journal_id})
class ChronosSubmissionSerializer(JSONAPISerializer):
class Meta:
type_ = 'chronos-submissions'
id = ser.CharField(source='publication_id', read_only=True)
submission_url = ShowIfChronosSubmitter(ser.CharField(read_only=True))
status = ser.SerializerMethodField()
modified = ser.DateTimeField(read_only=True)
journal = RelationshipField(
read_only=True,
related_view='chronos:chronos-journal-detail',
related_view_kwargs={'journal_id': '<journal.journal_id>'},
)
preprint = RelationshipField(
read_only=True,
related_view='preprints:preprint-detail',
related_view_kwargs={'preprint_id': '<preprint._id>'},
)
submitter = RelationshipField(
read_only=True,
related_view='users:user-detail',
related_view_kwargs={'user_id': '<submitter._id>'},
)
links = LinksField({'self': 'get_absolute_url'})
def get_absolute_url(self, obj):
return absolute_reverse('chronos:chronos-submission-detail', kwargs={'preprint_id': obj.preprint._id, 'submission_id': obj.publication_id})
def get_status(self, obj):
value_lookup = {val.value: key for key, val in ChronosSubmissionStatus.__members__.items()}
return value_lookup[obj.status]
class ChronosSubmissionDetailSerializer(ChronosSubmissionSerializer):
id = ser.CharField(source='publication_id', required=True)
def update(self, instance, validated_data):
return ChronosClient().sync_manuscript(instance)
class ChronosSubmissionCreateSerializer(ChronosSubmissionSerializer):
journal = ChronosJournalRelationshipField(
read_only=False,
related_view='chronos:chronos-journal-detail',
related_view_kwargs={'journal_id': '<journal.journal_id>'},
)
def create(self, validated_data):
journal = validated_data.pop('journal')
preprint = validated_data.pop('preprint')
submitter = validated_data.pop('submitter')
try:
return ChronosClient().submit_manuscript(journal=journal, preprint=preprint, submitter=submitter)
except ValueError as e:
            raise Conflict(str(e))
|
henridwyer/scikit-learn | refs/heads/master | sklearn/neighbors/__init__.py | 306 | """
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .approximate import LSHForest
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LSHForest']
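# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the package): a minimal classification run
# with one of the estimators re-exported above, kept as a comment so importing
# the subpackage stays side-effect free. The toy data is illustrative only.
#
#   from sklearn.neighbors import KNeighborsClassifier
#   X = [[0.0], [1.0], [2.0], [3.0]]
#   y = [0, 0, 1, 1]
#   clf = KNeighborsClassifier(n_neighbors=3).fit(X, y)
#   print(clf.predict([[1.1]]))  # -> [0]: neighbors 1.0, 0.0, 2.0 vote 0, 0, 1
# ---------------------------------------------------------------------------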
|
pythonprobr/pythonpro-website | refs/heads/master | pythonpro/dashboard/forms.py | 1 | from django.forms import ModelForm
from pythonpro.dashboard.models import TopicInteraction
class TopicInteractionForm(ModelForm):
class Meta:
model = TopicInteraction
fields = ('topic', 'user', 'topic_duration', 'total_watched_time', 'max_watched_time')
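# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the app source): validating posted
# interaction data with the form above, kept as a comment. The field values
# are illustrative, and `topic` / `user` stand in for saved model instances.
#
#   form = TopicInteractionForm(data={
#       'topic': topic.pk,
#       'user': user.pk,
#       'topic_duration': 300,
#       'total_watched_time': 250,
#       'max_watched_time': 200,
#   })
#   if form.is_valid():
#       interaction = form.save()
# ---------------------------------------------------------------------------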
|
coolxll/PyAutoRegBot | refs/heads/master | test/me/coolxll/sms/shenhua.py | 1 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Created on October 24, 2015
@author: Conan
'''
import unittest
from me.coolxll.sms.f02.aima import Aima
from me.coolxll.sms.shjmpt.shenhua import Shenhua
class Test(unittest.TestCase):
def test_shenhua(self):
sh = Shenhua(1731)
mobile = sh.getMobileNum()
text = sh.getVcodeAndReleaseMobile(mobile)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testf02']
unittest.main() |
csomerlot/WIPTools | refs/heads/master | addin/Install/ProtLeng.py | 1 |
# Import system modules
import sys, os
import Helper
import regression
import arcpy
from arcpy import env
arcpy.env.extent = "MAXOF"
from arcpy.sa import *
hp = Helper.Helper(sys.argv)
try:
# Local variables
bmp_noclip = sys.argv[1]
# Vectors
vecMask = os.path.join(hp.SWorkspace, "vectMask.shp")
BMPpts = os.path.join(hp.SWorkspace, "BMPptsPL.shp")
arcpy.RasterToPolygon_conversion(hp.Mask, vecMask, "SIMPLIFY", "Value")
hp.SetPIDs(bmp_noclip)
arcpy.Clip_analysis(bmp_noclip, vecMask, BMPpts)
OID = "PID"
# Setup output field
ProtLen_fld = "ProtLeng"
hp.log("Calculate Existing Urban and Rural discharges...")
Cumulative_Impervious = Raster(os.path.join(hp.Workspace + "\\WIPoutput.mdb", "cumimpcovlake") )
Cum_da = Raster(os.path.join(hp.Workspace + "\\WIPoutput.mdb", "cumda"))
## usgs_calcs = Helper.USGSVars(hp.Basin)
U1yr_ext = regression.urbanQcp(hp.Basin, Cum_da, Cumulative_Impervious)
U1yr_ext.save(os.path.join(hp.SWorkspace, "U1yr_ext"))
hp.log("Looping through input BMPs...")
BMProws = arcpy.UpdateCursor(BMPpts)
all = arcpy.GetCount_management(BMPpts)
count = 1
for BMProw in BMProws:
BMP_FID = BMProw.getValue(OID)
hp.log(" Processing point %s of %s..." % (count, all))
#~ print " %s %s: %s" % (BMPpts, OID, BMP_FID),
bmp_Ex1yr = float(BMProw.getValue(sys.argv[2]))
bmp_Prop1yr = float(BMProw.getValue(sys.argv[3]))
if not (bmp_Prop1yr < bmp_Ex1yr):
hp.log(" No Channel Protection from this BMP")
else:
hp.log(" Found Channel Protection BMP")
hp.log(" Creating new dataset for this point")
SinBMPpts = os.path.join(hp.SWorkspace, "PLBMPpts%s.shp" % BMP_FID)
hp.GetSubset(BMPpts, SinBMPpts, " \"PID\" = %s " % BMP_FID)
hp.log(" Calculating Urban 1yr Flow")
ModCumDa, thisBMPras, Urban_1yrQ = regression.ChannelProtection(hp, SinBMPpts, sys.argv[3])
# we don't need these rasters at all
#~ ModCumDa.save(os.path.join(hp.SWorkspace, "ModCumDa"))
#~ thisBMPras.save(os.path.join(hp.SWorkspace, "thisBMPras"))
Urban_1yrQ.save(os.path.join(hp.SWorkspace, "Urban_1yrQ"))
            # wrap the path in Raster() so the map-algebra multiply is valid
            flowdir = Raster(os.path.join(hp.Workspace + "\\WIPoutput.mdb", "flowdir")) * hp.Mask
flowdir.save(os.path.join(hp.SWorkspace, "flowdir"))
ans = hp.ProtLength(thisBMPras, flowdir, Urban_1yrQ, U1yr_ext)
            hp.log("The cell distance calculated by protleng.exe is: %s" % ans)
hp.SetAtt(BMP_FID, ProtLen_fld, ans * hp.units['size'], bmp_noclip)
count += 1
hp.Close()
except:
i, j, k = sys.exc_info()
hp.EH(i, j, k)
|
iphoting/healthchecks | refs/heads/heroku | hc/front/tests/test_update_channel_name.py | 2 | from hc.api.models import Channel
from hc.test import BaseTestCase
class UpdateChannelNameTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.channel = Channel(kind="email", project=self.project)
self.channel.save()
self.url = "/integrations/%s/name/" % self.channel.code
def test_it_works(self):
payload = {"name": "My work email"}
self.client.login(username="[email protected]", password="password")
r = self.client.post(self.url, data=payload)
self.assertRedirects(r, self.channels_url)
self.channel.refresh_from_db()
self.assertEqual(self.channel.name, "My work email")
def test_team_access_works(self):
payload = {"name": "Bob was here"}
# Logging in as bob, not alice. Bob has team access so this
# should work.
self.client.login(username="[email protected]", password="password")
self.client.post(self.url, data=payload)
self.channel.refresh_from_db()
self.assertEqual(self.channel.name, "Bob was here")
def test_it_checks_ownership(self):
payload = {"name": "Charlie Sent This"}
self.client.login(username="[email protected]", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 404)
def test_it_handles_missing_uuid(self):
# Valid UUID but there is no check for it:
url = "/integrations/6837d6ec-fc08-4da5-a67f-08a9ed1ccf62/name/"
payload = {"name": "Alice Was Here"}
self.client.login(username="[email protected]", password="password")
r = self.client.post(url, data=payload)
self.assertEqual(r.status_code, 404)
def test_it_rejects_get(self):
self.client.login(username="[email protected]", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 405)
def test_it_requires_rw_access(self):
self.bobs_membership.rw = False
self.bobs_membership.save()
payload = {"name": "My work email"}
self.client.login(username="[email protected]", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 403)
|
leafclick/intellij-community | refs/heads/master | python/testData/intentions/transformConditionalExpression.py | 83 | x = a if <caret>cond else b |
eLBati/odoo | refs/heads/master | addons/website_forum_doc/models/documentation.py | 52 | # -*- coding: utf-8 -*-
import openerp
from openerp.osv import osv, fields
class Documentation(osv.Model):
_name = 'forum.documentation.toc'
_description = 'Documentation ToC'
_inherit = ['website.seo.metadata']
_order = "parent_left"
_parent_order = "sequence, name"
_parent_store = True
def name_get(self, cr, uid, ids, context=None):
if isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1]+' / '+name
res.append((record['id'], name))
return res
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_columns = {
'sequence': fields.integer('Sequence'),
'display_name': fields.function(_name_get_fnc, type="char", string='Full Name'),
'name': fields.char('Name', required=True, translate=True),
'introduction': fields.html('Introduction', translate=True),
'parent_id': fields.many2one('forum.documentation.toc', 'Parent Table Of Content', ondelete='cascade'),
'child_ids': fields.one2many('forum.documentation.toc', 'parent_id', 'Children Table Of Content'),
'parent_left': fields.integer('Left Parent', select=True),
'parent_right': fields.integer('Right Parent', select=True),
'post_ids': fields.one2many('forum.post', 'documentation_toc_id', 'Posts'),
'forum_id': fields.many2one('forum.forum', 'Forum', required=True),
}
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
]
class DocumentationStage(osv.Model):
_name = 'forum.documentation.stage'
_description = 'Post Stage'
_order = 'sequence'
_columns = {
'sequence': fields.integer('Sequence'),
'name': fields.char('Stage Name', required=True, translate=True),
}
class Post(osv.Model):
_inherit = 'forum.post'
_columns = {
'documentation_toc_id': fields.many2one('forum.documentation.toc', 'Documentation ToC', ondelete='set null'),
'documentation_stage_id': fields.many2one('forum.documentation.stage', 'Documentation Stage'),
'color': fields.integer('Color Index')
}
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
stage_obj = self.pool.get('forum.documentation.stage')
stage_ids = stage_obj.search(cr, uid, [], context=context)
result = stage_obj.name_get(cr, uid, stage_ids, context=context)
return result, {}
_group_by_full = {
'documentation_stage_id': _read_group_stage_ids,
}
|
ClearCorp-dev/odoo | refs/heads/8.0 | addons/event/wizard/__init__.py | 435 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import event_confirm
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kevin-vb/drupdevelop | refs/heads/master | sites/all/themes/drupdevelop/node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
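# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of gyp): writing a minimal tool file with the
# Writer above, kept as a comment. The rule name, command, and paths are
# illustrative assumptions.
#
#   w = Writer('obj/my_tool.rules', 'my_tool')
#   w.AddCustomBuildRule(
#       name='CompileFoo',
#       cmd='foo_compiler.exe "$(InputPath)"',
#       description='Compiling $(InputFileName)',
#       additional_dependencies=[],
#       outputs=['$(InputName).cc'],
#       extensions=['foo'],
#   )
#   w.WriteIfChanged()
# ---------------------------------------------------------------------------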
|
Sonicbids/django | refs/heads/master | tests/view_tests/tests/test_debug.py | 4 | # -*- coding: utf-8 -*-
# This coding header is significant for tests, as the debug view is parsing
# files to search for such a header to decode the source file content
from __future__ import unicode_literals
import importlib
import inspect
import os
import re
import shutil
import sys
from tempfile import NamedTemporaryFile, mkdtemp, mkstemp
from unittest import skipIf
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.template.base import TemplateDoesNotExist
from django.test import TestCase, RequestFactory, override_settings
from django.utils.encoding import force_text, force_bytes
from django.utils import six
from django.views.debug import CallableSettingWrapper, ExceptionReporter
from .. import BrokenException, except_args
from ..views import (sensitive_view, non_sensitive_view, paranoid_view,
custom_exception_reporter_filter_view, sensitive_method_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
multivalue_dict_key_error)
class CallableSettingWrapperTests(TestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable(object):
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, TEMPLATE_DEBUG=True,
ROOT_URLCONF="view_tests.urls")
class DebugViewTests(TestCase):
def test_files(self):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# Ensure that when DEBUG=True, technical_500_template() is called.
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error.',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_raised_404(self):
response = self.client.get('/views/raises404/')
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_technical_404(self):
response = self.client.get('/views/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
def test_classbased_technical_404(self):
response = self.client.get('/views/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_view_exceptions(self):
for n in range(len(except_args)):
self.assertRaises(BrokenException, self.client.get,
reverse('view_exception', args=(n,)))
def test_non_l10ned_numeric_ids(self):
"""
        Numeric IDs and the line numbers in fancy traceback context blocks shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
response = self.client.get('/raises500/')
# We look for a HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match.group('id')
self.assertFalse(re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr)
def test_template_exceptions(self):
for n in range(len(except_args)):
try:
self.client.get(reverse('template_exception', args=(n,)))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(raising_loc.find('raise BrokenException'), -1,
"Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" %
raising_loc)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with NamedTemporaryFile(prefix=template_name) as tempfile:
tempdir = os.path.dirname(tempfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (File does not exist)" % template_path, status_code=500, count=1)
@skipIf(sys.platform == "win32", "Python on Windows doesn't have working os.chmod() and os.access().")
def test_template_loader_postmortem_notreadable(self):
"""Tests for not readable file"""
with NamedTemporaryFile() as tempfile:
template_name = tempfile.name
tempdir = os.path.dirname(tempfile.name)
template_path = os.path.join(tempdir, template_name)
os.chmod(template_path, 0o0222)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (File is not readable)" % template_path, status_code=500, count=1)
def test_template_loader_postmortem_notafile(self):
"""Tests for not being a file"""
try:
template_path = mkdtemp()
template_name = os.path.basename(template_path)
tempdir = os.path.dirname(template_path)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Not a file)" % template_path, status_code=500, count=1)
finally:
shutil.rmtree(template_path)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
self.assertRaises(TemplateDoesNotExist, self.client.get, '/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
        Make sure that the default urlconf template is shown instead
        of the technical 404 page, if the user has not altered their
        URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>Congratulations on your first Django-powered page.</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
@override_settings(
DEBUG=True,
ROOT_URLCONF="view_tests.urls",
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(TestCase):
def test_400(self):
# Ensure that when DEBUG=True, technical_500_template() is called.
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(TestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_eol_support(self):
"""Test that the ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = list('print %d' % i for i in range(1, 6))
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = mkstemp(text=False)
os.write(fd, force_bytes(newline.join(LINES) + newline))
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput(object):
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
try:
class LargeOutput(object):
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
@skipIf(six.PY2, 'Bug manifests on PY3 only')
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError on Python 3. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ImportError at /test_view/</h1>', html)
class PlainTextReportTests(TestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
class ExceptionReportTestMixin(object):
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value'}
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=(('Admin', '[email protected]'),)):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
            # Frame vars are never shown in plain-text email reports.
body_plain = force_text(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
            # Frame vars are shown in HTML email reports.
body_html = force_text(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=(('Admin', '[email protected]'),)):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
            # Frame vars are never shown in plain-text email reports.
body_plain = force_text(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
            # Frame vars are shown in HTML email reports.
body_html = force_text(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=(('Admin', '[email protected]'),)):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
            # Frame vars are never shown in plain-text email reports.
body = force_text(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(TestCase, ExceptionReportTestMixin):
"""
Ensure that sensitive information can be filtered out of error reports.
Refs #14614.
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
        Ensure that everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Ensure that sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
Ensure that no POST parameters and frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Ensure that sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
Ensure that it's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
Ensure that the sensitive_variables decorator works with object
methods.
Refs #18379.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view,
check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view,
check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view,
check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view,
check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Ensure that sensitive variables don't leak in the sensitive_variables
decorator's frame, when those variables are passed as arguments to the
decorated function.
Refs #19453.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Ensure that sensitive variables don't leak in the sensitive_variables
decorator's frame, when those variables are passed as keyword arguments
to the decorated function.
Refs #19453.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
Callable settings which forbid to set attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots(object):
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(TestCase, ExceptionReportTestMixin):
"""
Ensure that sensitive information can be filtered out of error reports.
Here we specifically test the plain text 500 debug-only error page served
when it has been detected the request was sent by JS code. We don't check
for (non)existence of frames vars in the traceback information section of
the response content because we don't include them in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
def test_non_sensitive_request(self):
"""
        Ensure that request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Ensure that sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
Ensure that no POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
Ensure that it's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view,
check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view,
check_for_vars=False)
|
RAtechntukan/Sick-Beard | refs/heads/development | lib/guessit/__init__.py | 37 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
__version__ = '0.6b1'
__all__ = ['Guess', 'Language',
'guess_file_info', 'guess_video_info',
'guess_movie_info', 'guess_episode_info']
# Do python3 detection before importing any other module, to be sure that
# it will then always be available
# with code from http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/
import sys
if sys.version_info[0] >= 3:
PY3 = True
unicode_text_type = str
native_text_type = str
base_text_type = str
def u(x):
return str(x)
def s(x):
return x
class UnicodeMixin(object):
__str__ = lambda x: x.__unicode__()
import binascii
def to_hex(x):
return binascii.hexlify(x).decode('utf-8')
else:
PY3 = False
__all__ = [ str(s) for s in __all__ ] # fix imports for python2
unicode_text_type = unicode
native_text_type = str
base_text_type = basestring
def u(x):
if isinstance(x, str):
return x.decode('utf-8')
return unicode(x)
def s(x):
if isinstance(x, unicode):
return x.encode('utf-8')
if isinstance(x, list):
return [ s(y) for y in x ]
if isinstance(x, tuple):
return tuple(s(y) for y in x)
if isinstance(x, dict):
return dict((s(key), s(value)) for key, value in x.items())
return x
class UnicodeMixin(object):
__str__ = lambda x: unicode(x).encode('utf-8')
def to_hex(x):
return x.encode('hex')
from guessit.guess import Guess, merge_all
from guessit.language import Language
from guessit.matcher import IterativeMatcher
from guessit.textutils import clean_string
import logging
log = logging.getLogger(__name__)
class NullHandler(logging.Handler):
def emit(self, record):
pass
# let's be a nicely behaving library
h = NullHandler()
log.addHandler(h)
def _guess_filename(filename, filetype):
mtree = IterativeMatcher(filename, filetype=filetype)
m = mtree.matched()
if 'language' not in m and 'subtitleLanguage' not in m:
return m
# if we found some language, make sure we didn't cut a title or sth...
mtree2 = IterativeMatcher(filename, filetype=filetype,
opts=['nolanguage', 'nocountry'])
m2 = mtree2.matched()
def find_nodes(tree, props):
"""Yields all nodes containing any of the given props."""
if isinstance(props, base_text_type):
props = [props]
for node in tree.nodes():
if any(prop in node.guess for prop in props):
yield node
def warning(title):
log.warning('%s, guesses: %s - %s' % (title, m.nice_string(), m2.nice_string()))
return m
if m.get('title') != m2.get('title'):
title = next(find_nodes(mtree.match_tree, 'title'))
title2 = next(find_nodes(mtree2.match_tree, 'title'))
langs = list(find_nodes(mtree.match_tree, ['language', 'subtitleLanguage']))
if not langs:
return warning('A weird error happened with language detection')
# find the language that is likely more relevant
for lng in langs:
if lng.value in title2.value:
# if the language was detected as part of a potential title,
# look at this one in particular
lang = lng
break
else:
# pick the first one if we don't have a better choice
lang = langs[0]
        # language codes are rarely part of a title, and those
# should be handled by the Language exceptions anyway
if len(lang.value) <= 3:
return m
# if filetype is subtitle and the language appears last, just before
# the extension, then it is likely a subtitle language
parts = clean_string(title.root.value).split()
if (m['type'] in ['moviesubtitle', 'episodesubtitle'] and
parts.index(lang.value) == len(parts) - 2):
return m
# if the language was in the middle of the other potential title,
# keep the other title (eg: The Italian Job), except if it is at the
# very beginning, in which case we consider it an error
if m2['title'].startswith(lang.value):
return m
elif lang.value in title2.value:
return m2
# if a node is in an explicit group, then the correct title is probably
# the other one
if title.root.node_at(title.node_idx[:2]).is_explicit():
return m2
elif title2.root.node_at(title2.node_idx[:2]).is_explicit():
return m
return warning('Not sure of the title because of the language position')
return m
def guess_file_info(filename, filetype, info=None):
"""info can contain the names of the various plugins, such as 'filename' to
detect filename info, or 'hash_md5' to get the md5 hash of the file.
>>> guess_file_info('tests/dummy.srt', 'autodetect', info = ['hash_md5', 'hash_sha1'])
{'hash_md5': 'e781de9b94ba2753a8e2945b2c0a123d', 'hash_sha1': 'bfd18e2f4e5d59775c2bc14d80f56971891ed620'}
"""
result = []
hashers = []
# Force unicode as soon as possible
filename = u(filename)
if info is None:
info = ['filename']
if isinstance(info, base_text_type):
info = [info]
for infotype in info:
if infotype == 'filename':
result.append(_guess_filename(filename, filetype))
elif infotype == 'hash_mpc':
from guessit.hash_mpc import hash_file
try:
result.append(Guess({'hash_mpc': hash_file(filename)},
confidence=1.0))
except Exception as e:
log.warning('Could not compute MPC-style hash because: %s' % e)
elif infotype == 'hash_ed2k':
from guessit.hash_ed2k import hash_file
try:
result.append(Guess({'hash_ed2k': hash_file(filename)},
confidence=1.0))
except Exception as e:
log.warning('Could not compute ed2k hash because: %s' % e)
elif infotype.startswith('hash_'):
import hashlib
hashname = infotype[5:]
try:
hasher = getattr(hashlib, hashname)()
hashers.append((infotype, hasher))
except AttributeError:
log.warning('Could not compute %s hash because it is not available from python\'s hashlib module' % hashname)
else:
log.warning('Invalid infotype: %s' % infotype)
# do all the hashes now, but on a single pass
if hashers:
try:
blocksize = 8192
hasherobjs = dict(hashers).values()
with open(filename, 'rb') as f:
chunk = f.read(blocksize)
while chunk:
for hasher in hasherobjs:
hasher.update(chunk)
chunk = f.read(blocksize)
for infotype, hasher in hashers:
result.append(Guess({infotype: hasher.hexdigest()},
confidence=1.0))
except Exception as e:
log.warning('Could not compute hash because: %s' % e)
result = merge_all(result)
# last minute adjustments
# if country is in the guessed properties, make it part of the filename
if 'series' in result and 'country' in result:
result['series'] += ' (%s)' % result['country'].alpha2.upper()
return result
def guess_video_info(filename, info=None):
return guess_file_info(filename, 'autodetect', info)
def guess_movie_info(filename, info=None):
return guess_file_info(filename, 'movie', info)
def guess_episode_info(filename, info=None):
return guess_file_info(filename, 'episode', info)
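# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the library source): a typical call into
# the public API above, kept as a comment. The filename is illustrative, and
# the key names ('series', 'season', 'episodeNumber') follow this version's
# conventions.
#
#   guess = guess_episode_info('Show.Name.S01E02.720p.HDTV.x264.mkv')
#   print(guess.get('series'), guess.get('season'), guess.get('episodeNumber'))
# ---------------------------------------------------------------------------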
|