repo_name (stringlengths 5 to 92) | path (stringlengths 4 to 232) | copies (stringclasses, 19 values) | size (stringlengths 4 to 7) | content (stringlengths 721 to 1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
walchko/pygecko | retired/find_geckocore.py | 1 | 1087 | # #!/usr/bin/env python3
# # -*- coding: utf-8 -*-
# ##############################################
# # The MIT License (MIT)
# # Copyright (c) 2018 Kevin Walchko
# # see LICENSE for full details
# ##############################################
# # import time
# import argparse
# import os
# from pygecko.transport.beacon import BeaconFinder
#
#
# def handleArgs():
# parser = argparse.ArgumentParser(description='Use multicast to find a geckocore node on the network')
# parser.add_argument('-k', '--key', help='key, default is hostname', default=None)
# args = vars(parser.parse_args())
# return args
#
#
# if __name__ == "__main__":
# args = handleArgs()
# key = args['key']
# if key is None:
# key = os.uname().nodename.split('.')[0].lower()
# finder = BeaconFinder(key)
# resp = finder.search(0,"0")
# if resp:
# print("[GeckoCore]===========================")
# print(" in: {}".format(resp[0]))
# print(" out: {}".format(resp[1]))
# else:
# print("*** No GeckoCore found on this network ***")
| mit | -3,143,877,320,339,952,600 | 31.939394 | 107 | 0.516099 | false |
yoshrote/valid_model | valid_model/descriptors.py | 1 | 7772 | from datetime import datetime, timedelta
import six
from .base import Generic, Object
from .exc import ValidationError
from .utils import is_descriptor
class SimpleType(Generic):
"""This descriptor will not attempt to coerce the value on __set__."""
_type_klass = None
_type_label = None
def __set__(self, instance, value):
if value is not None and not isinstance(value, self._type_klass):
raise ValidationError(
"{!r} is not {}".format(value, self._type_label),
self.name
)
return Generic.__set__(self, instance, value)
class EmbeddedObject(Generic):
def __init__(self, class_obj):
self.class_obj = class_obj
def validator(obj):
return isinstance(obj, class_obj)
Generic.__init__(
self, default=class_obj, validator=validator
)
def __set__(self, instance, value):
try:
if isinstance(value, dict):
value = self.class_obj(**value)
return Generic.__set__(self, instance, value)
except ValidationError as ex:
raise ValidationError(
ex.msg,
'{}.{}'.format(self.name, ex.field) if ex.field else self.name
)
class String(Generic):
"""
This descriptor attempts to set a unicode string value.
If the value is type(str) it will be decoded using utf-8.
"""
def __set__(self, instance, value):
if value is None or isinstance(value, six.text_type):
pass
elif isinstance(value, six.binary_type):
value = value.decode('utf-8')
else:
raise ValidationError(
"{!r} is not a string".format(value),
self.name
)
return Generic.__set__(self, instance, value)
class _Number(Generic):
"""This descriptor attempts to converts any a value to a number."""
_number_type = None
_number_label = None
def __set__(self, instance, value):
if value is not None:
number_like = isinstance(value, (six.integer_types, float))
is_bool = isinstance(value, bool)
if not number_like or is_bool:
raise ValidationError(
"{!r} is not {}".format(value, self._number_label),
self.name
)
else:
                value = self._number_type(value)
return Generic.__set__(self, instance, value)
class Integer(_Number):
"""This descriptor attempts to coerce a number to an integer."""
_number_type = int
_number_label = "an int"
class Float(_Number):
"""This descriptor attempts to coerce a number to a float."""
_number_type = float
_number_label = "a float"
class Bool(Generic):
"""This descriptor attempts to converts any a value to a boolean."""
def __set__(self, instance, value):
if value is not None:
if value in (0, 1) or isinstance(value, bool):
value = bool(value)
else:
raise ValidationError(
"{!r} is not a bool".format(value),
self.name
)
return Generic.__set__(self, instance, value)
class DateTime(SimpleType):
"""This descriptor attempts to set a datetime value."""
_type_klass = datetime
_type_label = "a datetime"
class TimeDelta(SimpleType):
"""This descriptor attempts to set a timedalta value."""
_type_klass = timedelta
_type_label = "a timedelta"
NO_DEFAULT = object()
class _Collection(Generic):
_collection_type = object
_collection_label = None
def __init__(self, default=NO_DEFAULT, value=None, validator=None, mutator=None):
if default is NO_DEFAULT:
default = self._collection_type
Generic.__init__(
self, default=default, validator=validator, mutator=mutator, nullable=False
)
if value is not None and not isinstance(value, Generic):
raise TypeError('value must be None or an instance of Generic')
self.value = value
@staticmethod
def iterate(collection):
return iter(collection)
def recursive_validation(self, element):
"""Validate element of collection against `self.value`."""
dummy = Object()
if self.value is not None:
try:
element = self.value.__set__(dummy, element)
except ValidationError as ex:
raise ValidationError(
ex.msg,
'{}.{}'.format(self.name, ex.field) if ex.field else self.name
)
return element
def add_to_collection(self, collection, element):
raise NotImplementedError("_add_to_collection")
def __set__(self, instance, value):
if value is None:
value = self._collection_type()
elif not isinstance(value, self._collection_type):
raise ValidationError(
"{!r} is not {}".format(value, self._collection_label),
self.name
)
new_value = self._collection_type()
iterable = self.iterate(value)
for element in iterable:
element = self.recursive_validation(element)
self.add_to_collection(new_value, element)
value = new_value
return Generic.__set__(self, instance, value)
class List(_Collection):
_collection_type = list
_collection_label = "a list"
def add_to_collection(self, collection, element):
collection.append(element)
return collection
class Set(_Collection):
_collection_type = set
_collection_label = "a set"
def add_to_collection(self, collection, element):
collection.add(element)
return collection
class Dict(_Collection):
_collection_type = dict
_collection_label = "a dict"
def __init__(self, default=dict, key=None, value=None, validator=None, mutator=None):
_Collection.__init__(
self, default=default, value=value, validator=validator, mutator=mutator
)
if key is not None and not isinstance(key, Generic):
raise TypeError('key must be None or an instance of Generic')
self.key = key
@staticmethod
def iterate(collection):
return six.iteritems(collection)
def recursive_validation(self, element):
"""Validate element of collection against `self.value`."""
dummy = Object()
key, value = element
if self.key is not None:
try:
key = self.key.__set__(dummy, key)
except ValidationError as ex:
raise ValidationError(
ex.msg,
"{} key {}".format(self.name, key)
)
if self.value is not None:
try:
value = self.value.__set__(dummy, value)
except ValidationError as ex:
raise ValidationError(
ex.msg,
"{}['{}']".format(self.name, key)
)
return key, value
def add_to_collection(self, collection, element):
key, value = element
collection[key] = value
return collection
def descriptors():
"""Generate list of descriptor class names."""
return [
name for name, value in six.iteritems(globals())
if is_descriptor(value) and issubclass(value, Generic)
]
def descriptor_classes():
"""Generate list of descriptor classes."""
return [
value for value in six.itervalues(globals())
if is_descriptor(value) and issubclass(value, Generic)
]
__all__ = ['descriptor_classes'] + descriptors()
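# Minimal usage sketch (illustrative only, not part of this module): these
# descriptors are meant to be declared on a valid_model Object subclass,
# which validates and coerces values on assignment.
#
#   from valid_model.base import Object
#
#   class User(Object):
#       name = String()
#       age = Integer()
#       tags = Set(value=String())
#
#   user = User()
#   user.name = b'alice'   # decoded to u'alice' by String.__set__
#   user.age = True        # raises ValidationError: "True is not an int"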
| mit | 7,817,542,412,266,166,000 | 28.439394 | 89 | 0.573726 | false |
paulmartel/voltdb | src/catgen/catalog_utils/strings.py | 1 | 1677 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
gpl_header = \
"""/* This file is part of VoltDB.
* Copyright (C) 2008-2016 VoltDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
*/
"""
auto_gen_warning = \
"""/* WARNING: THIS FILE IS AUTO-GENERATED
DO NOT MODIFY THIS SOURCE
ALL CHANGES MUST BE MADE IN THE CATALOG GENERATOR */
"""
| agpl-3.0 | 2,273,630,477,120,155,100 | 38.928571 | 75 | 0.731067 | false |
jazzabeanie/python_koans | python2/koans/about_none.py | 1 | 2911 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutNil in the Ruby Koans
#
from runner.koan import *
class AboutNone(Koan):
def test_none_is_an_object(self):
"Unlike NULL in a lot of languages"
        self.assertEqual(True, isinstance(None, object)) # so isinstance(1, 2) checks whether 1 is a type of 2, so what else can 2 be?? Does it also check if 1 is a child of 2?
def test_none_is_universal(self):
"There is only one None"
        self.assertEqual(True, None is None) #"is" is an operator, see here for more info: https://docs.python.org/2/reference/expressions.html#not-in. Apparently you use == when comparing values and is when comparing identities. An identity is something that is unique to every object.
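        # Quick illustration of "is" (identity) vs "==" (value) -- my own
        # example, not part of the original koan:
        #   a = [1, 2]
        #   b = [1, 2]
        #   a == b   # True  -- equal values
        #   a is b   # False -- two distinct objects
        #   a is a   # True  -- same identity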
def test_what_exception_do_you_get_when_calling_nonexistent_methods(self):
"""
What is the Exception that is thrown when you call a method that does
not exist?
Hint: launch python command console and try the code in the
block below.
Don't worry about what 'try' and 'except' do, we'll talk about
this later
"""
try:
None.some_method_none_does_not_know_about()
except Exception as ex:
# What exception has been caught?
#
# Need a recap on how to evaluate __class__ attributes?
# https://github.com/gregmalcolm/python_koans/wiki/Class-Attribute
# start my experiment
try:
blah
except Exception as excep:
self.assertEqual(NameError, excep.__class__) # this is me testing my ability to produce a NameError. Python knows that I'm finished with the except section because of the indentation. Indentation and newlines matter in python.
# end my experiment
        self.assertEqual(AttributeError, ex.__class__) # When I originally tried this I got a name error because I tried it in a separate Python session where ex had not been defined. Here it's been defined with the "except Exception as ex:" line.
# What message was attached to the exception?
# (HINT: replace __ with part of the error message.)
self.assertMatch("NoneType", ex.args[0]) # Assert match takes the first argument and does a search through the second argument for a match. ex.args[] take the first argument of ex, which is the message of the exceptions.
#What I gather from all this is that when you call an unknown method, you get an AttributeError that says "'__' object has not attribute '__'".
def test_none_is_distinct(self):
"""
None is distinct from other things which are False.
"""
self.assertEqual(True, None is not 0)
self.assertEqual(True, None is not False)
# None is a NULL value. It is not equal to true or false or zero.
| mit | -8,089,154,453,468,490,000 | 49.189655 | 287 | 0.651323 | false |
ZeitgeberH/nengo | nengo/networks/tests/test_oscillator.py | 1 | 1526 | import logging
import pytest
import nengo
from nengo.utils.functions import piecewise
from nengo.utils.numpy import rmse
from nengo.utils.testing import Plotter
logger = logging.getLogger(__name__)
def test_oscillator(Simulator, nl):
model = nengo.Network(label='Oscillator', seed=789)
with model:
inputs = {0: [1, 0], 0.5: [0, 0]}
input = nengo.Node(piecewise(inputs), label='Input')
tau = 0.1
freq = 5
T = nengo.networks.Oscillator(
tau, freq, label="Oscillator", neurons=nl(100))
nengo.Connection(input, T.input)
A = nengo.Ensemble(nl(100), label='A', dimensions=2)
nengo.Connection(A, A, synapse=tau,
transform=[[1, -freq*tau], [freq*tau, 1]])
nengo.Connection(input, A)
in_probe = nengo.Probe(input, "output")
A_probe = nengo.Probe(A, "decoded_output", synapse=0.01)
T_probe = nengo.Probe(T.ensemble, "decoded_output", synapse=0.01)
sim = Simulator(model)
sim.run(3.0)
with Plotter(Simulator, nl) as plt:
t = sim.trange()
plt.plot(t, sim.data[A_probe], label='Manual')
plt.plot(t, sim.data[T_probe], label='Template')
plt.plot(t, sim.data[in_probe], 'k', label='Input')
plt.legend(loc=0)
plt.savefig('test_oscillator.test_oscillator.pdf')
plt.close()
assert rmse(sim.data[A_probe], sim.data[T_probe]) < 0.3
if __name__ == "__main__":
nengo.log(debug=True)
pytest.main([__file__, '-v'])
| gpl-3.0 | -4,465,944,502,416,498,000 | 28.346154 | 73 | 0.598296 | false |
walterbender/story | collabwrapper.py | 1 | 33058 | # Copyright (C) 2015 Walter Bender
# Copyright (C) 2015 Sam Parkinson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software
# Foundation, 51 Franklin Street, Suite 500 Boston, MA 02110-1335 USA
'''
The wrapper module provides an abstraction over the Sugar
collaboration system.
Using CollabWrapper
-------------------
1. Add `get_data` and `set_data` methods to the activity class::
def get_data(self):
# return plain python objects - things that can be encoded
# using the json module
return dict(
text=self._entry.get_text()
)
def set_data(self, data):
# data will be the same object returned by get_data
self._entry.set_text(data.get('text'))
2. Make a CollabWrapper instance::
def __init__(self, handle):
sugar3.activity.activity.Activity.__init__(self, handle)
self._collab = CollabWrapper(self)
self._collab.connect('message', self.__message_cb)
# setup your activity here
self._collab.setup()
3. Post any changes of shared state to the CollabWrapper. The changes
will be sent to other buddies if any are connected, for example::
def __entry_changed_cb(self, *args):
self._collab.post(dict(
action='entry_changed',
new_text=self._entry.get_text()
))
4. Handle incoming messages, for example::
def __message_cb(self, collab, buddy, msg):
action = msg.get('action')
if action == 'entry_changed':
self._entry.set_text(msg.get('new_text'))
'''
import os
import json
import socket
from gettext import gettext as _
import gi
gi.require_version('TelepathyGLib', '0.12')
from gi.repository import GObject
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import TelepathyGLib
import dbus
from dbus import PROPERTIES_IFACE
CHANNEL_INTERFACE = TelepathyGLib.IFACE_CHANNEL
CHANNEL_INTERFACE_GROUP = TelepathyGLib.IFACE_CHANNEL_INTERFACE_GROUP
CHANNEL_TYPE_TEXT = TelepathyGLib.IFACE_CHANNEL_TYPE_TEXT
CHANNEL_TYPE_FILE_TRANSFER = TelepathyGLib.IFACE_CHANNEL_TYPE_FILE_TRANSFER
CONN_INTERFACE_ALIASING = TelepathyGLib.IFACE_CONNECTION_INTERFACE_ALIASING
CONN_INTERFACE = TelepathyGLib.IFACE_CONNECTION
CHANNEL = TelepathyGLib.IFACE_CHANNEL
CLIENT = TelepathyGLib.IFACE_CLIENT
CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES = \
TelepathyGLib.ChannelGroupFlags.CHANNEL_SPECIFIC_HANDLES
CONNECTION_HANDLE_TYPE_CONTACT = TelepathyGLib.HandleType.CONTACT
CHANNEL_TEXT_MESSAGE_TYPE_NORMAL = TelepathyGLib.ChannelTextMessageType.NORMAL
SOCKET_ADDRESS_TYPE_UNIX = TelepathyGLib.SocketAddressType.UNIX
SOCKET_ACCESS_CONTROL_LOCALHOST = TelepathyGLib.SocketAccessControl.LOCALHOST
from sugar3.presence import presenceservice
from sugar3.activity.activity import SCOPE_PRIVATE
from sugar3.graphics.alert import NotifyAlert
import logging
_logger = logging.getLogger('CollabWrapper')
ACTION_INIT_REQUEST = '!!ACTION_INIT_REQUEST'
ACTION_INIT_RESPONSE = '!!ACTION_INIT_RESPONSE'
ACTIVITY_FT_MIME = 'x-sugar/from-activity'
class CollabWrapper(GObject.GObject):
'''
The wrapper provides a high level abstraction over the
collaboration system. The wrapper deals with setting up the
channels, encoding and decoding messages, initialization and
alerting the caller to the status.
An activity instance is initially private, but may be shared. Once
shared, an instance will remain shared for as long as the activity
runs. On stop, the journal will preserve the instance as shared,
and on resume the instance will be shared again.
When the caller shares an activity instance, they are the leader,
and other buddies may join. The instance is now a shared activity.
When the caller joins a shared activity, the leader will call
`get_data`, and the caller's `set_data` will be called with the
result.
The `joined` signal is emitted when the caller joins a shared
activity. One or more `buddy_joined` signals will be emitted before
this signal. The signal is not emitted to the caller who first
shared the activity. There are no arguments.
The `buddy_joined` signal is emitted when another buddy joins the
shared activity. At least one will be emitted before the `joined`
signal. The caller will never be mentioned, but is assumed to be
part of the set. The signal passes a
:class:`sugar3.presence.buddy.Buddy` as the only argument.
The `buddy_left` signal is emitted when another user leaves the
shared activity. The signal is not emitted during quit. The signal
passes a :class:`sugar3.presence.buddy.Buddy` as the only argument.
Any buddy may call `post` to send a message to all buddies. Each
buddy will receive a `message` signal.
The `message` signal is emitted when a `post` is received from any
buddy. The signal has two arguments. The first is a
:class:`sugar3.presence.buddy.Buddy`. The second is the message.
Any buddy may call `send_file_memory` or `send_file_file` to
transfer a file to all buddies. A description is to be given.
Each buddy will receive an `incoming_file` signal.
The `incoming_file` signal is emitted when a file transfer is
received. The signal has two arguments. The first is a
:class:`IncomingFileTransfer`. The second is the description.
'''
message = GObject.Signal('message', arg_types=[object, object])
joined = GObject.Signal('joined')
buddy_joined = GObject.Signal('buddy_joined', arg_types=[object])
buddy_left = GObject.Signal('buddy_left', arg_types=[object])
incoming_file = GObject.Signal('incoming_file', arg_types=[object, object])
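    # Minimal wiring sketch for the signals above (hypothetical activity
    # code; callback names are assumed):
    #
    #   self._collab.connect('message', self.__message_cb)
    #   self._collab.connect('incoming_file', self.__incoming_file_cb)
    #
    #   def __incoming_file_cb(self, collab, ft, description):
    #       ft.connect('ready', self.__ft_ready_cb)
    #       ft.accept_to_memory()
    #
    #   def __ft_ready_cb(self, ft, stream):
    #       # stream is a Gio.MemoryOutputStream once the transfer is done
    #       stream.close(None)
    #       data = stream.steal_as_bytes().get_data()
    #
    # To send, any buddy can call, for example:
    #
    #   self._collab.send_file_memory(buddy, data, {'kind': 'snapshot'})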
def __init__(self, activity):
_logger.debug('__init__')
GObject.GObject.__init__(self)
self.activity = activity
self.shared_activity = activity.shared_activity
self._leader = False
self._init_waiting = False
self._text_channel = None
self._owner = presenceservice.get_instance().get_owner()
def setup(self):
'''
Setup must be called so that the activity can join or share
if appropriate.
.. note::
As soon as setup is called, any signal, `get_data` or
`set_data` call may occur. This means that the activity
must have set up enough so these functions can work. For
example, call setup at the end of the activity
`__init__` function.
'''
_logger.debug('setup')
# Some glue to know if we are launching, joining, or resuming
# a shared activity.
if self.shared_activity:
# We're joining the activity.
self.activity.connect("joined", self.__joined_cb)
if self.activity.get_shared():
_logger.debug('calling _joined_cb')
self.__joined_cb(self)
else:
_logger.debug('Joining activity...')
self._alert(_('Joining activity...'),
_('Please wait for the connection...'))
else:
self._leader = True
if not self.activity.metadata or self.activity.metadata.get(
'share-scope', SCOPE_PRIVATE) == \
SCOPE_PRIVATE:
# We are creating a new activity instance.
_logger.debug('Off-line')
else:
# We are sharing an old activity instance.
_logger.debug('On-line')
self._alert(_('Resuming shared activity...'),
_('Please wait for the connection...'))
self.activity.connect('shared', self.__shared_cb)
def _alert(self, title, msg=None):
a = NotifyAlert()
a.props.title = title
a.props.msg = msg
self.activity.add_alert(a)
a.connect('response', lambda a, r: self.activity.remove_alert(a))
a.show()
def __shared_cb(self, sender):
''' Callback for when activity is shared. '''
_logger.debug('__shared_cb')
# FIXME: may be called twice, but we should only act once
self.shared_activity = self.activity.shared_activity
self._setup_text_channel()
self._listen_for_channels()
def __joined_cb(self, sender):
'''Callback for when an activity is joined.'''
_logger.debug('__joined_cb')
self.shared_activity = self.activity.shared_activity
if not self.shared_activity:
return
self._setup_text_channel()
self._listen_for_channels()
self._init_waiting = True
self.post({'action': ACTION_INIT_REQUEST})
for buddy in self.shared_activity.get_joined_buddies():
self.buddy_joined.emit(buddy)
self.joined.emit()
def _setup_text_channel(self):
''' Set up a text channel to use for collaboration. '''
_logger.debug('_setup_text_channel')
self._text_channel = _TextChannelWrapper(
self.shared_activity.telepathy_text_chan,
self.shared_activity.telepathy_conn)
# Tell the text channel what callback to use for incoming
# text messages.
self._text_channel.set_received_callback(self.__received_cb)
# Tell the text channel what callbacks to use when buddies
# come and go.
self.shared_activity.connect('buddy-joined', self.__buddy_joined_cb)
self.shared_activity.connect('buddy-left', self.__buddy_left_cb)
def _listen_for_channels(self):
_logger.debug('_listen_for_channels')
conn = self.shared_activity.telepathy_conn
conn.connect_to_signal('NewChannels', self.__new_channels_cb)
def __new_channels_cb(self, channels):
_logger.debug('__new_channels_cb')
conn = self.shared_activity.telepathy_conn
for path, props in channels:
if props[CHANNEL + '.Requested']:
continue # This channel was requested by me
channel_type = props[CHANNEL + '.ChannelType']
if channel_type == CHANNEL_TYPE_FILE_TRANSFER:
self._handle_ft_channel(conn, path, props)
def _handle_ft_channel(self, conn, path, props):
_logger.debug('_handle_ft_channel')
ft = IncomingFileTransfer(conn, path, props)
if ft.description == ACTION_INIT_RESPONSE:
ft.connect('ready', self.__ready_cb)
ft.accept_to_memory()
else:
desc = json.loads(ft.description)
self.incoming_file.emit(ft, desc)
def __ready_cb(self, ft, stream):
_logger.debug('__ready_cb')
if self._init_waiting:
stream.close(None)
# FIXME: The data prop seems to just be the raw pointer
gbytes = stream.steal_as_bytes()
data = gbytes.get_data()
_logger.debug('Got init data from buddy: %r', data)
data = json.loads(data)
self.activity.set_data(data)
self._init_waiting = False
def __received_cb(self, buddy, msg):
'''Process a message when it is received.'''
_logger.debug('__received_cb')
action = msg.get('action')
if action == ACTION_INIT_REQUEST:
if self._leader:
data = self.activity.get_data()
if data is not None:
data = json.dumps(data)
OutgoingBlobTransfer(
buddy,
self.shared_activity.telepathy_conn,
data,
self.get_client_name(),
ACTION_INIT_RESPONSE,
ACTIVITY_FT_MIME)
return
if buddy:
nick = buddy.props.nick
else:
nick = '???'
_logger.debug('Received message from %s: %r', nick, msg)
self.message.emit(buddy, msg)
def send_file_memory(self, buddy, data, description):
'''
Send a one to one file transfer from memory to a buddy. The
buddy will get the file transfer and description through the
`incoming_transfer` signal.
Args:
buddy (sugar3.presence.buddy.Buddy), buddy to send to.
data (str), the data to send.
description (object), a json encodable description for the
transfer. This will be given to the
`incoming_transfer` signal at the buddy.
'''
OutgoingBlobTransfer(
buddy,
self.shared_activity.telepathy_conn,
data,
self.get_client_name(),
json.dumps(description),
ACTIVITY_FT_MIME)
def send_file_file(self, buddy, path, description):
'''
Send a one to one file transfer from a filesystem path to a
given buddy. The buddy will get the file transfer and
description through the `incoming_transfer` signal.
Args:
buddy (sugar3.presence.buddy.Buddy), buddy to send to.
path (str), path of the file containing the data to send.
description (object), a json encodable description for the
transfer. This will be given to the
`incoming_transfer` signal at the buddy.
'''
OutgoingFileTransfer(
buddy,
self.shared_activity.telepathy_conn,
path,
self.get_client_name(),
json.dumps(description),
ACTIVITY_FT_MIME)
def post(self, msg):
'''
Send a message to all buddies. If the activity is not shared,
no message is sent.
Args:
msg (object): json encodable object to send,
eg. :class:`dict` or :class:`str`.
'''
if self._text_channel is not None:
self._text_channel.post(msg)
def __buddy_joined_cb(self, sender, buddy):
'''A buddy joined.'''
self.buddy_joined.emit(buddy)
def __buddy_left_cb(self, sender, buddy):
'''A buddy left.'''
self.buddy_left.emit(buddy)
def get_client_name(self):
'''
Get the name of the activity's telepathy client.
Returns: str, telepathy client name
'''
return CLIENT + '.' + self.activity.get_bundle_id()
@GObject.property
def leader(self):
'''
Boolean of if this client is the leader in this activity. The
way the leader is decided may change, however there should only
ever be one leader for an activity.
'''
return self._leader
@GObject.property
def owner(self):
'''
Ourselves, :class:`sugar3.presence.buddy.Owner`
'''
return self._owner
FT_STATE_NONE = 0
FT_STATE_PENDING = 1
FT_STATE_ACCEPTED = 2
FT_STATE_OPEN = 3
FT_STATE_COMPLETED = 4
FT_STATE_CANCELLED = 5
FT_REASON_NONE = 0
FT_REASON_REQUESTED = 1
FT_REASON_LOCAL_STOPPED = 2
FT_REASON_REMOTE_STOPPED = 3
FT_REASON_LOCAL_ERROR = 4
FT_REASON_REMOTE_ERROR = 5
class _BaseFileTransfer(GObject.GObject):
'''
The base file transfer should not be used directly. It is used as a
base class for the incoming and outgoing file transfers.
Props:
filename (str), metadata provided by the buddy
file_size (str), size of the file being sent/received, in bytes
description (str), metadata provided by the buddy
mime_type (str), metadata provided by the buddy
buddy (:class:`sugar3.presence.buddy.Buddy`), other party
in the transfer
reason_last_change (FT_REASON_*), reason for the last state change
GObject Props:
state (FT_STATE_*), current state of the transfer
transferred_bytes (int), number of bytes transferred so far
'''
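    # For example (assumed caller code), progress can be watched through the
    # GObject properties listed above:
    #
    #   def __progress_cb(ft, pspec):
    #       print('%d / %d bytes' % (ft.props.transferred_bytes, ft.file_size))
    #
    #   ft.connect('notify::transferred_bytes', __progress_cb)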
def __init__(self):
GObject.GObject.__init__(self)
self._state = FT_STATE_NONE
self._transferred_bytes = 0
self.channel = None
self.buddy = None
self.filename = None
self.file_size = None
self.description = None
self.mime_type = None
self.reason_last_change = FT_REASON_NONE
def set_channel(self, channel):
'''
Setup the file transfer to use a given telepathy channel. This
should only be used by direct subclasses of the base file transfer.
'''
self.channel = channel
self.channel[CHANNEL_TYPE_FILE_TRANSFER].connect_to_signal(
'FileTransferStateChanged', self.__state_changed_cb)
self.channel[CHANNEL_TYPE_FILE_TRANSFER].connect_to_signal(
'TransferredBytesChanged', self.__transferred_bytes_changed_cb)
self.channel[CHANNEL_TYPE_FILE_TRANSFER].connect_to_signal(
'InitialOffsetDefined', self.__initial_offset_defined_cb)
channel_properties = self.channel[PROPERTIES_IFACE]
props = channel_properties.GetAll(CHANNEL_TYPE_FILE_TRANSFER)
self._state = props['State']
self.filename = props['Filename']
self.file_size = props['Size']
self.description = props['Description']
self.mime_type = props['ContentType']
def __transferred_bytes_changed_cb(self, transferred_bytes):
_logger.debug('__transferred_bytes_changed_cb %r', transferred_bytes)
self.props.transferred_bytes = transferred_bytes
def _set_transferred_bytes(self, transferred_bytes):
self._transferred_bytes = transferred_bytes
def _get_transferred_bytes(self):
return self._transferred_bytes
transferred_bytes = GObject.property(type=int,
default=0,
getter=_get_transferred_bytes,
setter=_set_transferred_bytes)
def __initial_offset_defined_cb(self, offset):
_logger.debug('__initial_offset_defined_cb %r', offset)
self.initial_offset = offset
def __state_changed_cb(self, state, reason):
_logger.debug('__state_changed_cb %r %r', state, reason)
self.reason_last_change = reason
self.props.state = state
def _set_state(self, state):
self._state = state
def _get_state(self):
return self._state
state = GObject.property(type=int, getter=_get_state, setter=_set_state)
def cancel(self):
'''
Request that telepathy close the file transfer channel
Spec: http://telepathy.freedesktop.org/spec/Channel.html#Method:Close
'''
self.channel[CHANNEL].Close()
class IncomingFileTransfer(_BaseFileTransfer):
'''
An incoming file transfer from another buddy. You need to first accept
the transfer (either to memory or to a file). Then you need to listen
to the state and wait until the transfer is completed. Then you can
read the file that it was saved to, or access the
:class:`Gio.MemoryOutputStream` from the `output` property.
The `output` property is different depending on how the file was accepted.
If the file was accepted to a file on the file system, it is a string
representing the path to the file. If the file was accepted to memory,
it is a :class:`Gio.MemoryOutputStream`.
'''
ready = GObject.Signal('ready', arg_types=[object])
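    # Sketch of the accept-to-file flow described above (assumed caller code):
    #
    #   ft.connect('ready', lambda ft, path: print('saved to', path))
    #   ft.connect('notify::state', state_cb)    # watch for FT_STATE_COMPLETED
    #   ft.accept_to_file('/tmp/incoming.bin')   # the path must not exist yet
    #
    # When accepting to memory instead, the 'ready' argument is a
    # Gio.MemoryOutputStream rather than a path.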
def __init__(self, connection, object_path, props):
_BaseFileTransfer.__init__(self)
channel = {}
proxy = dbus.Bus().get_object(connection.bus_name, object_path)
channel[PROPERTIES_IFACE] = dbus.Interface(proxy, PROPERTIES_IFACE)
channel[CHANNEL] = dbus.Interface(proxy, CHANNEL)
channel[CHANNEL_TYPE_FILE_TRANSFER] = dbus.Interface(
proxy, CHANNEL_TYPE_FILE_TRANSFER)
self.set_channel(channel)
self.connect('notify::state', self.__notify_state_cb)
self._destination_path = None
self._output_stream = None
self._socket_address = None
self._socket = None
self._splicer = None
def accept_to_file(self, destination_path):
'''
Accept the file transfer and write it to a new file. The file must
        not already exist.
Args:
destination_path (str): the path where a new file will be
created and saved to
'''
if os.path.exists(destination_path):
raise ValueError('Destination path already exists: %r' %
destination_path)
self._destination_path = destination_path
self._accept()
def accept_to_memory(self):
'''
Accept the file transfer. Once the state is FT_STATE_OPEN, a
        :class:`Gio.MemoryOutputStream` is accessible via the output prop.
'''
self._destination_path = None
self._accept()
def _accept(self):
channel_ft = self.channel[CHANNEL_TYPE_FILE_TRANSFER]
self._socket_address = channel_ft.AcceptFile(
SOCKET_ADDRESS_TYPE_UNIX,
SOCKET_ACCESS_CONTROL_LOCALHOST,
'',
0,
byte_arrays=True)
def __notify_state_cb(self, file_transfer, pspec):
_logger.debug('__notify_state_cb %r', self.props.state)
if self.props.state == FT_STATE_OPEN:
# Need to hold a reference to the socket so that python doesn't
# close the fd when it goes out of scope
self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(self._socket_address)
input_stream = Gio.UnixInputStream.new(self._socket.fileno(), True)
if self._destination_path is not None:
destination_file = Gio.File.new_for_path(
self._destination_path)
if self.initial_offset == 0:
self._output_stream = destination_file.create(
Gio.FileCreateFlags.PRIVATE, None)
else:
self._output_stream = destination_file.append_to()
else:
if hasattr(Gio.MemoryOutputStream, 'new_resizable'):
self._output_stream = \
Gio.MemoryOutputStream.new_resizable()
else:
self._output_stream = Gio.MemoryOutputStream()
self._output_stream.splice_async(
input_stream,
Gio.OutputStreamSpliceFlags.CLOSE_SOURCE |
Gio.OutputStreamSpliceFlags.CLOSE_TARGET,
GLib.PRIORITY_LOW, None, self.__splice_done_cb, None)
def __splice_done_cb(self, output_stream, res, user):
_logger.debug('__splice_done_cb')
self.ready.emit(self._destination_path or self._output_stream)
@GObject.Property
def output(self):
return self._destination_path or self._output_stream
class _BaseOutgoingTransfer(_BaseFileTransfer):
'''
This class provides the base of an outgoing file transfer.
You can override the `_get_input_stream` method to return any type of
Gio input stream. This will then be used to provide the file if
requested by the application. You also need to call `_create_channel`
with the length of the file in bytes during your `__init__`.
Args:
buddy (sugar3.presence.buddy.Buddy), who to send the transfer to
conn (telepathy.client.conn.Connection), telepathy connection to
use to send the transfer. Eg. `shared_activity.telepathy_conn`
filename (str), metadata sent to the receiver
description (str), metadata sent to the receiver
mime (str), metadata sent to the receiver
'''
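    # Hypothetical subclass following the pattern described above: provide an
    # input stream and declare its size up front.
    #
    #   class OutgoingStreamTransfer(_BaseOutgoingTransfer):
    #       def __init__(self, buddy, conn, stream, size,
    #                    filename, description, mime):
    #           _BaseOutgoingTransfer.__init__(
    #               self, buddy, conn, filename, description, mime)
    #           self._stream = stream
    #           self._create_channel(size)
    #
    #       def _get_input_stream(self):
    #           return self._stream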
def __init__(self, buddy, conn, filename, description, mime):
_BaseFileTransfer.__init__(self)
self.connect('notify::state', self.__notify_state_cb)
self._socket_address = None
self._socket = None
self._splicer = None
self._conn = conn
self._filename = filename
self._description = description
self._mime = mime
self.buddy = buddy
def _create_channel(self, file_size):
object_path, properties_ = self._conn.CreateChannel(dbus.Dictionary({
CHANNEL + '.ChannelType': CHANNEL_TYPE_FILE_TRANSFER,
CHANNEL + '.TargetHandleType': CONNECTION_HANDLE_TYPE_CONTACT,
CHANNEL + '.TargetHandle': self.buddy.contact_handle,
CHANNEL_TYPE_FILE_TRANSFER + '.Filename': self._filename,
CHANNEL_TYPE_FILE_TRANSFER + '.Description': self._description,
CHANNEL_TYPE_FILE_TRANSFER + '.Size': file_size,
CHANNEL_TYPE_FILE_TRANSFER + '.ContentType': self._mime,
CHANNEL_TYPE_FILE_TRANSFER + '.InitialOffset': 0}, signature='sv'))
channel = {}
proxy = dbus.Bus().get_object(self._conn.bus_name, object_path)
channel[PROPERTIES_IFACE] = dbus.Interface(proxy, PROPERTIES_IFACE)
channel[CHANNEL] = dbus.Interface(proxy, CHANNEL)
channel[CHANNEL_TYPE_FILE_TRANSFER] = dbus.Interface(
proxy, CHANNEL_TYPE_FILE_TRANSFER)
self.set_channel(channel)
channel_file_transfer = self.channel[CHANNEL_TYPE_FILE_TRANSFER]
self._socket_address = channel_file_transfer.ProvideFile(
SOCKET_ADDRESS_TYPE_UNIX, SOCKET_ACCESS_CONTROL_LOCALHOST, '',
byte_arrays=True)
def _get_input_stream(self):
raise NotImplementedError()
def __notify_state_cb(self, file_transfer, pspec):
if self.props.state == FT_STATE_OPEN:
# Need to hold a reference to the socket so that python doesn't
            # close the fd when it goes out of scope
self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(self._socket_address)
output_stream = Gio.UnixOutputStream.new(
self._socket.fileno(), True)
input_stream = self._get_input_stream()
output_stream.splice_async(
input_stream,
Gio.OutputStreamSpliceFlags.CLOSE_SOURCE |
Gio.OutputStreamSpliceFlags.CLOSE_TARGET,
GLib.PRIORITY_LOW, None, None, None)
class OutgoingFileTransfer(_BaseOutgoingTransfer):
'''
An outgoing file transfer to send from a file (on the computer's file
system).
Note that the `path` argument is the path for the file that will be
sent, whereas the `filename` argument is only for metadata.
Args:
path (str), path of the file to send
'''
def __init__(self, buddy, conn, path, filename, description, mime):
_BaseOutgoingTransfer.__init__(
self, buddy, conn, filename, description, mime)
self._path = path
file_size = os.stat(path).st_size
self._create_channel(file_size)
def _get_input_stream(self):
return Gio.File.new_for_path(self._path).read(None)
class OutgoingBlobTransfer(_BaseOutgoingTransfer):
'''
An outgoing file transfer to send from a string in memory.
Args:
blob (str), data to send
'''
def __init__(self, buddy, conn, blob, filename, description, mime):
_BaseOutgoingTransfer.__init__(
self, buddy, conn, filename, description, mime)
self._blob = blob
self._create_channel(len(self._blob))
def _get_input_stream(self):
return Gio.MemoryInputStream.new_from_data(self._blob, None)
class _TextChannelWrapper(object):
'''Wrapper for a telepathy Text Channel'''
def __init__(self, text_chan, conn):
'''Connect to the text channel'''
self._activity_cb = None
self._activity_close_cb = None
self._text_chan = text_chan
self._conn = conn
self._signal_matches = []
m = self._text_chan[CHANNEL_INTERFACE].connect_to_signal(
'Closed', self._closed_cb)
self._signal_matches.append(m)
def post(self, msg):
if msg is not None:
_logger.debug('post')
self._send(json.dumps(msg))
def _send(self, text):
'''Send text over the Telepathy text channel.'''
_logger.debug('sending %s' % text)
if self._text_chan is not None:
self._text_chan[CHANNEL_TYPE_TEXT].Send(
CHANNEL_TEXT_MESSAGE_TYPE_NORMAL, text)
def close(self):
'''Close the text channel.'''
_logger.debug('Closing text channel')
try:
self._text_chan[CHANNEL_INTERFACE].Close()
except Exception:
_logger.debug('Channel disappeared!')
self._closed_cb()
def _closed_cb(self):
'''Clean up text channel.'''
for match in self._signal_matches:
match.remove()
self._signal_matches = []
self._text_chan = None
if self._activity_close_cb is not None:
self._activity_close_cb()
def set_received_callback(self, callback):
'''Connect the function callback to the signal.
callback -- callback function taking buddy and text args
'''
if self._text_chan is None:
return
self._activity_cb = callback
m = self._text_chan[CHANNEL_TYPE_TEXT].connect_to_signal(
'Received', self._received_cb)
self._signal_matches.append(m)
def handle_pending_messages(self):
'''Get pending messages and show them as received.'''
for identity, timestamp, sender, type_, flags, text in \
self._text_chan[
CHANNEL_TYPE_TEXT].ListPendingMessages(False):
self._received_cb(identity, timestamp, sender, type_, flags, text)
def _received_cb(self, identity, timestamp, sender, type_, flags, text):
'''Handle received text from the text channel.
Converts sender to a Buddy.
Calls self._activity_cb which is a callback to the activity.
'''
_logger.debug('received_cb %r %s' % (type_, text))
if type_ != 0:
# Exclude any auxiliary messages
return
msg = json.loads(text)
if self._activity_cb:
try:
self._text_chan[CHANNEL_INTERFACE_GROUP]
except Exception:
# One to one XMPP chat
nick = self._conn[
CONN_INTERFACE_ALIASING].RequestAliases([sender])[0]
buddy = {'nick': nick, 'color': '#000000,#808080'}
_logger.debug('exception: received from sender %r buddy %r' %
(sender, buddy))
else:
# XXX: cache these
buddy = self._get_buddy(sender)
_logger.debug('Else: received from sender %r buddy %r' %
(sender, buddy))
self._activity_cb(buddy, msg)
self._text_chan[
CHANNEL_TYPE_TEXT].AcknowledgePendingMessages([identity])
else:
_logger.debug('Throwing received message on the floor'
' since there is no callback connected. See'
' set_received_callback')
def set_closed_callback(self, callback):
'''Connect a callback for when the text channel is closed.
callback -- callback function taking no args
'''
_logger.debug('set closed callback')
self._activity_close_cb = callback
def _get_buddy(self, cs_handle):
'''Get a Buddy from a (possibly channel-specific) handle.'''
# XXX This will be made redundant once Presence Service
# provides buddy resolution
# Get the Presence Service
pservice = presenceservice.get_instance()
# Get the Telepathy Connection
tp_name, tp_path = pservice.get_preferred_connection()
obj = dbus.Bus().get_object(tp_name, tp_path)
conn = dbus.Interface(obj, CONN_INTERFACE)
group = self._text_chan[CHANNEL_INTERFACE_GROUP]
my_csh = group.GetSelfHandle()
if my_csh == cs_handle:
handle = conn.GetSelfHandle()
elif group.GetGroupFlags() & \
CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES:
handle = group.GetHandleOwners([cs_handle])[0]
else:
handle = cs_handle
# XXX: deal with failure to get the handle owner
assert handle != 0
return pservice.get_buddy_by_telepathy_handle(
tp_name, tp_path, handle)
| gpl-3.0 | 3,951,370,830,328,633,000 | 36.438279 | 79 | 0.616038 | false |
Teifion/concision | views/old_forms.py | 1 | 9057 | # import transaction
# import datetime
from pyramid.httpexceptions import HTTPFound
# from pyramid.renderers import get_renderer
from ..models import (
StoredQuery,
)
# import json
from ..lib import joins
from .. import config
def alter_query_type(request):
request.do_not_log = True
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
new_type = request.params['new_type']
the_query.jdata['type'] = new_type
the_query.compress_data()
config['DBSession'].add(the_query)
if new_type == "advanced":
return HTTPFound(location=request.route_url("concision.adv_query.overview", query_id=query_id))
return HTTPFound(location="%s#columns" % request.route_url("concision.query.edit", query_id=query_id))
def edit_columns(request):
request.do_not_log = True
# the_user = config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
columns = []
for s, the_source in config['sources'].items():
for c in the_source.columns:
key = "{}.{}".format(s, c)
if key in request.params:
columns.append(key)
the_query.jdata['columns'] = columns
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#columns" % request.route_url("concision.query.edit", query_id=query_id))
def add_filter(request):
request.do_not_log = True
# the_user = config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
new_filter = {
"column": request.params['column'],
"operator": request.params['operator'],
"value": request.params['value'],
}
existing_filters = the_query.jdata.get('filters', [])
existing_filters.append(new_filter)
the_query.jdata['filters'] = existing_filters
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#filters" % request.route_url("concision.query.edit", query_id=query_id))
def edit_filter(request):
request.do_not_log = True
# the_user = config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
new_filter = {
"column": request.params['column'],
"operator": request.params['operator'],
"value": request.params['value'],
}
filter_id = int(request.params['filter_id'])
existing_filters = the_query.jdata.get('filters', [])
existing_filters[filter_id] = new_filter
the_query.jdata['filters'] = existing_filters
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#filters" % request.route_url("concision.query.edit", query_id=query_id))
def delete_filter(request):
request.do_not_log = True
# the_user = config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
filter_id = int(request.params['f'])
existing_filters = the_query.jdata.get('filters', [])
existing_filters = existing_filters[:filter_id] + existing_filters[filter_id+1:]
the_query.jdata['filters'] = existing_filters
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#filters" % request.route_url("concision.query.edit", query_id=query_id))
def edit_key(request):
request.do_not_log = True
# the_user = config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
the_query.jdata['key'] = request.params.get('query_key', None)
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#graphing" % request.route_url("concision.query.edit", query_id=query_id))
def edit_groupby(request):
request.do_not_log = True
# the_user = config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
the_query.jdata['group_by'] = "group_by" in request.params
groupings = {}
for s, the_source in config['sources'].items():
for c in the_source.columns:
key = "{}.{}".format(s, c)
if key in request.params:
groupings[key] = request.params[key]
the_query.jdata['group_by_funcs'] = groupings
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#groupby" % request.route_url("concision.query.edit", query_id=query_id))
def add_orderby(request):
request.do_not_log = True
# the_user = config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
new_orderby = {
"column": request.params['column'],
"order": request.params['order'],
}
existing_orderby = the_query.jdata.get('orderby', [])
existing_orderby.append(new_orderby)
the_query.jdata['orderby'] = existing_orderby
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#orderby" % request.route_url("concision.query.edit", query_id=query_id))
def edit_orderby(request):
request.do_not_log = True
# the_user = config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
new_orderby = {
"column": request.params['column'],
"order": request.params['order'],
}
orderby_id = int(request.params['orderby_id'])
existing_orderby = the_query.jdata.get('orderby', [])
existing_orderby[orderby_id] = new_orderby
the_query.jdata['orderby'] = existing_orderby
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#orderby" % request.route_url("concision.query.edit", query_id=query_id))
def delete_orderby(request):
request.do_not_log = True
# config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
orderby_id = int(request.params['o'])
existing_orderby = the_query.jdata.get('orderby', [])
existing_orderby = existing_orderby[:orderby_id] + existing_orderby[orderby_id+1:]
the_query.jdata['orderby'] = existing_orderby
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#orderby" % request.route_url("concision.query.edit", query_id=query_id))
def add_join(request):
request.do_not_log = True
# the_user = config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
new_join = {
"left": request.params['left'],
"right": request.params['right'],
}
new_source = request.params['right'].split(".")[0]
existing_joins = the_query.jdata.get('joins', [])
existing_joins.append(new_join)
the_query.jdata['joins'] = existing_joins
the_query.jdata['sources'].append(new_source)
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#join" % request.route_url("concision.query.edit", query_id=query_id))
def delete_join(request):
request.do_not_log = True
# config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
join_id = int(request.params['j'])
the_query.jdata = joins.remove_join(the_query.jdata, join_id)
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#join" % request.route_url("concision.query.edit", query_id=query_id))
| bsd-2-clause | 3,480,844,766,862,487,000 | 33.701149 | 107 | 0.643259 | false |
amsimoes/bat-country | batcountry/batcountry.py | 1 | 6348 | # import the necessary packages
from __future__ import print_function
from google.protobuf import text_format
from cStringIO import StringIO
from PIL import Image
import scipy.ndimage as nd
import numpy as np
import caffe
import os
class BatCountry:
def __init__(self, base_path, deploy_path, model_path,
patch_model="./tmp.prototxt", mean=(104.0, 116.0, 122.0),
channels=(2, 1, 0)):
# if the deploy path is None, set the default
if deploy_path is None:
deploy_path = base_path + "/deploy.prototxt"
# if the model path is None, set it to the default GoogleLeNet model
if model_path is None:
model_path = base_path + "/imagenet.caffemodel"
# check to see if the model should be patched to compute gradients
if patch_model:
model = caffe.io.caffe_pb2.NetParameter()
text_format.Merge(open(deploy_path).read(), model)
model.force_backward = True
f = open(patch_model, "w")
f.write(str(model))
f.close()
# load the network and store the patched model path
self.net = caffe.Classifier(patch_model, model_path, mean=np.float32(mean),
channel_swap=channels)
self.patch_model = patch_model
def dream(self, image, iter_n, octave_n, octave_scale=None,
end="inception_4c/output", clip=True, step_fn=None, objective_fn=None,
preprocess_fn=None, deprocess_fn=None, verbose=True, visualize=False,
**step_params):
if iter_n is None:
iter_n = 10
if octave_n is None:
octave_n = 4
if octave_scale is None:
octave_scale = 1.4
# if a step function has not been supplied, initialize it as the
# standard gradient ascent step
if step_fn is None:
step_fn = BatCountry.gradient_ascent_step
# if the objective function has not been supplied, initialize it
# as the L2 objective
if objective_fn is None:
objective_fn = BatCountry.L2_objective
# if the preprocess function has not been supplied, initialize it
if preprocess_fn is None:
preprocess_fn = BatCountry.preprocess
# if the deprocess function has not been supplied, initialize it
if deprocess_fn is None:
deprocess_fn = BatCountry.deprocess
# initialize the visualization list
visualizations = []
# prepare base image_dims for all octaves
octaves = [preprocess_fn(self.net, image)]
for i in xrange(octave_n - 1):
octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale,
1.0 / octave_scale), order=1))
# allocate image for network-produced details
detail = np.zeros_like(octaves[-1])
src = self.net.blobs["data"]
for octave, octave_base in enumerate(octaves[::-1]):
h, w = octave_base.shape[-2:]
if octave > 0:
# upscale details from the previous octave
h1, w1 = detail.shape[-2:]
detail = nd.zoom(detail, (1, 1.0 * h/ h1, 1.0 * w / w1), order=1)
# resize the network's input image size
src.reshape(1, 3, h, w)
src.data[0] = octave_base + detail
for i in xrange(iter_n):
step_fn(self.net, end=end, clip=clip, objective_fn=objective_fn,
**step_params)
# visualization
vis = deprocess_fn(self.net, src.data[0])
# adjust image contrast if clipping is disabled
if not clip:
vis = vis * (255.0 / np.percentile(vis, 99.98))
if verbose:
print("octave={}, iter={}, layer={}, image_dim={}".format(octave,
i, end, vis.shape))
# check to see if the visualization list should be
# updated
if visualize:
k = "octave_{}-iter_{}-layer_{}".format(octave, i,
end.replace("/", "_"))
visualizations.append((k, vis))
# extract details produced on the current octave
detail = src.data[0] - octave_base
# grab the resulting image
r = deprocess_fn(self.net, src.data[0])
# check to see if the visualizations should be included
if visualize:
r = (r, visualizations)
return r
@staticmethod
def gradient_ascent_step(net, step_size=1.5, end="inception_4c/output",
jitter=32, clip=True, objective_fn=None, **objective_params):
# if the objective function is None, initialize it as
# the standard L2 objective
if objective_fn is None:
objective_fn = BatCountry.L2_objective
# input image is stored in Net's 'data' blob
src = net.blobs["data"]
dst = net.blobs[end]
# apply jitter shift
ox, oy = np.random.randint(-jitter, jitter + 1, 2)
src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2)
net.forward(end=end)
objective_fn(dst, **objective_params)
net.backward(start=end)
g = src.diff[0]
# apply normalized ascent step to the input image
src.data[:] += step_size / np.abs(g).mean() * g
# unshift image
src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2)
# unshift image
if clip:
bias = net.transformer.mean["data"]
src.data[:] = np.clip(src.data, -bias, 255 - bias)
def layers(self):
# return the layers of the network
return self.net._layer_names
def cleanup(self):
# remove the patched model from disk
os.remove(self.patch_model)
def prepare_guide(self, image, end="inception_4c/output", maxW=224, maxH=224,
preprocess_fn=None):
# if the preprocess function has not been supplied, initialize it
if preprocess_fn is None:
preprocess_fn = BatCountry.preprocess
# grab dimensions of input image
(w, h) = image.size
# GoogLeNet was trained on images with maximum width and heights
# of 224 pixels -- if either dimension is larger than 224 pixels,
# then we'll need to do some resizing
		(nW, nH) = (maxW, maxH)
		if w != maxW or h != maxH:
image = np.float32(image.resize((nW, nH), Image.BILINEAR))
(src, dst) = (self.net.blobs["data"], self.net.blobs[end])
src.reshape(1, 3, nH, nW)
src.data[0] = preprocess_fn(self.net, image)
self.net.forward(end=end)
guide_features = dst.data[0].copy()
return guide_features
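	# Hypothetical guided-dream usage (a sketch, not part of the original
	# docs): prepare_guide() extracts guide features, and dream() forwards
	# extra keyword arguments down to the objective function, so
	# guided_objective() receives them as objective_features.
	#
	#   features = bc.prepare_guide(Image.open("guide.jpg"), end=layer)
	#   result = bc.dream(np.float32(Image.open("input.jpg")), 20, 4,
	#                     end=layer,
	#                     objective_fn=BatCountry.guided_objective,
	#                     objective_features=features)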
@staticmethod
def L2_objective(dst):
dst.diff[:] = dst.data
@staticmethod
def guided_objective(dst, objective_features):
x = dst.data[0].copy()
y = objective_features
ch = x.shape[0]
x = x.reshape(ch,-1)
y = y.reshape(ch,-1)
# compute the matrix of dot-products with guide features
A = x.T.dot(y)
# select ones that match best
dst.diff[0].reshape(ch, -1)[:] = y[:,A.argmax(1)]
@staticmethod
def preprocess(net, img):
return np.float32(np.rollaxis(img, 2)[::-1]) - net.transformer.mean["data"]
@staticmethod
def deprocess(net, img):
return np.dstack((img + net.transformer.mean["data"])[::-1])
| mit | -3,119,327,493,951,980,500 | 28.525581 | 78 | 0.675961 | false |
KonradBreitsprecher/espresso | doc/tutorials/09-swimmer_reactions/EXERCISES/reaction.py | 1 | 9461 | ################################################################################
# #
# Copyright (C) 2010,2011,2012,2013,2014,2015,2016 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Catalytic Reactions: Enhanced Diffusion Tutorial #
# #
################################################################################
from __future__ import print_function
import numpy as np
import os
import sys
import time
import espressomd
from espressomd import assert_features
from espressomd.observables import ParticlePositions, ParticleBodyAngularMomentum
from espressomd.correlators import Correlator
from espressomd.reaction import Reaction
################################################################################
# Read in the active velocity from the command prompt
if len(sys.argv) != 2:
print("Usage:",sys.argv[0],"<passive/active = 0/1>")
exit()
active = int(sys.argv[1])
if (active != 0) and (active != 1):
print("Usage:",sys.argv[0],"<passive/active = 0/1>")
exit()
# Set the parameters
box_l = 10
radius = 3.0
csmall = 0.1
rate = 1000.0
# Print input parameters
print("Box length: {}".format(box_l))
print("Colloid radius: {}".format(radius))
print("Particle concentration: {}".format(csmall))
print("Reaction rate: {}".format(rate))
print("Active or Passive: {}".format(active))
# Create output directory
if active == 0:
outdir = "./passive-system"
else:
outdir = "./active-system"
try:
os.makedirs(outdir)
except:
print("INFO: Directory \"{}\" exists".format(outdir))
################################################################################
# Setup system parameters
equi_steps = 250
equi_length = 100
prod_steps = 2000
prod_length = 100
dt = 0.01
system = espressomd.System(box_l=[box_l, box_l, box_l])
system.cell_system.skin = 0.1
system.time_step = dt
system.min_global_cut = 1.1*radius
# Set up the random seeds
system.seed = np.random.randint(0,2**31-1)
################################################################################
# Thermostat parameters
# Catalyzer is assumed to be larger, thus larger friction
frict_trans_colloid = 20.0
frict_rot_colloid = 20.0
# Particles are small and have smaller friction
frict_trans_part = 1.0
frict_rot_part = 1.0
# Temperature
temp = 1.0
################################################################################
# Set up the swimmer
## Exercise 1 ##
# Determine the initial position of the particle, which
# should be in the center of the box.
x0pnt = ...
y0pnt = ...
z0pnt = ...
# Note that the swimmer needs to rotate freely
cent = len(system.part)
system.part.add(id=cent,pos=[x0pnt,y0pnt,z0pnt],type=0,temp=temp,
gamma=frict_trans_colloid,
gamma_rot=frict_rot_colloid,
rotation=[1,1,1])
# Set up the particles
## Exercise 2 ##
# Above, we have set the concentration of the particles in the
# variable $csmall. The concentration of both species of particles is
# equal. Determine *how many* particles of one species there are.
# There are two species of equal concentration
nB = ...
nA = nB
print("Number of reactive A particles: {}".format(nB))
print("Number of reactive B particles: {}".format(nA))
for i in range(nA):
x = box_l*np.random.random()
y = box_l*np.random.random()
z = box_l*np.random.random()
# Prevent overlapping the colloid
while (x-x0pnt)**2 + (y-y0pnt)**2 + (z-z0pnt)**2 < 1.15*radius**2:
x = box_l*np.random.random()
y = box_l*np.random.random()
z = box_l*np.random.random()
# reactants and products do not need to rotate
system.part.add(pos=[x,y,z],type=1,temp=temp,
gamma=frict_trans_part,
gamma_rot=frict_rot_part,
rotation=[0,0,0])
for i in range(nB):
x = box_l*np.random.random()
y = box_l*np.random.random()
z = box_l*np.random.random()
# Prevent overlapping the colloid
while (x-x0pnt)**2 + (y-y0pnt)**2 + (z-z0pnt)**2 < 1.15*radius**2:
x = box_l*np.random.random()
y = box_l*np.random.random()
z = box_l*np.random.random()
# reactants and products do not need to rotate
system.part.add(pos=[x,y,z],type=2,temp=temp,
gamma=frict_trans_part,
gamma_rot=frict_rot_part,
rotation=[0,0,0])
print("box: {}, npart: {}".format(system.box_l,len(system.part)))
################################################################################
# Set up the WCA potential
## Exercise 3 ##
# Why are there two different cutoff lengths for the LJ interaction
# catalyzer/product and catalyzer/reactant?
eps = 5.0
sig = 1.0
shift = 0.25
roff = radius - 0.5*sig
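# the offset shifts the LJ interaction so that it acts from the colloid
# surface rather than from its center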
# central and A particles
cut = 2**(1/6.)*sig
system.non_bonded_inter[0,1].lennard_jones.set_params(epsilon=eps, sigma=sig, cutoff=cut, shift=shift, offset=roff)
# central and B particles (larger cutoff)
cut = 1.5*sig
system.non_bonded_inter[0,2].lennard_jones.set_params(epsilon=eps, sigma=sig, cutoff=cut, shift=shift, offset=roff)
################################################################################
# Set up the reaction
cat_range = radius + 1.0*sig
cat_rate = rate
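# reactants within cat_range of the colloid center (roughly one particle
# diameter beyond its surface) react with rate cat_rate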
## Exercise 4 ##
# We have read the activity parameter from the command line into
# $active, where 0 means off and 1 means on. When $active = 0 we can
# simply go on, but when $active = 1 we have to set up the reaction.
# Check the $active parameter and setup a reaction for the catalyzer
# of type 0 with the reactants of type 1 and products of type 2. The
# reaction range is stored in $cat_range, the reaction rate in
# $cat_rate. Use the number-conserving scheme by setting swap on.
...
################################################################################
# Perform warmup
cap = 1.0
warm_length = 100
## Exercise 5 ##
# Consult the User Guide for minimize_energy to find out the
# difference to warmup with explicit force-capping.
system.minimize_energy.init(f_max=cap,max_steps=warm_length,gamma=1.0/20.0,max_displacement=0.05)
system.minimize_energy.minimize()
################################################################################
# Enable the thermostat
## Exercise 6 ##
# Why do we enable the thermostat only after warmup?
system.thermostat.set_langevin(kT=temp, gamma=frict_trans_colloid)
################################################################################
# Perform equilibration
# Integrate
for k in range(equi_steps):
print("Equilibration: {} of {}".format(k,equi_steps))
system.integrator.run(equi_length)
################################################################################
for cnt in range(5):
# Set up the MSD calculation
tmax = prod_steps*prod_length*dt
pos_id = ParticlePositions(ids=[cent])
msd = Correlator(obs1=pos_id,
corr_operation="square_distance_componentwise",
dt=dt,
tau_max=tmax,
tau_lin=16)
system.auto_update_correlators.add(msd)
## Exercise 7a ##
# Construct the auto-correlators for the AVACF, using the example
# of the MSD.
# Initialize the angular velocity auto-correlation function
# (AVACF) correlator
...
# Perform production
# Integrate
for k in range(prod_steps):
print("Production {} of 5: {} of {}".format(cnt+1,k,prod_steps))
system.integrator.run(prod_length)
# Finalize the MSD and export
system.auto_update_correlators.remove(msd)
msd.finalize()
np.savetxt("{}/msd_{}.dat".format(outdir,cnt),msd.result())
## Exercise 7b ##
# Finalize the angular velocity auto-correlation function (AVACF)
# correlator and write the result to a file.
...
np.savetxt("{}/avacf_{}.dat".format(outdir,cnt),avacf.result())
| gpl-3.0 | -1,595,223,305,182,569,700 | 31.071186 | 115 | 0.530599 | false |
heilaaks/snippy | tests/test_api_search_snippet.py | 1 | 58285 | # -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#
# snippy - software development and maintenance notes manager.
# Copyright 2017-2020 Heikki J. Laaksonen <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""test_api_search_snippet: Test GET /snippets API endpoint."""
from falcon import testing
import falcon
import pytest
from tests.lib.content import Content
from tests.lib.content import Storage
from tests.lib.snippet import Snippet
pytest.importorskip('gunicorn')
# pylint: disable=unsubscriptable-object
class TestApiSearchSnippet(object): # pylint: disable=too-many-public-methods, too-many-lines
"""Test GET /snippets API."""
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_001(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields. The search
query matches to two snippets and both of them are returned. The
search is sorted based on one field. The limit defined in the search
query is not exceeded.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1523'
}
expect_body = {
'meta': {
'count': 2,
'limit': 20,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/vnd.api+json'},
query_string='sall=docker%2Cswarm&limit=20&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_002(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields. The search
        query matches to four snippets but the limit defined in the search
        query returns only two of them, sorted by the brief field. The sorting
        must be applied before the limit is applied.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1658'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_003(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields. The search
query matches to two snippets but only one of them is returned
because the limit parameter was set to one. In this case the sort is
descending and the last match must be returned. The resulting fields
are limited only to brief and category.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '218'
}
expect_body = {
'meta': {
'count': 1,
'limit': 1,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': {field: Storage.forced[field] for field in ['brief', 'category']}
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker&limit=1&sort=-brief&fields=brief,category')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_004(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields but return
only two fields. This syntax that separates the sorted fields causes
the parameter to be processed in string context which must handle
multiple fields.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '218'
}
expect_body = {
'meta': {
'count': 1,
'limit': 1,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': {field: Storage.forced[field] for field in ['brief', 'category']}
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker&limit=1&sort=-brief&fields=brief%2Ccategory')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_005(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields. The search
        query matches to four snippets but the limit defined in the search
        query returns only two of them, sorted by the creation time in
        descending order.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1626'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.NETCAT_UUID,
'attributes': Storage.netcat
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&limit=2&sort=-created,-brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_006(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields sorted with
two fields. This syntax that separates the sorted fields causes the
parameter to be processed in string context which must handle multiple
fields.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1626'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.NETCAT_UUID,
'attributes': Storage.netcat
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&limit=2&sort=-created%2C-brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_007(server):
"""Search snippets with GET.
        Try to send GET /snippets with the sort parameter set to a field name
        that does not exist. In this case sorting must fall back to the
        default sorting.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '385'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'sort option validation failed for non existent field=notexisting'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cswarm&limit=20&sort=notexisting')
assert result.status == falcon.HTTP_400
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_008(server):
"""Search snippets with GET.
Send GET /snippets to return only defined fields. In this case the
fields are defined by setting the 'fields' parameter multiple times.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '218'
}
expect_body = {
'meta': {
'count': 1,
'limit': 1,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': {field: Storage.forced[field] for field in ['brief', 'category']}
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker&limit=1&sort=-brief&fields=brief&fields=category')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_009(server):
"""Search snippets with GET.
Try to send GET /snippets with search keywords that do not result
        in any matches.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '340'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'cannot find resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=notfound&limit=10&sort=-brief&fields=brief,category')
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_010(server):
"""Search snippets with GET from tag fields.
Try to send GET /snippets with search tag keywords that do not
        result in any matches.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '340'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'cannot find resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='stag=notfound&limit=10&sort=-brief&fields=brief,category')
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_011(server):
"""Search snippet from groups fields.
Try to send GET /snippets with search groups keywords that do not
        result in any matches.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '340'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'cannot find resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sgrp=notfound&limit=10&sort=-brief&fields=brief,category')
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_012(server):
"""Search snippet with digets.
Send GET /snippets/{id} to read a snippet based on digest. In this
case the snippet is found. In this case the URI path contains 15 digit
digest. The returned self link must be the 16 digit link.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '871'
}
expect_body = {
'meta': {
'count': 1,
'limit': 20,
'offset': 0,
'total': 1
},
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/' + Snippet.REMOVE_UUID
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52a02b6',
headers={'accept': 'application/json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_013(server):
"""Search snippet with digets.
Try to send GET /snippets/{id} with a digest that is not found.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '395'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'content identity: 101010101010101 was not unique and matched to: 0 resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/101010101010101',
headers={'accept': 'application/json'})
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_014(server):
"""Search snippet without search parameters.
Send GET /snippets without defining search parameters. In this
case all content should be returned.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1523'
}
expect_body = {
'meta': {
'count': 2,
'limit': 20,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='limit=20&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_015(server):
"""Search snippet without search parameters.
Send GET /snippets without defining search parameters. In this
case only one snippet must be returned because the limit is set to
one. Also the sorting based on brief field causes the last snippet
to be returned.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '830'
}
expect_body = {
'meta': {
'count': 1,
'limit': 1,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='limit=1&sort=-brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.parametrize('server', [['server', '--server-host', 'localhost:8080', '-q']], indirect=True)
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_016(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all attributes. The
search query matches to two snippets and both of them are returned.
The response JSON is sent as pretty printed.
        TODO: The groups refactoring changed the length from 2196 to 2278.
Why so much? Is there a problem in the result JSON?
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '2709'
}
expect_body = {
'meta': {
'count': 2,
'limit': 20,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/vnd.api+json'},
query_string='sall=docker%2Cswarm&limit=20&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_001(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied. The offset is
        zero and limit is bigger than the amount of search results so that
all results fit into one response. Because all results fit into the
same response, there is no need for next and prev links and those
must not be set.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '3425'
}
expect_body = {
'meta': {
'count': 4,
'limit': 10,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}, {
'type': 'snippet',
'id': Snippet.NETCAT_UUID,
'attributes': Storage.netcat
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=10&offset=0&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=10&offset=0&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=10&offset=0&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=0&limit=10&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_002(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied. The offset is
        zero and limit is smaller than the amount of search results so that
all results do not fit into one response. Because this is the first
page, the prev link must not be set.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '2110'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'next': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=2&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=2&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=0&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_003(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied. The offset is
        non zero and the second page is requested. The requested second page
        is the last page. Because of this, the next link must not be set.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1942'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 2,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}, {
'type': 'snippet',
'id': Snippet.NETCAT_UUID,
'attributes': Storage.netcat
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=2&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'prev': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=2&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=2&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_004(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied. The offset is
        non zero and the second page is requested. The requested second page
        is not the last page. In this case the last page has exactly as many
        hits as fit into one page (an even last page). All pagination links
        must be set.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1528'
}
expect_body = {
'meta': {
'count': 1,
'limit': 1,
'offset': 1,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=1&offset=1&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=1&offset=0&sall=docker%2Cnmap&sort=brief',
'next': 'http://falconframework.org/api/snippy/rest/snippets?limit=1&offset=2&sall=docker%2Cnmap&sort=brief',
'prev': 'http://falconframework.org/api/snippy/rest/snippets?limit=1&offset=0&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=1&offset=3&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=1&limit=1&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_005(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied. The offset is
        non zero and the second page is requested. The requested second page
        is not the last page. In this case the last page has fewer items than
        fit into a page (an uneven last page). Also the first page offset is
        not aligned to the limit and must be correctly set to zero. All
        pagination links must be set.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '2289'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 1,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=1&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'next': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=3&sall=docker%2Cnmap&sort=brief',
'prev': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=3&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=1&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_006(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied. The offset is
        non zero and the last page is requested. Because the original request
        was not started with offset zero, the first and prev page offsets are
        not aligned to the limit. Here the offset is also exactly the same as
        the total amount of hits.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1181'
}
expect_body = {
'meta': {
'count': 1,
'limit': 2,
'offset': 3,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.NETCAT_UUID,
'attributes': Storage.netcat
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=3&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'prev': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=1&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=3&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=3&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'import-umount')
def test_api_search_snippet_paginate_007(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied. The offset and
        limit are set so that the last page contains fewer hits than the limit
and the requested page is not the last or the second last page.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '2146'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 5
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cumount%2Cnmap&sort=brief',
'next': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=2&sall=docker%2Cumount%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cumount%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=4&sall=docker%2Cumount%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cumount%2Cnmap&offset=0&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'caller')
def test_api_search_snippet_paginate_008(server):
"""Search snippets with GET.
        Try to send GET /snippets with a pagination offset that is the same
        as the number of snippets stored in the database.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '340'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'cannot find resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=4&limit=2&sort=brief')
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'caller')
def test_api_search_snippet_paginate_009(server):
"""Search snippets with GET.
        Try to send GET /snippets with a pagination offset that is larger
        than the total number of hits.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '340'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'cannot find resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=10&limit=10&sort=brief')
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_010(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied with limit zero.
This is a special case that returns the metadata but the data list
is empty.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '71'
}
expect_body = {
'meta': {
'count': 0,
'limit': 0,
'offset': 0,
'total': 4
},
'data': [],
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=0&limit=0&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'caller')
def test_api_search_snippet_paginate_011(server):
"""Search snippets with GET.
Try to send GET /snippets with negative offset.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '364'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'search offset is not a positive integer: -4'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=-4&limit=2&sort=brief')
assert result.status == falcon.HTTP_400
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'caller')
def test_api_search_snippet_paginate_012(server):
"""Search snippets with GET.
Try to send GET /snippets with negative offset and limit.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '520'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'search result limit is not a positive integer: -2'
}, {
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'search offset is not a positive integer: -4'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=-4&limit=-2&sort=brief')
assert result.status == falcon.HTTP_400
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'caller')
def test_api_search_snippet_paginate_013(server):
"""Search snippets with GET.
Try to send GET /snippets when offset and limit are not numbers.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '533'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'search result limit is not a positive integer: 0xdeadbeef'
}, {
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'search offset is not a positive integer: ABCDEFG'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=ABCDEFG&limit=0xdeadbeef&sort=brief')
assert result.status == falcon.HTTP_400
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_001(server):
"""Get specific snippet field.
Send GET /snippets/{id}/data for existing snippet.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '267'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': {
'data': Storage.remove['data']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/11cd5827-b6ef-4067-b5ac-3ceac07dde9f/data'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52a02b63/data',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_002(server):
"""Get specific snippet field.
Send GET /snippets/{id}/brief for existing snippet. In this case
        the URI digest is only 10 octets. The returned link must contain
        the 16 octet digest.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '262'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': {
'brief': Storage.remove['brief']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/11cd5827-b6ef-4067-b5ac-3ceac07dde9f/brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52/brief',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_003(server):
"""Get specific snippet field.
Send GET /snippets/{id}/groups for existing snippet.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '231'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': {
'groups': Storage.remove['groups']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/11cd5827-b6ef-4067-b5ac-3ceac07dde9f/groups'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52/groups',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_004(server):
"""Get specific snippet field.
Send GET /snippets/{id}/tags for existing snippet.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '272'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': {
'tags': Storage.remove['tags']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/11cd5827-b6ef-4067-b5ac-3ceac07dde9f/tags'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52/tags',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_005(server):
"""Get specific snippet field.
Send GET /snippets/{id}/links for existing snippet.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '279'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': {
'links': Storage.remove['links']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/11cd5827-b6ef-4067-b5ac-3ceac07dde9f/links'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52/links',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_field_006(server):
"""Get specific snippet field.
Try to send GET /snippets/{id}/notexist for existing snippet. In
this case the field name does not exist.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '360'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'resource field does not exist: notexist'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52/notexist',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_400
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_field_007(server):
"""Get specific snippet field.
Try to send GET /snippets/0101010101/brief for non existing
snippet with valid field.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '390'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'content identity: 0101010101 was not unique and matched to: 0 resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/0101010101/brief',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_008(server):
"""Get specific snippet field.
Send GET /snippets/{id}/brief for existing snippet. In this case
the URI id is full length UUID that must be found.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '251'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Storage.forced['uuid'],
'attributes': {
'brief': Storage.forced['brief']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/12cd5827-b6ef-4067-b5ac-3ceac07dde9f/brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/12cd5827-b6ef-4067-b5ac-3ceac07dde9f/brief',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_field_009(server):
"""Get specific snippet field.
        Try to send GET /snippets/{id} for an existing snippet with a short
        form of the UUID. The short form must not be accepted and no results
        must be returned. The UUID is intended to be used as a fully matching
        identity.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '416'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'content identity: 116cd5827-b6ef-4067-b5ac-3ceac07dde9 was not unique and matched to: 0 resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/116cd5827-b6ef-4067-b5ac-3ceac07dde9',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_pytest_fixtures(server):
"""Test pytest fixtures with pytest specific mocking.
Send GET /snippets and search keywords from all fields. The search
query matches to two snippets and both of them are returned. The
search is sorted based on one field. The limit defined in the search
query is not exceeded.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1523'
}
expect_body = {
'meta': {
'count': 2,
'limit': 20,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cswarm&limit=20&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('import-remove', 'import-forced', 'import-exited', 'import-netcat')
def test_pytest_fixtures2(server):
"""Test pytest fixtures with pytest specific mocking.
Send GET /snippets and search keywords from all fields. The search
        query matches to four snippets but the limit defined in the search
        query returns only two of them, sorted by the brief field. The sorting
        must be applied before the limit is applied.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1658'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@classmethod
def teardown_class(cls):
"""Teardown tests."""
Content.delete()
| agpl-3.0 | -5,150,466,451,538,383,000 | 38.30209 | 135 | 0.556455 | false |
DawidvanGraan/HomeAutomation | Raspberry/api.py | 1 | 3437 | #!/bin/sh
from flask import Flask, jsonify
import smbus
import time
import RPi.GPIO as io
import requests
# Plex Call
plexUrl = 'http://192.168.1.100/jsonrpc?request={"jsonrpc": "2.0", "method": "Player.GetItem", "params": { "properties": ["title", "album", "duration", "showtitle"], "playerid": 1 }, "id": "VideoGetItem"}';
I2C_ADDRESS = 0x4a
app = Flask(__name__)
gpioBigGate = 18 # Big Gate
gpioSmallGate = 23 # Small Gate
gpioGarageRight = 24 # Garage Right
gpioGarageLeft = 25 # Garage Left
mag_switch1 = 22 # Garage Door Right
mag_switch2 = 17 # Garage Door Left
# I2C BUS
bus = smbus.SMBus(0)
# GPIO
io.setmode(io.BCM)
io.setup(mag_switch1, io.IN, pull_up_down=io.PUD_UP)
io.setup(mag_switch2, io.IN, pull_up_down=io.PUD_UP)
io.setup(gpioBigGate, io.OUT)
io.setup(gpioSmallGate, io.OUT)
io.setup(gpioGarageRight, io.OUT)
io.setup(gpioGarageLeft, io.OUT)
@app.route('/api/v1/hello', methods=['GET'])
def get_hello():
return jsonify({
"status": 200,
"message": "Hello API. I'm Alive and waiting for your Commands!"
})
@app.route('/api/v1/plex', methods=['GET'])
def plex():
r = requests.get(plexUrl)
if r.status_code != 200:
return jsonify({
"status": 500,
"message": "Oops, could not make call to Plex!"
})
    return jsonify(r.json())
@app.route('/api/v1/biggate', methods=['GET'])
def get_biggate():
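    # pulse the GPIO output high for two seconds to trigger the gate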
io.output(gpioBigGate, io.HIGH)
time.sleep(2)
io.output(gpioBigGate, io.LOW)
return jsonify({
"status": 200,
"message": "Big Gate Busy..."
})
@app.route('/api/v1/smallgate', methods=['GET'])
def get_smallgate():
io.output(gpioSmallGate, io.HIGH)
time.sleep(2)
io.output(gpioSmallGate, io.LOW)
return jsonify({
"status": 200,
"message": "Small Gate Busy..."
})
@app.route('/api/v1/garageright', methods=['GET'])
def get_garage_right():
io.output(gpioGarageRight, io.HIGH)
time.sleep(2)
io.output(gpioGarageRight, io.LOW)
rightSensor = io.input(mag_switch1)
return jsonify({
"status": 200,
"message": "Garage Door Right",
"garageRight": rightSensor
})
@app.route('/api/v1/garageleft', methods=['GET'])
def get_garage_left():
io.output(gpioGarageLeft, io.HIGH)
time.sleep(2)
io.output(gpioGarageLeft, io.LOW)
leftSensor = io.input(mag_switch2)
return jsonify({
"status": 200,
"message": "Garage Door Left",
"garageLeft": leftSensor
})
@app.route('/api/v1/garagedoors', methods=['GET'])
def get_garage_doors():
rightSensor = io.input(mag_switch1)
leftSensor = io.input(mag_switch2)
return jsonify({
"status": 200,
"message": "States of the Garage Doors",
"garageRight": rightSensor,
"garageLeft": leftSensor
})
@app.route('/api/v1/temp1', methods=['GET'])
def temp1():
values = bus.read_i2c_block_data(I2C_ADDRESS, 0x00, 2)
tempMSB = values[0]
tempLSB = values[1]
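    # combine the two bytes into a 9-bit value with 0.5 degC resolution;
    # readings above 125 degC are negative values in two's complement form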
temp = (((tempMSB << 8) | tempLSB) >> 7) * 0.5
if temp > 125:
temp = (((((tempMSB << 8) | tempLSB) >> 7) * 0.5) - 256)
return jsonify({
"status": 200,
"message": "Temperature 1 Sensor Value",
"temp": temp
})
@app.route('/')
def index():
return "Hello, Home Remote!!"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
| mit | -7,297,475,318,174,467,000 | 22.380952 | 206 | 0.603142 | false |
hirofumi0810/asr_preprocessing | swbd/main.py | 1 | 15071 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Make dataset for the End-to-End model (Switchboard corpus).
Note that feature extraction depends on transcripts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile
import sys
import argparse
from tqdm import tqdm
import numpy as np
import pandas as pd
from collections import Counter
import pickle
sys.path.append('../')
from swbd.path import Path
from swbd.input_data import read_audio
from swbd.labels.ldc97s62.character import read_trans
from swbd.labels.fisher.character import read_trans as read_trans_fisher
from swbd.labels.eval2000.stm import read_stm
from utils.util import mkdir_join
from utils.inputs.wav_split import split_wav
from utils.dataset import add_element
parser = argparse.ArgumentParser()
parser.add_argument('--swbd_audio_path', type=str,
help='path to LDC97S62 audio files')
parser.add_argument('--swbd_trans_path', type=str,
help='path to LDC97S62 transciption files')
parser.add_argument('--fisher_path', type=str, help='path to Fisher dataset')
parser.add_argument('--eval2000_audio_path', type=str,
help='path to audio files of eval2000 dataset')
parser.add_argument('--eval2000_trans_path', type=str,
help='path to transcript files of eval2000 dataset')
parser.add_argument('--dataset_save_path', type=str,
help='path to save dataset')
parser.add_argument('--feature_save_path', type=str,
help='path to save input features')
parser.add_argument('--run_root_path', type=str,
help='path to run this script')
parser.add_argument('--tool', type=str,
choices=['htk', 'python_speech_features', 'librosa'])
parser.add_argument('--wav_save_path', type=str, help='path to wav files.')
parser.add_argument('--htk_save_path', type=str, help='path to htk files.')
parser.add_argument('--normalize', type=str,
choices=['global', 'speaker', 'utterance', 'no'])
parser.add_argument('--save_format', type=str, choices=['numpy', 'htk', 'wav'])
parser.add_argument('--feature_type', type=str, choices=['fbank', 'mfcc'])
parser.add_argument('--channels', type=int,
help='the number of frequency channels')
parser.add_argument('--window', type=float,
help='window width to extract features')
parser.add_argument('--slide', type=float, help='extract features per slide')
parser.add_argument('--energy', type=int, help='if 1, add the energy feature')
parser.add_argument('--delta', type=int, help='if 1, add the energy feature')
parser.add_argument('--deltadelta', type=int,
help='if 1, double delta features are also extracted')
parser.add_argument('--fisher', type=int,
help='If True, create large-size dataset (2000h).')
args = parser.parse_args()
path = Path(swbd_audio_path=args.swbd_audio_path,
swbd_trans_path=args.swbd_trans_path,
fisher_path=args.fisher_path,
eval2000_audio_path=args.eval2000_audio_path,
eval2000_trans_path=args.eval2000_trans_path,
wav_save_path=args.wav_save_path,
htk_save_path=args.htk_save_path,
run_root_path='./')
CONFIG = {
'feature_type': args.feature_type,
'channels': args.channels,
'sampling_rate': 8000,
'window': args.window,
'slide': args.slide,
'energy': bool(args.energy),
'delta': bool(args.delta),
'deltadelta': bool(args.deltadelta)
}
if args.save_format == 'htk':
assert args.tool == 'htk'
def main(data_size):
print('=' * 50)
print(' data_size: %s' % data_size)
print('=' * 50)
########################################
# labels
########################################
print('=> Processing transcripts...')
speaker_dict_dict = {} # dict of speaker_dict
print('---------- train ----------')
if data_size == '300h':
speaker_dict_dict['train'] = read_trans(
label_paths=path.trans(corpus='swbd'),
word_boundary_paths=path.word(corpus='swbd'),
run_root_path='./',
vocab_file_save_path=mkdir_join('./config/vocab_files'),
save_vocab_file=True)
elif data_size == '2000h':
speaker_dict_a, char_set_a, char_capital_set_a, word_count_dict_a = read_trans_fisher(
label_paths=path.trans(corpus='fisher'),
target_speaker='A')
speaker_dict_b, char_set_b, char_capital_set_b, word_count_dict_b = read_trans_fisher(
label_paths=path.trans(corpus='fisher'),
target_speaker='B')
# Merge 2 dictionaries
speaker_dict = merge_dicts([speaker_dict_a, speaker_dict_b])
char_set = char_set_a | char_set_b
char_capital_set = char_capital_set_a | char_capital_set_b
word_count_dict_fisher = dict(
Counter(word_count_dict_a) + Counter(word_count_dict_b))
speaker_dict_dict['train'] = read_trans(
label_paths=path.trans(corpus='swbd'),
word_boundary_paths=path.word(corpus='swbd'),
run_root_path='./',
vocab_file_save_path=mkdir_join('./config/vocab_files'),
save_vocab_file=True,
speaker_dict_fisher=speaker_dict,
char_set=char_set,
char_capital_set=char_capital_set,
word_count_dict=word_count_dict_fisher)
del speaker_dict
print('---------- eval2000 (swbd + ch) ----------')
speaker_dict_dict['eval2000_swbd'], speaker_dict_dict['eval2000_ch'] = read_stm(
stm_path=path.stm_path,
pem_path=path.pem_path,
glm_path=path.glm_path,
run_root_path='./')
########################################
# inputs
########################################
print('\n=> Processing input data...')
input_save_path = mkdir_join(
args.feature_save_path, args.save_format, data_size)
for data_type in ['train', 'eval2000_swbd', 'eval2000_ch']:
print('---------- %s ----------' % data_type)
if isfile(join(input_save_path, data_type, 'complete.txt')):
print('Already exists.')
else:
if args.save_format == 'wav':
########################################
# Split WAV files per utterance
########################################
if data_type == 'train':
wav_paths = path.wav(corpus='swbd')
if data_size == '2000h':
wav_paths += path.wav(corpus='fisher')
else:
wav_paths = path.wav(corpus=data_type)
split_wav(wav_paths=wav_paths,
speaker_dict=speaker_dict_dict[data_type],
save_path=mkdir_join(input_save_path, data_type))
# NOTE: ex.) save_path:
# swbd/feature/save_format/data_size/data_type/speaker/utt_name.npy
elif args.save_format in ['numpy', 'htk']:
if data_type == 'train':
if args.tool == 'htk':
audio_paths = path.htk(corpus='swbd')
if data_size == '2000h':
audio_paths += path.htk(corpus='fisher')
else:
audio_paths = path.wav(corpus='swbd')
if data_size == '2000h':
audio_paths += path.wav(corpus='fisher')
is_training = True
global_mean, global_std = None, None
else:
if args.tool == 'htk':
audio_paths = path.htk(corpus=data_type)
else:
audio_paths = path.wav(corpus=data_type)
is_training = False
# Load statistics over train dataset
global_mean = np.load(
join(input_save_path, 'train/global_mean.npy'))
global_std = np.load(
join(input_save_path, 'train/global_std.npy'))
read_audio(audio_paths=audio_paths,
tool=args.tool,
config=CONFIG,
normalize=args.normalize,
speaker_dict=speaker_dict_dict[data_type],
is_training=is_training,
save_path=mkdir_join(input_save_path, data_type),
save_format=args.save_format,
global_mean=global_mean,
global_std=global_std)
# NOTE: ex.) save_path:
# swbd/feature/save_format/data_size/data_type/speaker/*.npy
# Make a confirmation file to prove that dataset was saved
# correctly
with open(join(input_save_path, data_type, 'complete.txt'), 'w') as f:
f.write('')
########################################
# dataset (csv)
########################################
print('\n=> Saving dataset files...')
dataset_save_path = mkdir_join(
args.dataset_save_path, args.save_format, data_size, data_type)
print('---------- %s ----------' % data_type)
df_columns = ['frame_num', 'input_path', 'transcript']
df_char = pd.DataFrame([], columns=df_columns)
df_char_capital = pd.DataFrame([], columns=df_columns)
df_word_freq1 = pd.DataFrame([], columns=df_columns)
df_word_freq5 = pd.DataFrame([], columns=df_columns)
df_word_freq10 = pd.DataFrame([], columns=df_columns)
df_word_freq15 = pd.DataFrame([], columns=df_columns)
with open(join(input_save_path, data_type, 'frame_num.pickle'), 'rb') as f:
frame_num_dict = pickle.load(f)
utt_count = 0
df_char_list, df_char_capital_list = [], []
df_word_freq1_list, df_word_freq5_list = [], []
df_word_freq10_list, df_word_freq15_list = [], []
speaker_dict = speaker_dict_dict[data_type]
for speaker, utt_dict in tqdm(speaker_dict.items()):
for utt_index, utt_info in utt_dict.items():
if args.save_format == 'numpy':
input_utt_save_path = join(
input_save_path, data_type, speaker, speaker + '_' + utt_index + '.npy')
elif args.save_format == 'htk':
input_utt_save_path = join(
input_save_path, data_type, speaker, speaker + '_' + utt_index + '.htk')
elif args.save_format == 'wav':
input_utt_save_path = path.utt2wav(utt_index)
else:
raise ValueError('save_format is numpy or htk or wav.')
assert isfile(input_utt_save_path)
frame_num = frame_num_dict[speaker + '_' + utt_index]
char_indices, char_indices_capital, word_freq1_indices = utt_info[2:5]
word_freq5_indices, word_freq10_indices, word_freq15_indices = utt_info[5:8]
df_char = add_element(
df_char, [frame_num, input_utt_save_path, char_indices])
df_char_capital = add_element(
df_char_capital, [frame_num, input_utt_save_path, char_indices_capital])
df_word_freq1 = add_element(
df_word_freq1, [frame_num, input_utt_save_path, word_freq1_indices])
df_word_freq5 = add_element(
df_word_freq5, [frame_num, input_utt_save_path, word_freq5_indices])
df_word_freq10 = add_element(
df_word_freq10, [frame_num, input_utt_save_path, word_freq10_indices])
df_word_freq15 = add_element(
df_word_freq15, [frame_num, input_utt_save_path, word_freq15_indices])
utt_count += 1
# Reset
if utt_count == 10000:
df_char_list.append(df_char)
df_char_capital_list.append(df_char_capital)
df_word_freq1_list.append(df_word_freq1)
df_word_freq5_list.append(df_word_freq5)
df_word_freq10_list.append(df_word_freq10)
df_word_freq15_list.append(df_word_freq15)
df_char = pd.DataFrame([], columns=df_columns)
df_char_capital = pd.DataFrame([], columns=df_columns)
df_word_freq1 = pd.DataFrame([], columns=df_columns)
df_word_freq5 = pd.DataFrame([], columns=df_columns)
df_word_freq10 = pd.DataFrame([], columns=df_columns)
df_word_freq15 = pd.DataFrame([], columns=df_columns)
utt_count = 0
# Last dataframe
df_char_list.append(df_char)
df_char_capital_list.append(df_char_capital)
df_word_freq1_list.append(df_word_freq1)
df_word_freq5_list.append(df_word_freq5)
df_word_freq10_list.append(df_word_freq10)
df_word_freq15_list.append(df_word_freq15)
# Concatenate all dataframes
df_char = df_char_list[0]
df_char_capital = df_char_capital_list[0]
df_word_freq1 = df_word_freq1_list[0]
df_word_freq5 = df_word_freq5_list[0]
df_word_freq10 = df_word_freq10_list[0]
df_word_freq15 = df_word_freq15_list[0]
for df_i in df_char_list[1:]:
df_char = pd.concat([df_char, df_i], axis=0)
for df_i in df_char_capital_list[1:]:
df_char_capital = pd.concat([df_char_capital, df_i], axis=0)
for df_i in df_word_freq1_list[1:]:
df_word_freq1 = pd.concat([df_word_freq1, df_i], axis=0)
for df_i in df_word_freq5_list[1:]:
df_word_freq5 = pd.concat([df_word_freq5, df_i], axis=0)
for df_i in df_word_freq10_list[1:]:
df_word_freq10 = pd.concat([df_word_freq10, df_i], axis=0)
for df_i in df_word_freq15_list[1:]:
df_word_freq15 = pd.concat([df_word_freq15, df_i], axis=0)
df_char.to_csv(join(dataset_save_path, 'character.csv'))
df_char_capital.to_csv(
join(dataset_save_path, 'character_capital_divide.csv'))
df_word_freq1.to_csv(join(dataset_save_path, 'word_freq1.csv'))
df_word_freq5.to_csv(join(dataset_save_path, 'word_freq5.csv'))
df_word_freq10.to_csv(join(dataset_save_path, 'word_freq10.csv'))
df_word_freq15.to_csv(join(dataset_save_path, 'word_freq15.csv'))
def merge_dicts(dicts):
return {k: v for dic in dicts for k, v in dic.items()}
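# Editor's note (illustrative, not part of the original script): merge_dicts folds several
# speaker dictionaries into one, with later entries overwriting duplicate keys, e.g.
#   merge_dicts([{'spk1': 1}, {'spk2': 2}, {'spk1': 3}]) -> {'spk1': 3, 'spk2': 2}
# The keys shown are hypothetical; the real keys are speaker names from read_trans_fisher.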
if __name__ == '__main__':
data_sizes = ['2000h']
# data_sizes = ['300h']
# if bool(args.fisher):
# data_sizes += ['2000h']
for data_size in data_sizes:
main(data_size)
| mit | -8,193,133,252,302,720,000 | 43.196481 | 96 | 0.544224 | false |
ar0551/Wasp | devFiles/data/waspCatalogFix.py | 1 | 54170 | # Wasp: Discrete Design with Grasshopper plug-in (GPL) initiated by Andrea Rossi
#
# This file is part of Wasp.
#
# Copyright (c) 2017, Andrea Rossi <[email protected]>
# Wasp is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Wasp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wasp; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0 <https://www.gnu.org/licenses/gpl.html>
#
# Significant parts of Wasp have been developed by Andrea Rossi
# as part of research on digital materials and discrete design at:
# DDU Digital Design Unit - Prof. Oliver Tessmann
# Technische Universitat Darmstadt
#########################################################################
## IMPORTS ##
#########################################################################
import random
import math
import bisect
from Rhino.RhinoDoc import ActiveDoc
import Rhino.Geometry as rg
#########################################################################
## GLOBAL VARIABLES ##
#########################################################################
global_tolerance = ActiveDoc.ModelAbsoluteTolerance*2
#########################################################################
## CLASSES ##
#########################################################################
#################################################################### Connection ####################################################################
class Connection(object):
## constructor
def __init__(self, _plane, _type, _part, _id):
self.pln = _plane
flip_pln_Y = rg.Vector3d(self.pln.YAxis)
flip_pln_Y.Reverse()
self.flip_pln = rg.Plane(self.pln.Origin, self.pln.XAxis, flip_pln_Y)
self.type = _type
self.part = _part
self.id = _id
self.rules_table = []
self.active_rules = []
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspConnection [id: %s, type: %s]" % (self.id, self.type)
## return a transformed copy of the connection
def transform(self, trans):
pln_trans = rg.Plane(self.pln.Origin, self.pln.XAxis, self.pln.YAxis)
conn_trans = Connection(pln_trans, self.type, self.part, self.id)
conn_trans.pln.Transform(trans)
conn_trans.flip_pln.Transform(trans)
return conn_trans
## return a copy of the connection
def copy(self):
pln_copy = rg.Plane(self.pln.Origin, self.pln.XAxis, self.pln.YAxis)
conn_copy = Connection(pln_copy, self.type, self.part, self.id)
return conn_copy
## generate the rules-table for the connection
def generate_rules_table(self, rules):
count = 0
self.rules_table = []
self.active_rules = []
for rule in rules:
if rule.part1 == self.part and rule.conn1 == self.id:
self.rules_table.append(rule)
self.active_rules.append(count)
count += 1
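## Editor's note (hypothetical example, not in the original source): generate_rules_table keeps
## only the rules whose left-hand side matches this connection. For a connection with
## part='A' and id=0, the rule list [Rule('A', 0, 'B', 1), Rule('B', 0, 'A', 0)] reduces to
## rules_table == [Rule('A', 0, 'B', 1)] and active_rules == [0] (indices into rules_table).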
#################################################################### Base Part ####################################################################
class Part(object):
## constructor
def __init__(self, name, geometry, connections, collider, attributes, dim=None, id=None, field=None):
self.name = name
self.id = id
self.geo = geometry
self.field = field
self.connections = []
self.active_connections = []
count = 0
for conn in connections:
conn.part = self.name
conn.id = count
self.connections.append(conn)
self.active_connections.append(count)
count += 1
self.transformation = rg.Transform.Identity
self.center = self.geo.GetBoundingBox(False).Center
self.collider = collider
##part size
if dim is not None:
self.dim = dim
else:
max_collider_dist = None
for coll_geo in self.collider.geometry:
for v in coll_geo.Vertices:
dist = self.center.DistanceTo(v)
if dist > max_collider_dist or max_collider_dist is None:
max_collider_dist = dist
self.dim = max_collider_dist
self.parent = None
self.children = []
self.attributes = []
if len(attributes) > 0:
self.attributes = attributes
self.is_constrained = False
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspPart [name: %s, id: %s]" % (self.name, self.id)
## reset the part and connections according to new provided aggregation rules
def reset_part(self, rules):
count = 0
self.active_connections = []
for conn in self.connections:
conn.generate_rules_table(rules)
self.active_connections.append(count)
count += 1
## return a dictionary containing all part data
def return_part_data(self):
data_dict = {}
data_dict['name'] = self.name
data_dict['id'] = self.id
data_dict['geo'] = self.geo
data_dict['connections'] = self.connections
data_dict['transform'] = self.transformation
data_dict['collider'] = self.collider
data_dict['center'] = self.center
data_dict['parent'] = self.parent
data_dict['children'] = self.children
data_dict['attributes'] = self.attributes
return data_dict
## return a transformed copy of the part
def transform(self, trans, transform_sub_parts=False):
geo_trans = self.geo.Duplicate()
geo_trans.Transform(trans)
collider_trans = self.collider.transform(trans)
connections_trans = []
for conn in self.connections:
connections_trans.append(conn.transform(trans))
attributes_trans = []
if len(self.attributes) > 0:
for attr in self.attributes:
attributes_trans.append(attr.transform(trans))
part_trans = Part(self.name, geo_trans, connections_trans, collider_trans, attributes_trans, dim=self.dim, id=self.id, field=self.field)
part_trans.transformation = trans
return part_trans
## return a copy of the part
def copy(self):
geo_copy = self.geo.Duplicate()
collider_copy = self.collider.copy()
connections_copy = []
for conn in self.connections:
connections_copy.append(conn.copy())
attributes_copy = []
if len(self.attributes) > 0:
for attr in self.attributes:
attributes_copy.append(attr.copy())
part_copy = Part(self.name, geo_copy, connections_copy, collider_copy, attributes_copy, dim=self.dim, id=self.id, field=self.field)
part_copy.transformation = self.transformation
return part_copy
## return transformed center point of the part
def transform_center(self, trans):
center_trans = rg.Point3d(self.center)
center_trans.Transform(trans)
return center_trans
## return transformed collider
def transform_collider(self, trans):
return self.collider.transform(trans)
#################################################################### Constrained Part ####################################################################
class AdvancedPart(Part):
## constructor
def __init__(self, name, geometry, connections, collider, attributes, additional_collider, supports, dim = None, id=None, field=None, sub_parts=[]):
super(self.__class__, self).__init__(name, geometry, connections, collider, attributes, dim=dim, id=id, field=field)
self.add_collider = None
if additional_collider != None:
self.add_collider = additional_collider
self.supports = []
if len(supports) > 0:
self.supports = supports
## hierarchical sub-parts
self.sub_parts = sub_parts
self.hierarchy_level = 0
if len(self.sub_parts) > 0:
self.hierarchy_level = self.sub_parts[0].hierarchy_level + 1
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspAdvPart [name: %s, id: %s]" % (self.name, self.id)
## return all part data
def return_part_data(self):
data_dict = {}
data_dict['name'] = self.name
data_dict['id'] = self.id
data_dict['geo'] = self.geo
data_dict['connections'] = self.connections
data_dict['transform'] = self.transformation
data_dict['collider'] = self.collider
data_dict['center'] = self.center
data_dict['parent'] = self.parent
data_dict['children'] = self.children
data_dict['attributes'] = self.attributes
data_dict['add_collider'] = self.add_collider
return data_dict
## return a transformed copy of the part
def transform(self, trans, transform_sub_parts=False, sub_level = 0):
geo_trans = self.geo.Duplicate()
geo_trans.Transform(trans)
collider_trans = self.collider.transform(trans)
connections_trans = []
for conn in self.connections:
connections_trans.append(conn.transform(trans))
attributes_trans = []
if len(self.attributes) > 0:
for attr in self.attributes:
attributes_trans.append(attr.transform(trans))
add_collider_trans = None
if(self.add_collider != None):
add_collider_trans = self.add_collider.transform(trans, transform_connections=True, maintain_valid=True)
supports_trans = []
if len(self.supports) > 0:
for sup in self.supports:
sup_trans = sup.transform(trans)
supports_trans.append(sup_trans)
if transform_sub_parts and len(self.sub_parts) > 0 and sub_level > 0:
sub_parts_trans = []
for sp in self.sub_parts:
sp_trans = sp.transform(trans, transform_sub_parts = True, sub_level = sub_level - 1)
sub_parts_trans.append(sp_trans)
part_trans = AdvancedPart(self.name, geo_trans, connections_trans, collider_trans, attributes_trans, add_collider_trans, supports_trans, dim=self.dim, id=self.id, field=self.field, sub_parts=sub_parts_trans)
part_trans.transformation = trans
part_trans.is_constrained = True
return part_trans
else:
part_trans = AdvancedPart(self.name, geo_trans, connections_trans, collider_trans, attributes_trans, add_collider_trans, supports_trans, dim=self.dim, id=self.id, field=self.field, sub_parts=self.sub_parts)
part_trans.transformation = trans
part_trans.is_constrained = True
return part_trans
## return a copy of the part
def copy(self):
geo_copy = self.geo.Duplicate()
collider_copy = self.collider.copy()
connections_copy = []
for conn in self.connections:
connections_copy.append(conn.copy())
attributes_copy = []
if len(self.attributes) > 0:
for attr in self.attributes:
attributes_copy.append(attr.copy())
add_collider_copy = None
if(self.add_collider != None):
add_collider_copy = self.add_collider.copy()
supports_copy = []
if len(self.supports) > 0:
for sup in self.supports:
sup_copy = sup.copy()
supports_copy.append(sup_copy)
if len(self.sub_parts) > 0:
sub_parts_copy = []
for sp in self.sub_parts:
sp_copy = sp.copy()
sub_parts_copy.append(sp_copy)
part_copy = AdvancedPart(self.name, geo_copy, connections_copy, collider_copy, attributes_copy, add_collider_copy, supports_copy, dim=self.dim, id=self.id, field=self.field, sub_parts=sub_parts_copy)
part_copy.transformation = self.transformation
part_copy.is_constrained = True
return part_copy
else:
part_copy = AdvancedPart(self.name, geo_copy, connections_copy, collider_copy, attributes_copy, add_collider_copy, supports_copy, dim=self.dim, id=self.id, field=self.field, sub_parts=self.sub_parts)
part_copy.transformation = self.transformation
part_copy.is_constrained = True
return part_copy
#################################################################### Rule ####################################################################
class Rule(object):
def __init__(self, _part1, _conn1, _part2, _conn2, _active = True):
self.part1 = _part1
self.conn1 = _conn1
self.part2 = _part2
self.conn2 = _conn2
self.active = _active
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspRule [%s|%s_%s|%s]" % (self.part1, self.conn1, self.part2, self.conn2)
#################################################################### Field ####################################################################
class Field(object):
## constructor
def __init__(self, name, boundaries, pts, count_vec, resolution, values = []):
self.name = name
self.resolution = resolution
self.boundaries = boundaries
self.pts = pts
self.bbox = rg.BoundingBox(pts)
self.x_count = int(count_vec.X)
self.y_count = int(count_vec.Y)
self.z_count = int(count_vec.Z)
self.vals = []
pts_count = 0
self.is_tensor_field = False
try:
v = values[0][2]
self.is_tensor_field = True
except:
pass
if len(values) > 0:
for z in range(0, self.z_count):
self.vals.append([])
for y in range(0, self.y_count):
self.vals[z].append([])
for x in range(0, self.x_count):
if len(self.boundaries) > 0:
inside = False
for bou in self.boundaries:
if bou.IsPointInside(self.pts[pts_count], global_tolerance, True) == True:
self.vals[z][y].append(values[pts_count])
inside = True
break
if inside == False:
if self.is_tensor_field:
self.vals[z][y].append(rg.Vector3d(0,0,0))
else:
self.vals[z][y].append(0.0)
else:
self.vals[z][y].append(values[pts_count])
pts_count += 1
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspField [name: %s, res: %s, count: %s]" % (self.name, self.resolution, len(self.pts))
def return_values_list(self):
values_list = []
for z in range(0, self.z_count):
for y in range(0, self.y_count):
for x in range(0, self.x_count):
values_list.append(self.vals[z][y][x])
return values_list
## return value associated to the closest point of the field to the given point
def return_pt_val(self, pt):
pt_trans = pt - self.bbox.Min
x = int(math.floor(pt_trans.X/self.resolution))
y = int(math.floor(pt_trans.Y/self.resolution))
z = int(math.floor(pt_trans.Z/self.resolution))
value = self.vals[z][y][x]
return value
## find and return highest value in the field
def return_highest_pt(self, constraints = None):
max_val = -1
max_coords = None
for z in range(0, self.z_count):
for y in range(0, self.y_count):
for x in range(0, self.x_count):
value = self.vals[z][y][x]
## tensor field aggregation (WIP)
if self.is_tensor_field:
if value.Length > max_val:
if constraints is not None:
constraint_check = False
pt = rg.Point3d(x*self.resolution, y*self.resolution, z*self.resolution)
pt += self.bbox.Min
for constraint in constraints:
if constraint.check_soft(pt) == False:
constraint_check = True
break
if constraint_check == False:
max_val = value.Length
max_coords = (x,y,z)
else:
max_val = value.Length
max_coords = (x,y,z)
else:
if value > max_val:
if constraints is not None:
constraint_check = False
pt = rg.Point3d(x*self.resolution, y*self.resolution, z*self.resolution)
pt += self.bbox.Min
for constraint in constraints:
if constraint.check_soft(pt) == False:
constraint_check = True
break
if constraint_check == False:
max_val = value
max_coords = (x,y,z)
else:
max_val = value
max_coords = (x,y,z)
highest_pt = rg.Point3d(max_coords[0]*self.resolution, max_coords[1]*self.resolution, max_coords[2]*self.resolution)
highest_pt = highest_pt + self.bbox.Min
return highest_pt
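## Editor's sketch (added for illustration, not part of the original Wasp source): a standalone
## version of the grid lookup used in Field.return_pt_val; the function name and the 'field'
## argument are invented here, and self.vals is assumed to keep its [z][y][x] nesting.
def example_field_lookup(field, pt):
    pt_trans = pt - field.bbox.Min
    x = int(math.floor(pt_trans.X / field.resolution))
    y = int(math.floor(pt_trans.Y / field.resolution))
    z = int(math.floor(pt_trans.Z / field.resolution))
    return field.vals[z][y][x]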
#################################################################### Attribute ####################################################################
class Attribute(object):
## constructor
def __init__(self, name, values, transformable):
self.name = name
self.values = values
self.transformable = transformable
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspAttribute [name: %s]" % (self.name)
## return a transformed copy of the attribute
def transform(self, trans):
if self.transformable == True:
values_trans = []
for val in self.values:
val_trans = None
if type(val) == rg.Point3d:
val_trans = rg.Point3d(val)
elif type(val) == rg.Plane:
val_trans = rg.Plane(val)
elif type(val) == rg.Line:
val_trans = rg.Line(val.From, val.To)
else:
val_trans = val.Duplicate()
val_trans.Transform(trans)
values_trans.append(val_trans)
attr_trans = Attribute(self.name, values_trans, self.transformable)
else:
attr_trans = Attribute(self.name, self.values, self.transformable)
return attr_trans
## return a copy of the attribute
def copy(self):
if self.transformable == True:
values_copy = []
for val in self.values:
val_copy = None
if type(val) == rg.Point3d:
val_copy = rg.Point3d(val)
elif type(val) == rg.Plane:
val_copy = rg.Plane(val)
elif type(val) == rg.Line:
val_copy = rg.Line(val.From, val.To)
else:
val_copy = val.Duplicate()
values_copy.append(val_copy)
attr_copy = Attribute(self.name, values_copy, self.transformable)
else:
attr_copy = Attribute(self.name, self.values, self.transformable)
return attr_copy
#################################################################### Support ####################################################################
class Support(object):
## constructor
def __init__(self, support_directions):
self.sup_dir = support_directions
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspSupport [len: %s]" % (len(self.sup_dir))
## return a transformed copy of the support
def transform(self, trans):
sup_dir_trans = []
for dir in self.sup_dir:
dir = dir.ToNurbsCurve()
start_trans = dir.PointAtStart
end_trans = dir.PointAtEnd
start_trans.Transform(trans)
end_trans.Transform(trans)
dir_trans = rg.Line(start_trans, end_trans)
sup_dir_trans.append(dir_trans)
sup_trans = Support(sup_dir_trans)
return sup_trans
## return a copy of the support
def copy(self):
sup_dir_copy = []
for dir in self.sup_dir:
dir = dir.ToNurbsCurve()
start_copy = dir.PointAtStart
end_copy = dir.PointAtEnd
dir_copy = rg.Line(start_copy, end_copy)
sup_dir_copy.append(dir_copy)
sup_copy = Support(sup_dir_copy)
return sup_copy
#################################################################### Aggregation ####################################################################
class Aggregation(object):
## class constructor
def __init__(self, _name, _parts, _rules, _mode, _prev = [], _coll_check = True, _field = [], _global_constraints = [], _catalog = None):
## basic parameters
self.name = _name
self.parts = {}
for part in _parts:
self.parts[part.name] = part
self.rules = _rules
self.mode = _mode
self.coll_check = _coll_check
self.aggregated_parts = []
## fields
self.multiple_fields = False
if len(_field) == 0 or _field is None:
self.field = None
elif len(_field) == 1:
self.field = _field[0]
else:
self.field = {}
for f in _field:
self.field[f.name] = f
self.multiple_fields = True
## reset base parts
self.reset_base_parts()
## temp list to store possible colliders to newly added parts
self.possible_collisions = []
## aggregation queue, storing sorted possible next states in the form (part, f_val)
self.aggregation_queue = []
self.queue_values = []
self.queue_count = 0
## previous aggregated parts
self.prev_num = 0
if len(_prev) > 0:
self.prev_num = len(_prev)
for prev_p in _prev:
prev_p_copy = prev_p.copy()
prev_p_copy.reset_part(self.rules)
prev_p_copy.id = len(self.aggregated_parts)
self.aggregated_parts.append(prev_p_copy)
if self.field is not None:
self.compute_next_w_field(prev_p_copy)
## global constraints applied to the aggregation
self.global_constraints = _global_constraints
self.catalog = _catalog
#### WIP ####
self.collision_shapes = []
self.graph = None
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspAggregation [name: %s, size: %s]" % (self.name, len(self.aggregated_parts))
## reset base parts
def reset_base_parts(self, new_parts = None):
if new_parts != None:
self.parts = {}
for part in new_parts:
self.parts[part.name] = part
for p_key in self.parts:
self.parts[p_key].reset_part(self.rules)
## reset rules and regenerate rule tables for each part
def reset_rules(self, rules):
if rules != self.rules:
self.rules = rules
self.reset_base_parts()
for part in self.aggregated_parts:
part.reset_part(rules)
## recompute aggregation queue
def recompute_aggregation_queue(self):
self.aggregation_queue = []
self.queue_values = []
self.queue_count = 0
for part in self.aggregated_parts:
self.compute_next_w_field(part)
## trim aggregated parts list to a specific length
def remove_elements(self, num):
self.aggregated_parts = self.aggregated_parts[:num]
for part in self.aggregated_parts:
part.reset_part(self.rules)
if self.field is not None:
self.recompute_aggregation_queue()
## compute all possible parts which can be placed given an existing part and connection
def compute_possible_children(self, part_id, conn_id, check_constraints = False):
possible_children = []
current_part = self.aggregated_parts[part_id]
if conn_id in current_part.active_connections:
current_conn = current_part.connections[conn_id]
for rule_id in current_conn.active_rules:
rule = current_conn.rules_table[rule_id]
next_part = self.parts[rule.part2]
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[rule.conn2].flip_pln, current_conn.pln)
## boolean checks for all constraints
coll_check = False
add_coll_check = False
valid_connections = []
missing_sup_check = False
global_const_check = False
if check_constraints:
## collision check
self.possible_collisions = []
coll_check = self.collision_check(next_part, orientTransform)
## constraints check
if self.mode == 1: ## only local constraints mode
if coll_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
elif self.mode == 2: ## only global constraints mode
if coll_check == False and len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
elif self.mode == 3: ## local+global constraints mode
if coll_check == False:
if len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
if global_const_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
if coll_check == False and add_coll_check == False and missing_sup_check == False and global_const_check == False:
next_part_trans = next_part.transform(orientTransform)
possible_children.append(next_part_trans)
return possible_children
else:
return -1
## add a custom pre-computed part which has been already transformed in place and checked for constraints
def add_custom_part(self, part_id, conn_id, next_part):
next_part.reset_part(self.rules)
next_part.id = len(self.aggregated_parts)
self.aggregated_parts[part_id].children.append(next_part)
next_part.parent = self.aggregated_parts[part_id]
self.aggregated_parts.append(next_part)
for i in range(len(self.aggregated_parts[part_id].active_connections)):
if self.aggregated_parts[part_id].active_connections[i] == conn_id:
self.aggregated_parts[part_id].active_connections.pop(i)
break
#### constraints checks ####
## function grouping all constraints checks (not yet implemented)
def constraints_check(self, part, trans):
pass
## overlap // part-part collision check
def collision_check(self, part, trans):
part_center = part.transform_center(trans)
## overlap check
coll_count = 0
for ex_part in self.aggregated_parts:
dist = ex_part.center.DistanceTo(part_center)
if dist < global_tolerance:
return True
elif dist < ex_part.dim + part.dim:
self.possible_collisions.append(coll_count)
coll_count += 1
## collision check
if self.coll_check == True:
part_collider = part.transform_collider(trans)
if part_collider.check_collisions_by_id(self.aggregated_parts, self.possible_collisions):
return True
return False
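## Editor's note (illustrative, not in the original source): the distance test above acts as a
## bounding-sphere pre-filter; a part only reaches the exact mesh/mesh check when
## center_distance < ex_part.dim + part.dim, where dim is the largest vertex distance from the
## part centre computed in Part.__init__. Parts passing the filter are listed in
## self.possible_collisions and re-used by the support checks.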
## additional collider check
def additional_collider_check(self, part, trans):
if part.add_collider != None:
add_collider = part.add_collider.transform(trans, transform_connections=True, maintain_valid = False)
if add_collider.check_collisions_w_parts(self.aggregated_parts):
return True
## assign computed valid connections according to collider location
part.add_collider.valid_connections = list(add_collider.valid_connections)
return False
## support check
def missing_supports_check(self, part, trans):
if len(part.supports) > 0:
for sup in part.supports:
supports_count = 0
sup_trans = sup.transform(trans)
for dir in sup_trans.sup_dir:
for id in self.possible_collisions:
if self.aggregated_parts[id].collider.check_intersection_w_line(dir):
supports_count += 1
break
if supports_count == len(sup_trans.sup_dir):
return False
return True
else:
return False
## global constraints check
def global_constraints_check(self, part, trans):
for constraint in self.global_constraints:
part_center = part.transform_center(trans)
if constraint.soft:
if constraint.check(pt = part_center) == False:
return True
else:
part_collider = part.transform_collider(trans)
if constraint.check(pt = part_center, collider = part_collider) == False:
return True
return False
#### aggregation methods ####
## sequential aggregation with Graph Grammar
def aggregate_sequence(self, graph_rules):
for rule in graph_rules:
## first part
if len(self.aggregated_parts) == 0:
aggr_rule = rule.split(">")[0]
rule_parts = aggr_rule.split("_")
part1 = str(rule_parts[0].split("|")[0])
conn1 = int(rule_parts[0].split("|")[1])
part2 = str(rule_parts[1].split("|")[0])
conn2 = int(rule_parts[1].split("|")[1])
rule_ids = rule.split(">")[1].split("_")
first_part = self.parts[part1]
first_part_trans = first_part.transform(rg.Transform.Identity)
first_part_trans.id = rule_ids[0]
next_part = self.parts[part2]
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[conn2].flip_pln, first_part.connections[conn1].pln)
next_part_trans = next_part.transform(orientTransform)
next_part_trans.id = rule_ids[1]
## check additional collider (for fabrication constraints)
self.additional_collider_check(next_part, orientTransform)
## parent-child tracking
first_part_trans.children.append(next_part_trans)
next_part_trans.parent = first_part_trans
self.aggregated_parts.append(first_part_trans)
self.aggregated_parts.append(next_part_trans)
else:
aggr_rule = rule.split(">")[0]
rule_parts = aggr_rule.split("_")
part1_id = str(rule_parts[0].split("|")[0])
conn1 = int(rule_parts[0].split("|")[1])
part2 = str(rule_parts[1].split("|")[0])
conn2 = int(rule_parts[1].split("|")[1])
rule_ids = rule.split(">")[1].split("_")
first_part = None
for part in self.aggregated_parts:
if part.id == part1_id:
first_part = part
break
if first_part is not None:
first_part.id = rule_ids[0]
next_part = self.parts[part2]
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[conn2].flip_pln, first_part.connections[conn1].pln)
next_part_trans = next_part.transform(orientTransform)
next_part_trans.id = rule_ids[1]
## parent-child tracking
first_part.children.append(next_part_trans.id)
next_part_trans.parent = first_part.id
self.aggregated_parts.append(next_part_trans)
else:
pass ## implement error handling
## stochastic aggregation
def aggregate_rnd(self, num, use_catalog = False):
added = 0
loops = 0
while added < num:
loops += 1
if loops > num*100:
break
## if no part is present in the aggregation, add first random part
if len(self.aggregated_parts) == 0:
first_part = None
if use_catalog:
first_part = self.parts[self.catalog.return_weighted_part()]
else:
first_part = self.parts[random.choice(self.parts.keys())]
if first_part is not None:
first_part_trans = first_part.transform(rg.Transform.Identity)
for conn in first_part_trans.connections:
conn.generate_rules_table(self.rules)
first_part_trans.id = 0
self.aggregated_parts.append(first_part_trans)
added += 1
if use_catalog:
self.catalog.update(first_part_trans.name, -1)
## otherwise add new random part
else:
next_rule = None
part_01_id = -1
conn_01_id = -1
next_rule_id = -1
new_rule_attempts = 0
while new_rule_attempts < 1000:
new_rule_attempts += 1
next_rule = None
if use_catalog:
if self.catalog.is_empty:
break
next_part = self.parts[self.catalog.return_weighted_part()]
if next_part is not None:
part_01_id = random.randint(0,len(self.aggregated_parts)-1)
part_01 = self.aggregated_parts[part_01_id]
if len(part_01.active_connections) > 0:
conn_01_id = part_01.active_connections[random.randint(0, len(part_01.active_connections)-1)]
conn_01 = part_01.connections[conn_01_id]
if len(conn_01.active_rules) > 0:
next_rule_id = conn_01.active_rules[random.randint(0, len(conn_01.active_rules)-1)]
next_rule = conn_01.rules_table[next_rule_id]
if next_rule.part2 == next_part.name:
break
else:
part_01_id = random.randint(0,len(self.aggregated_parts)-1)
part_01 = self.aggregated_parts[part_01_id]
if len(part_01.active_connections) > 0:
conn_01_id = part_01.active_connections[random.randint(0, len(part_01.active_connections)-1)]
conn_01 = part_01.connections[conn_01_id]
if len(conn_01.active_rules) > 0:
next_rule_id = conn_01.active_rules[random.randint(0, len(conn_01.active_rules)-1)]
next_rule = conn_01.rules_table[next_rule_id]
break
if next_rule is not None:
next_part = self.parts[next_rule.part2]
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[next_rule.conn2].flip_pln, conn_01.pln)
## boolean checks for all constraints
coll_check = False
add_coll_check = False
valid_connections = []
missing_sup_check = False
global_const_check = False
## collision check
self.possible_collisions = []
coll_check = self.collision_check(next_part, orientTransform)
## constraints check
if self.mode == 1: ## only local constraints mode
if coll_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
elif self.mode == 2: ## only global constraints mode
if coll_check == False and len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
elif self.mode == 3: ## local+global constraints mode
if coll_check == False:
if len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
if global_const_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
if coll_check == False and add_coll_check == False and missing_sup_check == False and global_const_check == False:
next_part_trans = next_part.transform(orientTransform)
next_part_trans.reset_part(self.rules)
for i in range(len(next_part_trans.active_connections)):
if next_part_trans.active_connections[i] == next_rule.conn2:
next_part_trans.active_connections.pop(i)
break
next_part_trans.id = len(self.aggregated_parts)
## parent-child tracking
self.aggregated_parts[part_01_id].children.append(next_part_trans.id)
next_part_trans.parent = self.aggregated_parts[part_01_id].id
self.aggregated_parts.append(next_part_trans)
if use_catalog:
self.catalog.update(next_part_trans.name, -1)
for i in range(len(self.aggregated_parts[part_01_id].active_connections)):
if self.aggregated_parts[part_01_id].active_connections[i] == conn_01_id:
self.aggregated_parts[part_01_id].active_connections.pop(i)
break
added += 1
## TO FIX --> do not remove rules when only caused by missing supports
else:
## remove rules if they cause collisions or overlaps
for i in range(len(self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules)):
if self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules[i] == next_rule_id:
self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules.pop(i)
break
## check if the connection is still active (still active rules available)
if len(self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules) == 0:
for i in range(len(self.aggregated_parts[part_01_id].active_connections)):
if self.aggregated_parts[part_01_id].active_connections[i] == conn_01_id:
self.aggregated_parts[part_01_id].active_connections.pop(i)
break
else:
## if no part is available, exit the aggregation routine and return an error message
msg = "Could not place " + str(num-added) + " parts"
return msg
## stochastic aggregation with catalog
def aggregate_rnd_catalog(self, catalog, num = None):
added = 0
loops = 0
if num is None:
num = catalog.parts_total
while added < num:
loops += 1
if loops > num*100:
break
## if no part is present in the aggregation, add first random part
if len(self.aggregated_parts) == 0:
first_part = self.parts[catalog.return_weighted_part()]
first_part_trans = first_part.transform(rg.Transform.Identity)
for conn in first_part_trans.connections:
conn.generate_rules_table(self.rules)
first_part_trans.id = 0
self.aggregated_parts.append(first_part_trans)
catalog.update(first_part.name, -1)
added += 1
## otherwise add new random part
else:
next_rule = None
part_01_id = -1
conn_01_id = -1
next_rule_id = -1
new_rule_attempts = 0
while new_rule_attempts < 10000:
new_rule_attempts += 1
selected_part = catalog.return_weighted_part()
if selected_part is None or catalog.is_empty == True:
break
part_01_id = random.randint(0, len(self.aggregated_parts)-1)
part_01 = self.aggregated_parts[part_01_id]
if len(part_01.active_connections) > 0:
conn_01_id = part_01.active_connections[random.randint(0, len(part_01.active_connections)-1)]
conn_01 = part_01.connections[conn_01_id]
if len(conn_01.active_rules) > 0:
next_rule_id = conn_01.active_rules[random.randint(0, len(conn_01.active_rules)-1)]
if conn_01.rules_table[next_rule_id].part2 == selected_part:
next_rule = conn_01.rules_table[next_rule_id]
break
if next_rule is not None:
next_part = self.parts[next_rule.part2]
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[next_rule.conn2].flip_pln, conn_01.pln)
## boolean checks for all constraints
coll_check = False
add_coll_check = False
valid_connections = []
missing_sup_check = False
global_const_check = False
## collision check
self.possible_collisions = []
coll_check = self.collision_check(next_part, orientTransform)
## constraints check
if self.mode == 1: ## only local constraints mode
if coll_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
elif self.mode == 2: ## only global constraints mode
if coll_check == False and len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
elif self.mode == 3: ## local+global constraints mode
if coll_check == False:
if len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
if global_const_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
if coll_check == False and add_coll_check == False and missing_sup_check == False and global_const_check == False:
next_part_trans = next_part.transform(orientTransform)
next_part_trans.reset_part(self.rules)
for i in range(len(next_part_trans.active_connections)):
if next_part_trans.active_connections[i] == next_rule.conn2:
next_part_trans.active_connections.pop(i)
break
next_part_trans.id = len(self.aggregated_parts)
## parent-child tracking
self.aggregated_parts[part_01_id].children.append(next_part_trans.id)
next_part_trans.parent = self.aggregated_parts[part_01_id].id
self.aggregated_parts.append(next_part_trans)
catalog.update(next_part_trans.name, -1)
for i in range(len(self.aggregated_parts[part_01_id].active_connections)):
if self.aggregated_parts[part_01_id].active_connections[i] == conn_01_id:
self.aggregated_parts[part_01_id].active_connections.pop(i)
break
added += 1
## TO FIX --> do not remove rules when only caused by missing supports
else:
## remove rules if they cause collisions or overlaps
for i in range(len(self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules)):
if self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules[i] == next_rule_id:
self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules.pop(i)
break
## check if the connection is still active (still active rules available)
if len(self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules) == 0:
for i in range(len(self.aggregated_parts[part_01_id].active_connections)):
if self.aggregated_parts[part_01_id].active_connections[i] == conn_01_id:
self.aggregated_parts[part_01_id].active_connections.pop(i)
break
else:
## if no part is available, exit the aggregation routine and return an error message
msg = "Could not place " + str(num-added) + " parts"
return msg
## compute all possibilities for child-parts of the given part, and store them in the aggregation queue
def compute_next_w_field(self, part):
for i in xrange(len(part.active_connections)-1, -1, -1):
conn_id = part.active_connections[i]
conn = part.connections[conn_id]
for i2 in xrange(len(conn.active_rules)-1, -1, -1):
rule_id = conn.active_rules[i2]
rule = conn.rules_table[rule_id]
next_part = self.parts[rule.part2]
next_center = rg.Point3d(next_part.center)
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[rule.conn2].flip_pln, conn.pln)
next_center.Transform(orientTransform)
if self.multiple_fields:
f_name = next_part.field
if self.field[f_name].bbox.Contains(next_center) == True:
field_val = self.field[f_name].return_pt_val(next_center)
queue_index = bisect.bisect_left(self.queue_values, field_val)
queue_entry = (next_part.name, part.id, orientTransform)
self.queue_values.insert(queue_index, field_val)
self.aggregation_queue.insert(queue_index, queue_entry)
self.queue_count += 1
else:
if self.field.bbox.Contains(next_center) == True:
field_val = self.field.return_pt_val(next_center)
queue_index = bisect.bisect_left(self.queue_values, field_val)
queue_entry = (next_part.name, part.id, orientTransform)
self.queue_values.insert(queue_index, field_val)
self.aggregation_queue.insert(queue_index, queue_entry)
self.queue_count += 1
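## Editor's note (illustrative, hypothetical values): bisect.bisect_left keeps queue_values
## sorted in ascending order with aggregation_queue aligned to it, so aggregate_field() can
## always pop the candidate with the highest field value from the tail. Inserting values
## 0.4, 0.9 and 0.1 gives queue_values == [0.1, 0.4, 0.9]; the entry paired with 0.9 is
## placed next.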
## field-driven aggregation
def aggregate_field(self, num):
added = 0
loops = 0
while added < num:
## avoid endless loops
loops += 1
if loops > num*100:
break
## if no part is present in the aggregation, add first random part
if len(self.aggregated_parts) == 0 and self.prev_num == 0:
first_part = self.parts[random.choice(self.parts.keys())]
start_point = None
if self.multiple_fields:
f_name = first_part.field
if (self.mode == 2 or self.mode == 3) and len(self.global_constraints) > 0:
start_point = self.field[f_name].return_highest_pt(constraints=self.global_constraints)
else:
start_point = self.field[f_name].return_highest_pt()
else:
if (self.mode == 2 or self.mode == 3) and len(self.global_constraints) > 0:
start_point = self.field.return_highest_pt(constraints=self.global_constraints)
else:
start_point = self.field.return_highest_pt()
mov_vec = rg.Vector3d.Subtract(rg.Vector3d(start_point), rg.Vector3d(first_part.center))
move_transform = rg.Transform.Translation(mov_vec.X, mov_vec.Y, mov_vec.Z)
first_part_trans = first_part.transform(move_transform)
for conn in first_part_trans.connections:
conn.generate_rules_table(self.rules)
first_part_trans.id = 0
self.aggregated_parts.append(first_part_trans)
## compute all possible next parts and append to list
self.compute_next_w_field(first_part_trans)
added += 1
else:
## if no part is available, exit the aggregation routine and return an error message
if self.queue_count == 0:
msg = "Could not place " + str(num-added) + " parts"
return msg
next_data = self.aggregation_queue[self.queue_count-1]
next_part = self.parts[next_data[0]]
next_center = rg.Point3d(next_part.center)
orientTransform = next_data[2]
## boolean checks for all constraints
coll_check = False
add_coll_check = False
missing_sup_check = False
global_const_check = False
## collision check
self.possible_collisions = []
coll_check = self.collision_check(next_part, orientTransform)
## constraints check
if self.mode == 1: ## only local constraints mode
if coll_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
elif self.mode == 2: ## only global constraints mode
if coll_check == False and len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
elif self.mode == 3: ## local+global constraints mode
if coll_check == False:
if len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
if global_const_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
if coll_check == False and add_coll_check == False and missing_sup_check == False and global_const_check == False:
next_part_trans = next_part.transform(orientTransform)
next_part_trans.reset_part(self.rules)
for conn in next_part_trans.connections:
conn.generate_rules_table(self.rules)
next_part_trans.id = len(self.aggregated_parts)
self.aggregated_parts[next_data[1]].children.append(next_part_trans.id)
next_part_trans.parent = self.aggregated_parts[next_data[1]].id
self.aggregated_parts.append(next_part_trans)
## compute all possible next parts and append to list
self.compute_next_w_field(next_part_trans)
added += 1
self.aggregation_queue.pop()
self.queue_values.pop()
self.queue_count -=1
#################################################################### Plane Constraint ####################################################################
class Plane_Constraint(object):
## constructor
def __init__(self, _plane, _positive = True, _soft = True):
self.type = 'plane'
self.plane = _plane
self.positive = _positive
self.soft = _soft
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspPlaneConst [+: %s, soft: %s]" % (self.positive, self.soft)
## constraint check method
def check(self, pt = None, collider = None):
if self.soft:
return self.check_soft(pt)
else:
return self.check_hard(pt, collider)
## hard constraint check method
def check_hard(self, pt, collider):
if self.check_soft(pt):
for geo in collider.geometry:
if rg.Intersect.Intersection.MeshPlane(geo, self.plane) is not None:
return False
return True
else:
return False
## soft constraint check method
def check_soft(self, pt):
mapped_pt = self.plane.RemapToPlaneSpace(pt)[1]
if self.positive:
if mapped_pt.Z > 0:
return True
else:
if mapped_pt.Z < 0:
return True
return False
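## Editor's sketch (added for illustration, not part of the original source): check_soft above is
## a signed half-space test against the constraint plane. An equivalent standalone version using
## the plane normal; the function name and arguments are invented here.
def example_half_space_check(plane, pt, positive=True):
    side = (pt - plane.Origin) * plane.ZAxis  # signed distance along the unit normal
    if positive:
        return side > 0
    return side < 0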
#################################################################### Mesh Constraint ####################################################################
class Mesh_Constraint(object):
## constructor
def __init__(self, _geo, _inside = True, _soft = True):
self.type = 'mesh_collider'
self.geo = _geo
self.inside = _inside
self.soft = _soft
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspMeshConst [in: %s, soft: %s]" % (self.inside, self.soft)
## constraint check method
def check(self, pt = None, collider = None):
if self.soft:
return self.check_soft(pt)
else:
return self.check_hard(pt, collider)
## hard constraint check method
def check_hard(self, pt, collider):
if self.check_soft(pt):
for geo in collider.geometry:
if len(rg.Intersect.Intersection.MeshMeshFast(self.geo, geo)) > 0:
return False
return True
else:
return False
## soft constraint check method
def check_soft(self, pt):
is_inside = self.geo.IsPointInside(pt, global_tolerance, False)
if self.inside:
if is_inside:
return True
else:
if not is_inside:
return True
return False
#########################################################################
## WIP ##
#########################################################################
#################################################################### Collider ####################################################################
class Collider(object):
## constructor
def __init__(self, _geo, _multiple=False, _check_all = False, _connections=[], _valid_connections = []):
self.geometry = _geo
self.multiple = _multiple
self.check_all = _check_all
self.connections = _connections
self.valid_connections = _valid_connections
self.set_connections = False
if len(self.connections) == len(self.geometry) and self.multiple == True:
self.set_connections = True
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspCollider"
## return a transformed copy of the collider
########################################################################### check if valid connections need to be transformed or re-generated!!!
def transform(self, trans, transform_connections = False, maintain_valid = False):
geometry_trans = []
for geo in self.geometry:
geo_trans = geo.Duplicate()
geo_trans.Transform(trans)
geometry_trans.append(geo_trans)
connections_trans = []
if transform_connections:
for conn in self.connections:
connections_trans.append(conn.transform(trans))
if maintain_valid:
valid_connection_trans = list(self.valid_connections)
coll_trans = Collider(geometry_trans, _multiple=self.multiple, _check_all=self.check_all, _connections=connections_trans, _valid_connections=valid_connection_trans)
else:
coll_trans = Collider(geometry_trans, _multiple=self.multiple, _check_all=self.check_all, _connections=connections_trans)
return coll_trans
## return a copy of the collider
def copy(self):
geometry_copy = []
for geo in self.geometry:
geo_copy = geo.Duplicate()
geometry_copy.append(geo_copy)
connections_copy = []
for conn in self.connections:
connections_copy.append(conn.copy())
valid_connection_copy = list(self.valid_connections)
coll_copy = Collider(geometry_copy, _multiple=self.multiple, _check_all=self.check_all, _connections=connections_copy, _valid_connections=valid_connection_copy)
return coll_copy
## check collisions between collider and given part
def check_collisions_w_parts(self, parts):
## multiple collider with associated connections
if self.multiple:
valid_colliders = []
self.valid_connections = []
count = 0
for geo in self.geometry:
valid_coll = True
for part in parts:
for other_geo in part.collider.geometry:
if len(rg.Intersect.Intersection.MeshMeshFast(geo, other_geo)) > 0:
valid_coll = False
break
if valid_coll == False:
break
valid_colliders.append(valid_coll)
if self.set_connections and valid_coll:
self.valid_connections.append(count)
if valid_coll and self.check_all == False:
break
count+=1
if True in valid_colliders:
return False
return True
## simple collider
else:
for geo in self.geometry:
for part in parts:
for other_geo in part.collider.geometry:
if len(rg.Intersect.Intersection.MeshMeshFast(geo, other_geo)) > 0:
return True
return False
## check collisions between collider and given ids in the given parts list
def check_collisions_by_id(self, parts, ids):
## multiple collider with associated connections
if self.multiple:
valid_colliders = []
count = 0
for geo in self.geometry:
valid_coll = True
for id in ids:
for other_geo in parts[id].collider.geometry:
if len(rg.Intersect.Intersection.MeshMeshFast(geo, other_geo)) > 0:
valid_coll = False
break
valid_colliders.append(valid_coll)
if valid_coll and self.check_all == False:
break
count+=1
if True in valid_colliders:
return False
return True
## simple collider
else:
for geo in self.geometry:
for id in ids:
for other_geo in parts[id].collider.geometry:
if len(rg.Intersect.Intersection.MeshMeshFast(geo, other_geo)) > 0:
return True
return False
## check intersection between collider and line (for supports check)
def check_intersection_w_line(self, ln):
for geo in self.geometry:
if len(rg.Intersect.Intersection.MeshLine(geo, ln)[0]) > 0:
return True
return False
#### WIP ####
def check_global_constraints(self, constraint):
return False
################################################################# Parts Catalog ##################################################################
class PartCatalog(object):
##constructor
def __init__(self, _parts, _amounts):
self.parts = _parts
self.amounts = _amounts
self.dict = {}
for i in xrange(len(self.parts)):
self.dict[self.parts[i].name] = _amounts[i]
self.is_empty = False
self.parts_total = sum(self.dict.values())
## return a random part type
def return_random_part(self):
choices = [key for key in self.dict.keys() if self.dict[key] > 0]
if len(choices) > 0:
return random.choice(choices)
else:
self.is_empty = True
return None
## return a weighted-choice between the available parts, give the available parts amounts
def return_weighted_part(self):
if self.parts_total == 0:
self.is_empty = True
return None
n = random.uniform(0, self.parts_total)
for key in self.dict:
if n < self.dict[key]:
return key
n = n - self.dict[key]
return None
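## Editor's note (illustrative, hypothetical numbers): return_weighted_part draws a part name
## with probability proportional to its remaining amount. With dict == {'A': 3, 'B': 1} and
## parts_total == 4, 'A' is returned for n in [0, 3) and 'B' for n in [3, 4); once every
## amount reaches zero the catalog reports is_empty.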
def update(self, part_name, difference):
self.dict[part_name] += difference
self.parts_total = sum(self.dict.values())
if self.parts_total == 0:
self.is_empty = True
def copy(self):
amounts = [self.dict[part.name] for part in self.parts]
return PartCatalog(self.parts, amounts)
| gpl-3.0 | 6,861,793,551,399,903,000 | 33.263757 | 210 | 0.642644 | false |
NCPlayz/CassBotPy | cassandra/bot.py | 1 | 4014 | import datetime
import json
import discord
import os
from discord.ext import commands
from discord.ext.commands.converter import *
class CassandraContext(commands.Context):
    def is_float(self, argument):
        """Checks if the argument can be parsed as a float."""
        try:
            float(argument)  # succeeds if the argument is a number, e.g. "4.2"
            return True
        except (TypeError, ValueError):  # argument is not a number
            return False
async def send(self, content=None, *args, **kwargs):
"""Override for send to add message filtering"""
if content:
if self.is_float(content) or content.isdigit():
content = str(content)
            content = content.replace("@everyone", "@\u200beveryone").replace("@here", "@\u200bhere")
sent_message = await super().send(content, *args, **kwargs)
return sent_message
@property
def session(self):
"""Returns the aiohttp.ClientSession() instance in CassandraBase."""
return self.bot.session
class CassandraBase(commands.Bot):
"""This is the class that initializes the bot."""
def __init__(self):
self.token = os.environ['TOKEN']
self.presence = discord.Game(name='in a Digital Haunt...',
url="https://www.twitch.tv/ghostofsparkles", type=1)
self.archive_file = []
def get_package_info():
"""Fetches `arg` in `package.json`."""
with open("./package.json") as f:
config = json.load(f)
return config
def get_prefix():
"""Fetches all known prefixes."""
prefixes = ["-",
"Cassandra "]
return commands.when_mentioned_or(*prefixes)
def get_description():
"""Fetches description."""
return f"{get_package_info()['name']}"
def get_game():
"""Fetches game presence."""
return self.presence
super().__init__(command_prefix=get_prefix(), game=get_game(), description=get_description(), pm_help=None,
help_attrs=dict(hidden=True))
startup_extensions = []
for file in os.listdir("./cogs"):
if file.endswith(".py"):
startup_extensions.append(file.replace('.py', ''))
for extension in startup_extensions:
try:
self.load_extension(f'cogs.{extension}')
print(f'Loaded {extension}')
except Exception as e:
error = f'{extension}\n {type(e).__name__}: {e}'
print(f'Failed to load extension {error}')
self.session = None
def run(self):
"""Runs the bot."""
super().run(self.token)
async def on_message(self, message):
"""An event triggered when a message is sent."""
ctx = await self.get_context(message, cls=CassandraContext)
await self.invoke(ctx)
async def fetch(self, url: str, headers: dict = None, timeout: float = None,
return_type: str = None, **kwargs):
"""Fetches data from a url via aiohttp."""
async with self.session.get(url, headers=headers, timeout=timeout, **kwargs) as resp:
if return_type:
cont = getattr(resp, return_type)
return resp, await cont()
else:
return resp, None
class Cassandra(CassandraBase):
pass
class ConvertError(Exception):
pass
class Union(Converter):
def __init__(self, *converters):
self.converters = converters
    async def convert(self, ctx: CassandraContext, argument: str):
        """Tries each converter in turn and returns the first successful conversion."""
        for converter in self.converters:
            try:
                return await ctx.command.do_conversion(ctx, converter, argument)
            except Exception:
                continue  # try the next converter before giving up
        raise ConvertError('Conversion Failed.')
| mit | -1,071,383,508,198,149,500 | 32.016949 | 115 | 0.550573 | false |
GNOME/gnome-lirc-properties | test/test-policykit-is-authorized.py | 1 | 1051 | #!/usr/bin/python
import dbus
import os
bus = dbus.SystemBus()
policy_kit = bus.get_object('org.freedesktop.PolicyKit', '/')
#print policy_kit_mechanism.Introspect()
if(policy_kit == None):
print("Error: Could not get the PolicyKit interface.\n")
action_id = "org.gnome.clockapplet.mechanism.settimezone"
result = "";
# Check whether the process is authorized:
try:
result = policy_kit.IsProcessAuthorized(action_id, (dbus.UInt32)(os.getpid()), False)
except dbus.exceptions.DBusException, e:
print "exception: ", e
except Exception, e:
print "other exception: ", e
print "IsProcessAuthorized() result=", result
print "IsProcessAuthorized() authorized=", (result == "yes")
# Check whether the dbus session is authorized:
# Only works in a dbus service, so we have a sender dbus session name:
#try:
# result = policy_kit.IsSystemBusNameAuthorized(action_id, "fake-sender-dbus-session-name", False)
#except dbus.exceptions.DBusException, e:
# print "exception: ", e
#except Exception, e:
# print "other exception: ", e
| gpl-2.0 | -5,090,127,664,131,929,000 | 28.194444 | 101 | 0.720266 | false |
Franky333/crazyflie-clients-python | src/cfclient/ui/tabs/LogTab.py | 1 | 3242 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Shows the Log TOC of available variables in the Crazyflie.
"""
import cfclient
from cfclient.ui.tab import Tab
from PyQt5 import QtWidgets
from PyQt5 import uic
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import Qt
__author__ = 'Bitcraze AB'
__all__ = ['LogTab']
param_tab_class = uic.loadUiType(cfclient.module_path +
"/ui/tabs/logTab.ui")[0]
class LogTab(Tab, param_tab_class):
connectedSignal = pyqtSignal(str)
disconnectedSignal = pyqtSignal(str)
def __init__(self, tabWidget, helper, *args):
super(LogTab, self).__init__(*args)
self.setupUi(self)
self.tabName = "Log TOC"
self.menuName = "Log TOC"
self.helper = helper
self.tabWidget = tabWidget
self.cf = helper.cf
# Init the tree widget
self.logTree.setHeaderLabels(['Name', 'ID', 'Unpack', 'Storage'])
self.cf.connected.add_callback(self.connectedSignal.emit)
self.connectedSignal.connect(self.connected)
# Clear the log TOC list when the Crazyflie is disconnected
self.cf.disconnected.add_callback(self.disconnectedSignal.emit)
self.disconnectedSignal.connect(self.disconnected)
@pyqtSlot('QString')
def disconnected(self, linkname):
self.logTree.clear()
@pyqtSlot(str)
def connected(self, linkURI):
self.logTree.clear()
toc = self.cf.log.toc
for group in list(toc.toc.keys()):
groupItem = QtWidgets.QTreeWidgetItem()
groupItem.setData(0, Qt.DisplayRole, group)
for param in list(toc.toc[group].keys()):
item = QtWidgets.QTreeWidgetItem()
item.setData(0, Qt.DisplayRole, param)
item.setData(1, Qt.DisplayRole, toc.toc[group][param].ident)
item.setData(2, Qt.DisplayRole, toc.toc[group][param].pytype)
item.setData(3, Qt.DisplayRole, toc.toc[group][param].ctype)
groupItem.addChild(item)
self.logTree.addTopLevelItem(groupItem)
self.logTree.expandItem(groupItem)
| gpl-2.0 | 3,133,045,078,609,853,000 | 33.860215 | 79 | 0.614436 | false |
m4tx/egielda | utils/test_utils.py | 1 | 3014 | # This file is part of e-Giełda.
# Copyright (C) 2014-2015 Mateusz Maćkowski and Tomasz Zieliński
#
# e-Giełda is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with e-Giełda. If not, see <http://www.gnu.org/licenses/>.
from datetime import timedelta
from django.utils import timezone
from books.models import BookType, Book
from categories.models import Category
from authentication.models import AppUser
from orders.models import Order
from settings.settings import Settings
from utils.dates import datetime_to_string
def create_test_superuser():
if AppUser.objects.filter(username="test").count() == 0:
AppUser.objects.create_superuser(
username="test", first_name="Some", last_name="User", year="2015", class_letter="A",
phone_number="111222333", email="test@localhost", password="test"
)
def create_test_accepted_user():
if AppUser.objects.filter(username="test").count() == 0:
user = AppUser.objects.create_user(
username="test", first_name="Some", last_name="User", year="2015", class_letter="A",
phone_number="111222333", email="test@localhost", password="test"
)
user.verify()
def login(selenium, live_server_url, username, password):
selenium.get('%s%s' % (live_server_url, '/accounts/login/'))
selenium.find_element_by_name('username').send_keys(username)
selenium.find_element_by_name('password').send_keys(password)
selenium.find_element_by_xpath('//button[@type="submit"]').click()
def create_test_book_type():
book_type = BookType(isbn="9780262533058", publisher="MIT Press", title="Introduction to Algorithms",
publication_year=2009, price=60.50, visible=True)
book_type.save()
return book_type
def create_test_category():
category = Category(name="Test category")
category.save()
return category
def create_test_app_user():
app_user = AppUser(first_name="Some", last_name="User", year=2013, class_letter="A", phone_number="111222333")
app_user.save()
return app_user
def create_test_book(book_type, owner, accepted=True):
book = Book(book_type=book_type, owner=owner, accepted=accepted, accept_date=timezone.now() if accepted else None)
book.save()
return book
def create_test_order(user):
order = Order(user=user, date=timezone.now())
order.save()
return order
def set_sell_purchase_timespan():
settings = Settings()
settings.start_purchase = datetime_to_string(timezone.now())
settings.end_purchase = datetime_to_string(timezone.now() + timedelta(1))
settings.start_sell = datetime_to_string(timezone.now())
settings.end_sell = datetime_to_string(timezone.now() + timedelta(1))
| agpl-3.0 | -6,714,624,736,140,098,000 | 36.148148 | 118 | 0.698239 | false |
spinningbytes/deep-mlsa | code/architectures/default_cnn.py | 1 | 2153 | import logging
from keras.layers import Dense, ZeroPadding1D, Embedding, Convolution1D, MaxPooling1D, Flatten, Input
from keras.models import Model
from utils.data_utils import load_embedding_matrix
def create_default_model(config_data):
nb_filter = 200
filter_length = 6
hidden_dims = nb_filter
embedding_matrix = load_embedding_matrix(config_data)
max_features = embedding_matrix.shape[0]
embedding_dims = embedding_matrix.shape[1]
max_len = config_data['max_sentence_length']
logging.info('Build Model...')
logging.info('Embedding Dimensions: ({},{})'.format(max_features, embedding_dims))
main_input = Input(batch_shape=(None, max_len), dtype='int32', name='main_input')
if not config_data.get('random_embedding', None):
logging.info('Pretrained Word Embeddings')
embeddings = Embedding(
max_features,
embedding_dims,
input_length=max_len,
weights=[embedding_matrix],
trainable=False
)(main_input)
else:
logging.info('Random Word Embeddings')
embeddings = Embedding(max_features, embedding_dims, init='lecun_uniform', input_length=max_len)(main_input)
zeropadding = ZeroPadding1D(filter_length - 1)(embeddings)
conv1 = Convolution1D(
nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1)(zeropadding)
max_pooling1 = MaxPooling1D(pool_length=4, stride=2)(conv1)
conv2 = Convolution1D(
nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1)(max_pooling1)
max_pooling2 = MaxPooling1D(pool_length=conv2._keras_shape[1])(conv2)
flatten = Flatten()(max_pooling2)
hidden = Dense(hidden_dims)(flatten)
softmax_layer1 = Dense(3, activation='softmax', name='sentiment_softmax', init='lecun_uniform')(hidden)
model = Model(input=[main_input], output=softmax_layer1)
test_model = Model(input=[main_input], output=[softmax_layer1, hidden])
return model, test_model | apache-2.0 | 6,691,548,136,573,418,000 | 33.190476 | 116 | 0.668834 | false |
openstack/networking-plumgrid | networking_plumgrid/neutron/tests/unit/extensions/test_providernet.py | 1 | 2158 | # Copyright 2015 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
PLUMgrid plugin provider network extension unit tests
"""
import mock
from oslo_utils import importutils
from networking_plumgrid.neutron.plugins import plugin as plumgrid_plugin
from neutron.tests.unit.extensions import test_providernet as pnet
PLUM_DRIVER = ('networking_plumgrid.neutron.plugins.drivers.'
'fake_plumlib.Plumlib')
FAKE_DIRECTOR = '1.1.1.1'
FAKE_PORT = '1234'
FAKE_USERNAME = 'fake_admin'
FAKE_PASSWORD = 'fake_password'
FAKE_TIMEOUT = '0'
class ProviderNetworksTestCase(pnet.ProvidernetExtensionTestCase):
_plugin_name = ('networking_plumgrid.neutron.plugins.'
'plugin.NeutronPluginPLUMgridV2')
def setUp(self):
def mocked_plumlib_init(self):
director_plumgrid = FAKE_DIRECTOR
director_port = FAKE_PORT
director_username = FAKE_USERNAME
director_password = FAKE_PASSWORD
timeout = FAKE_TIMEOUT
self._plumlib = importutils.import_object(PLUM_DRIVER)
self._plumlib.director_conn(director_plumgrid,
director_port, timeout,
director_username,
director_password)
with mock.patch.object(plumgrid_plugin.NeutronPluginPLUMgridV2,
'plumgrid_init', new=mocked_plumlib_init):
super(ProviderNetworksTestCase, self).setUp()
def tearDown(self):
super(ProviderNetworksTestCase, self).tearDown()
| apache-2.0 | -443,667,551,993,928,500 | 36.859649 | 78 | 0.659407 | false |
rndusr/stig | stig/client/filters/base.py | 1 | 22315 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
# http://www.gnu.org/licenses/gpl-3.0.txt
import itertools
import operator
import re
from collections import abc
from ...utils import cliparser
from ...logging import make_logger # isort:skip
log = make_logger(__name__)
BOOLEAN = 'boolean'
COMPARATIVE = 'comparative'
class BoolFilterSpec():
"""Boolean filter specification"""
type = BOOLEAN
def __init__(self, func, *, needed_keys=(), aliases=(), description='No description'):
if not func:
self.filter_function = None
needed_keys = ()
else:
self.filter_function = func
self.needed_keys = needed_keys
self.aliases = aliases
self.description = description
class CmpFilterSpec():
"""Comparative filter specification"""
type = COMPARATIVE
def __init__(self, *, value_type, value_getter=None, value_matcher=None,
value_convert=None, as_bool=None, needed_keys=(), aliases=(),
description='No description'):
"""
value_type : Subclass of `type` (i.e. something that returns an instance when
called and can be passed to `isinstance` as the second argument
value_getter : Callable that takes an item and returns one or more
values to match against the user-provided value;
Multiple values must be given as an iterator (list,
tuple, generator, etc), and the item matches if any
match
value_convert : Callable that takes a value and converts it to something
comparable (e.g. "42" (str) -> 42 (int))
value_matcher : Callable that takes (item, operator, value) and returns True/False
as_bool : Callable that takes an item and returns True/False
needed_keys : Needed keys for this filter
aliases : Alternative names of this filter
"""
self.value_type = value_type
self.needed_keys = needed_keys
self.aliases = aliases
self.description = description
self.value_convert = value_convert if value_convert is not None else value_type
if value_getter is not None:
self.value_getter = value_getter
elif len(self.needed_keys) == 1:
self.value_getter = lambda dct, k=needed_keys[0]: dct[k]
else:
raise TypeError('Missing argument with needed_keys=%r: value_getter', self.needed_keys)
if value_matcher is None:
def value_matcher(item, op, user_value, vg=self.value_getter):
item_value = vg(item)
if isinstance(item_value, abc.Iterator):
return any(op(ival, user_value) for ival in item_value)
else:
return op(item_value, user_value)
self.value_matcher = value_matcher
if as_bool is None:
def as_bool(item, vg=self.value_getter):
item_value = vg(item)
if isinstance(item_value, abc.Iterator):
return any(item_value)
else:
return bool(item_value)
self.as_bool = as_bool
def make_filter(self, operator, user_value, invert):
if operator is None and user_value is None:
# Abuse comparative filter as boolean filter
# (e.g. 'peers-connected' matches torrents with peers-connected!=0)
return (self.as_bool, self.needed_keys, invert)
elif user_value is None:
# Operator with no value matches everything
return (None, (), False)
else:
def f(obj, vm=self.value_matcher, op=operator, val=user_value):
return vm(obj, op, val)
return (f, self.needed_keys, invert)
class FilterSpecDict(abc.Mapping):
"""TODO"""
_NOT_FOUND = object()
def __init__(self, dct):
self._dct = dct
def __getitem__(self, key):
value = self._dct.get(key, self._NOT_FOUND)
if value is not self._NOT_FOUND:
return value
for value in self._dct.values():
if key in value.aliases:
return value
raise KeyError(key)
def __iter__(self):
return iter(self._dct)
def __len__(self):
return len(self._dct)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self._dct)
class Filter():
"""Match sequences of objects against a single filter"""
OPERATORS = {
'=' : operator.__eq__, '~' : operator.__contains__,
'>' : operator.__gt__, '<' : operator.__lt__,
'>=' : operator.__ge__, '<=' : operator.__le__,
'=~' : lambda a, b: re.search(b, a),
}
INVERT_CHAR = '!'
POSSIBLE_OPERATORS = tuple(itertools.chain.from_iterable((op, '!' + op)
for op in OPERATORS))
DEFAULT_FILTER = None
DEFAULT_OPERATOR = '~'
BOOLEAN_FILTERS = {}
COMPARATIVE_FILTERS = {}
@classmethod
def _resolve_alias(cls, name):
"""
Return real filter name or `name` if it does not resolve
"""
if not hasattr(cls, '_aliases'):
aliases = {}
for fspecs in (cls.BOOLEAN_FILTERS, cls.COMPARATIVE_FILTERS):
for fname,f in fspecs.items():
for a in f.aliases:
if a in aliases:
raise RuntimeError('Multiple aliases: %r' % (a,))
else:
aliases[a] = fname
cls._aliases = aliases
if name is None:
name = ''
return cls._aliases.get(name.strip(), name)
@classmethod
def _get_filter_spec(cls, name):
"""
Get filter spec by `name`
Raise ValueError on error
"""
fspec = cls.BOOLEAN_FILTERS.get(name)
if fspec is not None:
return fspec
fspec = cls.COMPARATIVE_FILTERS.get(name)
if fspec is not None:
return fspec
if name:
raise ValueError('Invalid filter name: %r' % (name,))
else:
raise ValueError('No filter expression given')
@classmethod
def _make_filter(cls, name, op, user_value, invert):
"""
Return filter function, needed keys and invert
Filter function takes a value and returns whether it matches
`user_value`.
Filter function and needed keys are both `None` if everything is
matched.
Raise ValueError on error
"""
        # Ensure value is wanted by filter, compatible with the operator and of proper type
user_value = cls._validate_user_value(name, op, user_value)
log.debug(' Validated user_value: %r', user_value)
fspec = cls._get_filter_spec(name)
if fspec.type is BOOLEAN:
return (fspec.filter_function, fspec.needed_keys, invert)
elif fspec.type is COMPARATIVE:
return fspec.make_filter(cls.OPERATORS.get(op), user_value, invert)
@classmethod
def _validate_user_value(cls, name, op, user_value):
"""
Ensure that the `name`, `op`, and `user_value` make sense in conjunction
Return user value as correct type (e.g. `int`) for filter `name`
Raise ValueError if anything smells funky
"""
log.debug(' Validating user value: name=%r, op=%r, user_value=%r',
name, op, user_value)
if name in cls.BOOLEAN_FILTERS:
# log.debug('%r is a valid boolean filter: %r', name, cls.BOOLEAN_FILTERS[name])
if user_value:
raise ValueError('Boolean filter does not take a value: %s' % (name,))
elif op:
raise ValueError('Boolean filter does not take an operator: %s' % (name,))
if op is None or user_value is None:
# Filter `name` could still be (ab)used as boolean filter
return None
fspec = cls.COMPARATIVE_FILTERS.get(name)
if fspec is None:
if name:
raise ValueError('Invalid filter name: %r' % (name,))
else:
raise ValueError('No filter expression given')
# Convert user_value to proper type
if type(user_value) is not fspec.value_type:
log.debug(' Converting %r to %r', user_value, fspec.value_type)
try:
user_value = fspec.value_convert(user_value)
except ValueError:
raise ValueError('Invalid value for filter %r: %r' % (name, user_value))
# In case of regex operator, compile user_value
if op == '=~':
try:
user_value = re.compile(user_value)
except re.error as e:
raise ValueError('Invalid regular expression: %s: %s' % (str(e).capitalize(), user_value))
else:
# Test if target_type supports operator
try:
log.debug('Trying %r(%r [%r], %r [%r])',
cls.OPERATORS[op], user_value, type(user_value), user_value, type(user_value))
cls.OPERATORS[op](user_value, user_value)
except TypeError:
raise ValueError('Invalid operator for filter %r: %s' % (name, op))
return user_value
@classmethod
def _parse_inverter(cls, string, invert):
if not string:
return string, invert
# Find INVERT_CHAR at start or end of string
parts = cliparser.tokenize(string.strip(), delims=(cls.INVERT_CHAR,), escapes=('\\',), quotes=())
if cls.INVERT_CHAR in parts:
if parts and parts[0] == cls.INVERT_CHAR:
parts.pop(0)
invert = not invert
if parts and parts[-1] == cls.INVERT_CHAR:
parts.pop(-1)
invert = not invert
return ''.join(parts), invert
else:
# Return string unchanged
return string, invert
def __init__(self, filter_str=''):
# name: Name of filter (user-readable string)
# invert: Whether to invert filter (bool)
# op: Comparison operator as string (see OPERATORS)
# user_value: User-given value that is matched against items
# The *_raw variables contain original quotes and backslashes.
name_raw, op_raw, user_value_raw, invert = (None, None, None, False)
log.debug('Parsing %r', filter_str)
parts = cliparser.tokenize(filter_str, maxdelims=1, delims=self.OPERATORS, escapes=('\\',))
log.debug('Parts: %r', parts)
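        # e.g. 'name~foo' tokenizes into ['name', '~', 'foo'], while '!seeding'
        # stays a single part; the inverter character is handled separately below.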
if len(parts) == 3:
name_raw, op_raw, user_value_raw = parts
elif len(parts) == 2:
if parts[0] in self.OPERATORS:
op_raw, user_value_raw = parts
name_raw = self.DEFAULT_FILTER
elif parts[1] in self.OPERATORS:
name_raw, op_raw = parts
else:
raise ValueError('Malformed filter expression: %r' % (filter_str,))
elif len(parts) == 1:
if parts[0] in self.OPERATORS:
op_raw = parts[0]
else:
name_raw = parts[0]
else:
raise ValueError('Malformed filter expression: %r' % (filter_str,))
name_raw, invert = self._parse_inverter(name_raw, invert)
log.debug('Parsed %r into raw: name=%r, invert=%r, op=%r, user_value=%r',
filter_str, name_raw, invert, op_raw, user_value_raw)
# Remove all special characters (backslashes, quotes)
name, op, user_value = map(lambda x: None if x is None else cliparser.plaintext(x),
(name_raw, op_raw, user_value_raw))
log.debug(' Plaintext: name=%r, invert=%r, op=%r, user_value=%r',
name, invert, op, user_value)
name = self._resolve_alias(name)
log.debug(' Resolved alias: name=%r, op=%r, user_value=%r', name, op, user_value)
if not name:
name = self.DEFAULT_FILTER
log.debug(' Falling back to default filter: %r', name)
try:
log.debug(' Getting filter spec: name=%r, op=%r, user_value=%r', name, op, user_value)
# Get filter spec by `name`
filter_func, needed_keys, invert = self._make_filter(name, op, user_value, invert)
except ValueError:
# Filter spec lookup failed
if self.DEFAULT_FILTER and user_value is op is None:
# No `user_value` or `op` given - use the first part of the
# filter expression (normally the filter name) as `user_value`
# for DEFAULT_FILTER.
name, op, user_value = self.DEFAULT_FILTER, self.DEFAULT_OPERATOR, name
log.debug(' Using name as value for default filter: name=%r, op=%r, user_value=%r',
name, op, user_value)
filter_func, needed_keys, invert = self._make_filter(name, op, user_value, invert)
else:
# No DEFAULT_FILTER is set, so we can't default to it
raise
log.debug(' Final filter: name=%r, invert=%r, op=%r, user_value=%r',
name, invert, op, user_value)
self._filter_func = filter_func
self._needed_keys = needed_keys
self._name, self._invert, self._op, self._user_value = name, invert, op, user_value
self._hash = hash((name, invert, op, user_value))
def apply(self, objs, invert=False, key=None):
"""Yield matching objects or `key` of each matching object"""
invert = self._invert ^ bool(invert) # xor
is_wanted = self._filter_func
if is_wanted is None:
if invert:
# This filter matches nothing
yield from ()
else:
# This filter matches everything
if key is None:
yield from objs
else:
for obj in objs:
yield obj[key]
else:
if key is None:
for obj in objs:
if bool(is_wanted(obj)) ^ invert:
yield obj
else:
for obj in objs:
if bool(is_wanted(obj)) ^ invert:
yield obj[key]
def match(self, obj):
"""Return True if `obj` matches, False otherwise"""
is_wanted = self._filter_func
if is_wanted is None:
# This filter matches everything/nothing
return not self._invert
else:
return bool(is_wanted(obj)) ^ self._invert
def __str__(self):
if self._name is None:
return self.DEFAULT_FILTER or ''
elif self._op is None:
return ('!' if self._invert else '') + self._name
else:
name = self._name if self._name != self.DEFAULT_FILTER else ''
op = ('!' if self._invert else '') + self._op
user_value = self._user_value
if user_value is None:
return name + op
else:
val = str(user_value)
if val == '':
val = "''"
elif len(val) == 1:
val = cliparser.escape(val, delims=(' ', '&', '|'), quotes=("'", '"'))
else:
val = cliparser.quote(val, delims=(' ', '&', '|'), quotes=("'", '"'))
return name + op + val
@property
def needed_keys(self):
return self._needed_keys
@property
def match_everything(self):
return not self._filter_func
@property
def inverted(self):
return self._invert
def __eq__(self, other):
if isinstance(other, type(self)):
for attr in ('_name', '_user_value', '_invert', '_op'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
else:
return NotImplemented
def __repr__(self):
return '%s(%r)' % (type(self).__name__, str(self))
def __hash__(self):
return self._hash
# The filter specs are specified on the Filter subclasses in each module, but we
# only want to export the classes derived from FilterChain, so this metaclass
# grabs attributes that are missing from FilterChain from its 'filterclass'
# attribute.
class _forward_attrs(type):
def __getattr__(cls, name):
attr = getattr(cls.filterclass, name)
setattr(cls, name, attr)
return attr
class FilterChain(metaclass=_forward_attrs):
"""One or more filters combined with AND and OR operators"""
filterclass = NotImplemented
def __init__(self, filters=''):
if not isinstance(self.filterclass, type) or not issubclass(self.filterclass, Filter):
raise RuntimeError('Attribute "filterclass" must be set to a Filter subclass')
if isinstance(filters, str): # Because str is also instance of abc.Sequence
pass
elif isinstance(filters, abc.Sequence) and all(isinstance(f, str) for f in filters):
filters = '|'.join(filters)
elif isinstance(filters, (type(self), self.filterclass)):
filters = str(filters)
elif not isinstance(filters, str):
raise ValueError('Filters must be string or sequence of strings, not %s: %r'
% (type(filters).__name__, filters))
self._filterchains = ()
# Split `filters` at boolean operators
parts = cliparser.tokenize(filters, delims=('&', '|'))
if len(parts) > 0 and parts[0]:
if parts[0] in ('&', '|'):
raise ValueError("Filter can't start with operator: %r" % (parts[0],))
elif parts[-1] in ('&', '|'):
raise ValueError("Filter can't end with operator: %r" % (parts[-1],))
# The filter chain is represented by a tuple of tuples. Each inner
# tuple combines filters with AND. The outer tuple combines the
# inner tuples with OR.
filters = []
ops = []
expect = 'filter'
for i,part in enumerate(parts):
if expect == 'filter':
if part not in '&|':
f = self.filterclass(part)
if f.match_everything:
# One catch-all filter is the same as no filters
filters = [f]
ops.clear()
break
else:
filters.append(f)
expect = 'operator'
continue
elif expect == 'operator' and part in '&|':
if part in '&|':
ops.append(part)
expect = 'filter'
continue
raise ValueError('Consecutive operators: {!r}'.format(''.join(parts[i - 2 : i + 2])))
fchain = [[]]
for filter,op in itertools.zip_longest(filters, ops):
fchain[-1].append(filter)
if op == '|':
fchain.append([])
log.debug('Chained %r and %r to %r', filters, ops, fchain)
self._filterchains = tuple(tuple(x) for x in fchain)
def apply(self, objects):
"""Yield matching objects from iterable `objects`"""
chains = self._filterchains
if chains:
for obj in objects:
if any(all(f.match(obj) for f in AND_chain)
for AND_chain in chains):
yield obj
else:
yield from objects
def match(self, obj):
"""Whether `obj` matches this filter chain"""
# All filters in an AND_chain must match for the AND_chain to
# match. At least one AND_chain must match.
chains = self._filterchains
if not chains:
return True
else:
return any(all(f.match(obj) for f in AND_chain)
for AND_chain in chains)
@property
def needed_keys(self):
"""The object keys needed for filtering"""
keys = set()
for chain in self._filterchains:
for filter in chain:
keys.update(filter.needed_keys)
return tuple(keys)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
else:
# Compare sets because order doesn't matter (foo&bar|baz is the
# same as baz|bar&foo). Use frozensets because sets are not
# hashable.
self_fc_sets = set(frozenset(x) for x in self._filterchains)
other_fc_sets = set(frozenset(x) for x in other._filterchains)
return self_fc_sets == other_fc_sets
def __str__(self):
if len(self._filterchains) < 1:
return ''
else:
OR_chains = []
for AND_chain in self._filterchains:
OR_chains.append('&'.join(str(f) for f in AND_chain))
return '|'.join(OR_chains)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, str(self))
def __and__(self, other):
cls = type(self)
if not isinstance(other, cls):
return NotImplemented
else:
return cls(str(self) + '&' + str(other))
def __or__(self, other):
cls = type(self)
if not isinstance(other, cls):
return NotImplemented
else:
return cls(str(self) + '|' + str(other))
| gpl-3.0 | 6,356,354,278,953,972,000 | 37.407917 | 106 | 0.540444 | false |
pattisdr/osf.io | api/collections/urls.py | 1 | 1807 | from django.conf.urls import url
from api.collections import views
app_name = 'osf'
urlpatterns = [
url(r'^$', views.CollectionList.as_view(), name=views.CollectionList.view_name),
url(r'^(?P<collection_id>\w+)/$', views.CollectionDetail.as_view(), name=views.CollectionDetail.view_name),
url(r'^(?P<collection_id>\w+)/collected_metadata/$', views.CollectedMetaList.as_view(), name=views.CollectedMetaList.view_name),
url(r'^(?P<collection_id>\w+)/collected_metadata/(?P<cgm_id>\w+)/$', views.CollectedMetaDetail.as_view(), name=views.CollectedMetaDetail.view_name),
url(r'^(?P<collection_id>\w+)/linked_nodes/$', views.LinkedNodesList.as_view(), name=views.LinkedNodesList.view_name),
url(r'^(?P<collection_id>\w+)/linked_preprints/$', views.LinkedPreprintsList.as_view(), name=views.LinkedPreprintsList.view_name),
url(r'^(?P<collection_id>\w+)/linked_registrations/$', views.LinkedRegistrationsList.as_view(), name=views.LinkedRegistrationsList.view_name),
url(r'^(?P<collection_id>\w+)/node_links/$', views.NodeLinksList.as_view(), name=views.NodeLinksList.view_name),
url(r'^(?P<collection_id>\w+)/node_links/(?P<node_link_id>\w+)/', views.NodeLinksDetail.as_view(), name=views.NodeLinksDetail.view_name),
url(r'^(?P<collection_id>\w+)/relationships/linked_nodes/$', views.CollectionLinkedNodesRelationship.as_view(), name=views.CollectionLinkedNodesRelationship.view_name),
url(r'^(?P<collection_id>\w+)/relationships/linked_preprints/$', views.CollectionLinkedPreprintsRelationship.as_view(), name=views.CollectionLinkedPreprintsRelationship.view_name),
url(r'^(?P<collection_id>\w+)/relationships/linked_registrations/$', views.CollectionLinkedRegistrationsRelationship.as_view(), name=views.CollectionLinkedRegistrationsRelationship.view_name),
]
| apache-2.0 | 3,097,952,686,044,146,000 | 89.35 | 196 | 0.741007 | false |
mcellteam/gamer | tools/blendgamer/src/colormap_enums.py | 1 | 1151 | # ***************************************************************************
# This file is part of the GAMer software.
# Copyright (C) 2016-2018
# by Christopher Lee, Tom Bartol, John Moody, Rommie Amaro, J. Andrew McCammon,
# and Michael Holst
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ***************************************************************************
colormap_enums = [
('VIRIDIS', 'viridis', 'Viridis'),
('PRGN', 'PRGn', 'Purple-Green diverging'),
] | gpl-2.0 | -8,186,576,608,163,925,000 | 45.08 | 79 | 0.646394 | false |
sajeeshcs/nested_quota_final | nova/tests/unit/virt/libvirt/test_vif.py | 1 | 48234 | # Copyright 2012 Nicira, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from lxml import etree
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
from nova import objects
from nova.pci import utils as pci_utils
from nova import test
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import vif
CONF = cfg.CONF
class LibvirtVifTestCase(test.NoDBTestCase):
gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
subnet_bridge_4 = network_model.Subnet(cidr='101.168.1.0/24',
dns=[dns_bridge_4],
gateway=gateway_bridge_4,
routes=None,
dhcp_server='191.168.1.1')
gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
dns=None,
gateway=gateway_bridge_6,
ips=None,
routes=None)
network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface='eth0',
vlan=99)
vif_bridge = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_BRIDGE,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_bridge_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge=None,
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface='eth0',
vlan=99)
vif_bridge_neutron = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge_neutron,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
network_ovs = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface=None,
vlan=99)
network_ivs = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface=None,
vlan=99)
vif_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'ovs_hybrid_plug': True,
'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_filter_cap = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=None,
devname=None,
ovs_interfaceid=None)
vif_ivs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ivs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=None,
devname=None,
ovs_interfaceid='aaa')
vif_ivs_filter_direct = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
details={'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ivs_filter_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
details={
'port_filter': True,
'ovs_hybrid_plug': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_none = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_8021 = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge=None,
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
interface='eth0',
vlan=99)
vif_8021qbh = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBH,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_PROFILEID:
'MyPortProfile'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: '100'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_macvtap = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_MACVTAP,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: '100'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_8021qbg = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBG,
ovs_interfaceid=None,
qbg_params=network_model.VIF8021QbgParams(
managerid="xxx-yyy-zzz",
typeid="aaa-bbb-ccc",
typeidversion="1",
instanceid="ddd-eee-fff"))
network_mlnx = network_model.Network(id='network-id-xxx-yyy-zzz',
label=None,
bridge=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
interface='eth0')
network_midonet = network_model.Network(id='network-id-xxx-yyy-zzz',
label=None,
bridge=None,
subnets=[subnet_bridge_4],
interface='eth0')
vif_mlnx = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_mlnx,
type=network_model.VIF_TYPE_MLNX_DIRECT,
devname='tap-xxx-yyy-zzz')
vif_mlnx_net = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_mlnx,
type=network_model.VIF_TYPE_MLNX_DIRECT,
details={'physical_network':
'fake_phy_network'},
devname='tap-xxx-yyy-zzz')
vif_midonet = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_midonet,
type=network_model.VIF_TYPE_MIDONET,
devname='tap-xxx-yyy-zzz')
vif_iovisor = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_IOVISOR,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
instance = {
'name': 'instance-name',
'uuid': 'instance-uuid'
}
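    # The flavor extra_specs below are expected to surface as the libvirt
    # <bandwidth> element's inbound/outbound average/peak/burst attributes
    # (asserted in _test_model_qemu).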
bandwidth = {
'quota:vif_inbound_peak': '200',
'quota:vif_outbound_peak': '20',
'quota:vif_inbound_average': '100',
'quota:vif_outbound_average': '10',
'quota:vif_inbound_burst': '300',
'quota:vif_outbound_burst': '30'
}
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
self.flags(allow_same_net_traffic=True)
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
def _get_node(self, xml):
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
return ret[0]
def _assertMacEquals(self, node, vif):
mac = node.find("mac").get("address")
self.assertEqual(mac, vif['address'])
def _assertTypeEquals(self, node, type, attr, source, br_want,
prefix=None):
self.assertEqual(node.get("type"), type)
br_name = node.find(attr).get(source)
if prefix is None:
self.assertEqual(br_name, br_want)
else:
self.assertTrue(br_name.startswith(prefix))
def _assertTypeAndMacEquals(self, node, type, attr, source, vif,
br_want=None, size=0, prefix=None):
ret = node.findall("filterref")
self.assertEqual(len(ret), size)
self._assertTypeEquals(node, type, attr, source, br_want,
prefix)
self._assertMacEquals(node, vif)
def _assertModel(self, xml, model_want=None, driver_want=None):
node = self._get_node(xml)
if model_want is None:
ret = node.findall("model")
self.assertEqual(len(ret), 0)
else:
model = node.find("model").get("type")
self.assertEqual(model, model_want)
if driver_want is None:
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
else:
driver = node.find("driver").get("name")
self.assertEqual(driver, driver_want)
def _assertTypeAndPciEquals(self, node, type, vif):
self.assertEqual(node.get("type"), type)
address = node.find("source").find("address")
addr_type = address.get("type")
self.assertEqual("pci", addr_type)
pci_slot = "%(domain)s:%(bus)s:%(slot)s.%(func)s" % {
'domain': address.get("domain")[2:],
'bus': address.get("bus")[2:],
'slot': address.get("slot")[2:],
'func': address.get("function")[2:]}
pci_slot_want = vif['profile']['pci_slot']
self.assertEqual(pci_slot, pci_slot_want)
def _get_conf(self):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
conf.memory = 100 * 1024
conf.vcpus = 4
return conf
def _get_instance_xml(self, driver, vif, image_meta=None, flavor=None):
if flavor is None:
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=1,
root_gb=0,
ephemeral_gb=0,
swap=0,
extra_specs=dict(self.bandwidth),
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
conf = self._get_conf()
nic = driver.get_config(self.instance, vif, image_meta,
flavor, CONF.libvirt.virt_type)
conf.add_device(nic)
return conf.to_xml()
def test_multiple_nics(self):
conf = self._get_conf()
# Tests multiple nic configuration and that target_dev is
# set for each
nics = [{'net_type': 'bridge',
'mac_addr': '00:00:00:00:00:0b',
'source_dev': 'b_source_dev',
'target_dev': 'b_target_dev'},
{'net_type': 'ethernet',
'mac_addr': '00:00:00:00:00:0e',
'source_dev': 'e_source_dev',
'target_dev': 'e_target_dev'},
{'net_type': 'direct',
'mac_addr': '00:00:00:00:00:0d',
'source_dev': 'd_source_dev',
'target_dev': 'd_target_dev'}]
for nic in nics:
nic_conf = vconfig.LibvirtConfigGuestInterface()
nic_conf.net_type = nic['net_type']
nic_conf.target_dev = nic['target_dev']
nic_conf.mac_addr = nic['mac_addr']
nic_conf.source_dev = nic['source_dev']
conf.add_device(nic_conf)
xml = conf.to_xml()
doc = etree.fromstring(xml)
for nic in nics:
path = "./devices/interface/[@type='%s']" % nic['net_type']
node = doc.find(path)
self.assertEqual(nic['net_type'], node.get("type"))
self.assertEqual(nic['mac_addr'],
node.find("mac").get("address"))
self.assertEqual(nic['target_dev'],
node.find("target").get("dev"))
def test_model_novirtio(self):
self.flags(use_virtio_for_bridges=False,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml)
def test_model_kvm(self):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_model_kvm_qemu_custom(self):
for virt in ('kvm', 'qemu'):
self.flags(use_virtio_for_bridges=True,
virt_type=virt,
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
supported = (network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN)
for model in supported:
image_meta = {'properties': {'hw_vif_model': model}}
xml = self._get_instance_xml(d, self.vif_bridge,
image_meta)
self._assertModel(xml, model)
def test_model_kvm_bogus(self):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
image_meta = {'properties': {'hw_vif_model': 'acme'}}
self.assertRaises(exception.UnsupportedHardware,
self._get_instance_xml,
d,
self.vif_bridge,
image_meta)
def _test_model_qemu(self, *vif_objs, **kw):
libvirt_version = kw.get('libvirt_version')
self.flags(use_virtio_for_bridges=True,
virt_type='qemu',
group='libvirt')
for vif_obj in vif_objs:
d = vif.LibvirtGenericVIFDriver()
if libvirt_version is not None:
d.libvirt_version = libvirt_version
xml = self._get_instance_xml(d, vif_obj)
doc = etree.fromstring(xml)
bandwidth = doc.find('./devices/interface/bandwidth')
self.assertNotEqual(bandwidth, None)
inbound = bandwidth.find('inbound')
self.assertEqual(inbound.get("average"),
self.bandwidth['quota:vif_inbound_average'])
self.assertEqual(inbound.get("peak"),
self.bandwidth['quota:vif_inbound_peak'])
self.assertEqual(inbound.get("burst"),
self.bandwidth['quota:vif_inbound_burst'])
outbound = bandwidth.find('outbound')
self.assertEqual(outbound.get("average"),
self.bandwidth['quota:vif_outbound_average'])
self.assertEqual(outbound.get("peak"),
self.bandwidth['quota:vif_outbound_peak'])
self.assertEqual(outbound.get("burst"),
self.bandwidth['quota:vif_outbound_burst'])
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO, "qemu")
def test_model_qemu_no_firewall(self):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
self._test_model_qemu(
self.vif_bridge,
self.vif_8021qbg,
self.vif_iovisor,
self.vif_mlnx,
self.vif_ovs,
)
def test_model_qemu_iptables(self):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
self._test_model_qemu(
self.vif_bridge,
self.vif_ovs,
self.vif_ivs,
self.vif_8021qbg,
self.vif_iovisor,
self.vif_mlnx,
)
def test_model_xen(self):
self.flags(use_virtio_for_bridges=True,
virt_type='xen',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml)
def test_generic_driver_none(self):
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.NovaException,
self._get_instance_xml,
d,
self.vif_none)
def _check_bridge_driver(self, d, vif, br_want):
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_bridge, br_want, 1)
def test_generic_driver_bridge(self):
d = vif.LibvirtGenericVIFDriver()
self._check_bridge_driver(d,
self.vif_bridge,
self.vif_bridge['network']['bridge'])
def _check_ivs_ethernet_driver(self, d, vif, dev_prefix):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs, prefix=dev_prefix)
script = node.find("script").get("path")
self.assertEqual(script, "")
def test_unplug_ivs_ethernet(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(linux_net, 'delete_ivs_vif_port') as delete:
delete.side_effect = processutils.ProcessExecutionError
d.unplug_ivs_ethernet(None, self.vif_ovs)
def test_plug_ovs_hybrid(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy'),
mock.call('qvovif-xxx-yyy')],
'_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
'qvovif-xxx-yyy')],
'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
run_as_root=True),
mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
run_as_root=True),
mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
run_as_root=True),
mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
'/bridge/multicast_snooping'),
process_input='0', run_as_root=True,
check_exit_code=[0, 1]),
mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
run_as_root=True),
mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True)],
'create_ovs_vif_port': [mock.call('br0',
'qvovif-xxx-yyy', 'aaa-bbb-ccc',
'ca:fe:de:ad:be:ef',
'instance-uuid')]
}
with contextlib.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, '_create_veth_pair'),
mock.patch.object(linux_net, 'create_ovs_vif_port')
) as (device_exists, execute, _create_veth_pair, create_ovs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.plug_ovs_hybrid(self.instance, self.vif_ovs)
device_exists.assert_has_calls(calls['device_exists'])
_create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
execute.assert_has_calls(calls['execute'])
create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
def test_unplug_ovs_hybrid(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy')],
'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True),
mock.call('ip', 'link', 'set',
'qbrvif-xxx-yyy', 'down', run_as_root=True),
mock.call('brctl', 'delbr',
'qbrvif-xxx-yyy', run_as_root=True)],
'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
}
with contextlib.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=True),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, 'delete_ovs_vif_port')
) as (device_exists, execute, delete_ovs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.unplug_ovs_hybrid(None, self.vif_ovs)
device_exists.assert_has_calls(calls['device_exists'])
execute.assert_has_calls(calls['execute'])
delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
@mock.patch.object(utils, 'execute')
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
@mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1)
def _test_hw_veb_op(self, op, vlan, mock_get_vf_num, mock_get_ifname,
mock_execute):
mock_get_ifname.side_effect = ['eth1', 'eth13']
exit_code = [0, 2, 254]
port_state = 'up' if vlan > 0 else 'down'
calls = {
'get_ifname':
[mock.call(self.vif_macvtap['profile']['pci_slot'],
pf_interface=True),
mock.call(self.vif_macvtap['profile']['pci_slot'])],
'get_vf_num':
[mock.call(self.vif_macvtap['profile']['pci_slot'])],
'execute': [mock.call('ip', 'link', 'set', 'eth1',
'vf', 1, 'mac', self.vif_macvtap['address'],
'vlan', vlan,
run_as_root=True,
check_exit_code=exit_code),
mock.call('ip', 'link', 'set',
'eth13', port_state,
run_as_root=True,
check_exit_code=exit_code)]
}
op(None, self.vif_macvtap)
mock_get_ifname.assert_has_calls(calls['get_ifname'])
mock_get_vf_num.assert_has_calls(calls['get_vf_num'])
mock_execute.assert_has_calls(calls['execute'])
def test_plug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
self._test_hw_veb_op(
d.plug_hw_veb,
self.vif_macvtap['details'][network_model.VIF_DETAILS_VLAN])
def test_unplug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
self._test_hw_veb_op(d.unplug_hw_veb, 0)
def test_unplug_ovs_hybrid_bridge_does_not_exist(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy')],
'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
}
with contextlib.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(linux_net, 'delete_ovs_vif_port')
) as (device_exists, delete_ovs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.unplug_ovs_hybrid(None, self.vif_ovs)
device_exists.assert_has_calls(calls['device_exists'])
delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
def test_plug_ivs_hybrid(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy'),
mock.call('qvovif-xxx-yyy')],
'_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
'qvovif-xxx-yyy')],
'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
run_as_root=True),
mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
run_as_root=True),
mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
run_as_root=True),
mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
'/bridge/multicast_snooping'),
process_input='0', run_as_root=True,
check_exit_code=[0, 1]),
mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
run_as_root=True),
mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True)],
'create_ivs_vif_port': [mock.call('qvovif-xxx-yyy', 'aaa-bbb-ccc',
'ca:fe:de:ad:be:ef',
'instance-uuid')]
}
with contextlib.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, '_create_veth_pair'),
mock.patch.object(linux_net, 'create_ivs_vif_port')
) as (device_exists, execute, _create_veth_pair, create_ivs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.plug_ivs_hybrid(self.instance, self.vif_ivs)
device_exists.assert_has_calls(calls['device_exists'])
_create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
execute.assert_has_calls(calls['execute'])
create_ivs_vif_port.assert_has_calls(calls['create_ivs_vif_port'])
def test_unplug_ivs_hybrid(self):
calls = {
'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True),
mock.call('ip', 'link', 'set',
'qbrvif-xxx-yyy', 'down', run_as_root=True),
mock.call('brctl', 'delbr',
'qbrvif-xxx-yyy', run_as_root=True)],
'delete_ivs_vif_port': [mock.call('qvovif-xxx-yyy')]
}
with contextlib.nested(
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, 'delete_ivs_vif_port')
) as (execute, delete_ivs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.unplug_ivs_hybrid(None, self.vif_ivs)
execute.assert_has_calls(calls['execute'])
delete_ivs_vif_port.assert_has_calls(calls['delete_ivs_vif_port'])
def test_unplug_ivs_hybrid_bridge_does_not_exist(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
d.unplug_ivs_hybrid(None, self.vif_ivs)
def test_unplug_iovisor(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
mynetwork = network_model.Network(id='network-id-xxx-yyy-zzz',
label='mylabel')
myvif = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=mynetwork)
d.unplug_iovisor(None, myvif)
@mock.patch('nova.network.linux_net.device_exists')
def test_plug_iovisor(self, device_exists):
device_exists.return_value = True
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
instance = {
'name': 'instance-name',
'uuid': 'instance-uuid',
'project_id': 'myproject'
}
d.plug_iovisor(instance, self.vif_ivs)
def test_unplug_mlnx_with_details(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
d.unplug_mlnx_direct(None, self.vif_mlnx_net)
execute.assert_called_once_with('ebrctl', 'del-port',
'fake_phy_network',
'ca:fe:de:ad:be:ef',
run_as_root=True)
def test_plug_mlnx_with_details(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
d.plug_mlnx_direct(self.instance, self.vif_mlnx_net)
execute.assert_called_once_with('ebrctl', 'add-port',
'ca:fe:de:ad:be:ef',
'instance-uuid',
'fake_phy_network',
'mlnx_direct',
'eth-xxx-yyy-zzz',
run_as_root=True)
def test_plug_mlnx_no_physical_network(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
self.assertRaises(exception.NovaException,
d.plug_mlnx_direct,
self.instance,
self.vif_mlnx)
self.assertEqual(0, execute.call_count)
def test_ivs_ethernet_driver(self):
d = vif.LibvirtGenericVIFDriver()
self._check_ivs_ethernet_driver(d,
self.vif_ivs,
"tap")
def _check_ivs_virtualport_driver(self, d, vif, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
vif, vif['devname'])
def _check_ovs_virtualport_driver(self, d, vif, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
vif, "br0")
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "openvswitch")
iface_id_found = False
for p_elem in vp.findall("parameters"):
iface_id = p_elem.get("interfaceid", None)
if iface_id:
self.assertEqual(iface_id, want_iface_id)
iface_id_found = True
self.assertTrue(iface_id_found)
def test_generic_ovs_virtualport_driver(self):
d = vif.LibvirtGenericVIFDriver()
want_iface_id = self.vif_ovs['ovs_interfaceid']
self._check_ovs_virtualport_driver(d,
self.vif_ovs,
want_iface_id)
def test_generic_ivs_virtualport_driver(self):
d = vif.LibvirtGenericVIFDriver()
want_iface_id = self.vif_ivs['ovs_interfaceid']
self._check_ivs_virtualport_driver(d,
self.vif_ivs,
want_iface_id)
def test_ivs_plug_with_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
xml = self._get_instance_xml(d, self.vif_ivs)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ivs, br_want, 1)
def test_ivs_plug_with_port_filter_direct_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs_filter_hybrid['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ivs_filter_hybrid)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ivs_filter_hybrid, br_want, 0)
def test_ivs_plug_with_port_filter_hybrid_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_ivs_filter_direct['devname']
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ivs_filter_direct)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs_filter_direct, br_want, 0)
def test_hybrid_plug_without_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ovs_hybrid['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ovs_hybrid)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ovs_hybrid, br_want, 0)
def test_direct_plug_with_port_filter_cap_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_ovs_filter_cap)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "target", "dev",
self.vif_ovs_filter_cap, br_want)
def _check_neutron_hybrid_driver(self, d, vif, br_want):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
vif, br_want, 1)
def test_generic_hybrid_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ovs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_neutron_hybrid_driver(d,
self.vif_ovs,
br_want)
def test_ivs_hybrid_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_neutron_hybrid_driver(d,
self.vif_ivs,
br_want)
def test_mlnx_direct_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.vif_mlnx)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth-xxx-yyy-zzz")
self._assertTypeEquals(node, "direct", "source",
"mode", "passthrough")
self._assertMacEquals(node, self.vif_mlnx)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_midonet_ethernet_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_midonet)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_midonet, br_want)
def test_generic_8021qbh_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbh)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_8021qbh)
self._assertMacEquals(node, self.vif_8021qbh)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbh")
profile_id_found = False
for p_elem in vp.findall("parameters"):
details = self.vif_8021qbh["details"]
profile_id = p_elem.get("profileid", None)
if profile_id:
self.assertEqual(profile_id,
details[network_model.VIF_DETAILS_PROFILEID])
profile_id_found = True
self.assertTrue(profile_id_found)
def test_hw_veb_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb)
self._assertMacEquals(node, self.vif_hw_veb)
vlan = node.find("vlan").find("tag").get("id")
vlan_want = self.vif_hw_veb["details"]["vlan"]
self.assertEqual(vlan, vlan_want)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
return_value='eth1')
def test_hw_veb_driver_macvtap(self, mock_get_ifname):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_macvtap)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth1")
self._assertTypeEquals(node, "direct", "source",
"mode", "passthrough")
self._assertMacEquals(node, self.vif_macvtap)
vlan = node.find("vlan")
self.assertIsNone(vlan)
def test_generic_iovisor_driver(self):
d = vif.LibvirtGenericVIFDriver()
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
br_want = self.vif_ivs['devname']
xml = self._get_instance_xml(d, self.vif_ivs)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs, br_want)
def test_generic_8021qbg_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbg)
node = self._get_node(xml)
self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
self._assertMacEquals(node, self.vif_8021qbg)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbg")
manager_id_found = False
type_id_found = False
typeversion_id_found = False
instance_id_found = False
for p_elem in vp.findall("parameters"):
wantparams = self.vif_8021qbg['qbg_params']
manager_id = p_elem.get("managerid", None)
type_id = p_elem.get("typeid", None)
typeversion_id = p_elem.get("typeidversion", None)
instance_id = p_elem.get("instanceid", None)
if manager_id:
self.assertEqual(manager_id,
wantparams['managerid'])
manager_id_found = True
if type_id:
self.assertEqual(type_id,
wantparams['typeid'])
type_id_found = True
if typeversion_id:
self.assertEqual(typeversion_id,
wantparams['typeidversion'])
typeversion_id_found = True
if instance_id:
self.assertEqual(instance_id,
wantparams['instanceid'])
instance_id_found = True
self.assertTrue(manager_id_found)
self.assertTrue(type_id_found)
self.assertTrue(typeversion_id_found)
self.assertTrue(instance_id_found)
| apache-2.0 | 7,805,289,528,933,048,000 | 46.103516 | 79 | 0.475432 | false |
jeremy24/494-graph-algos | python/hw2/timeit.py | 1 | 1308 |
from __future__ import print_function
import time
import os
from graph import Graph
from graph import make
from graph import GraphException
from graph import Matrix
def run(name):
graph = make( name )
ret = [0,0]
start = time.time()
graph.dfs(0)
ret[1] = (time.time()-start)
start = time.time()
graph.bfs(0)
ret[0] = (time.time()-start)
return ret
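# Note: run() returns a two-element list -- index 0 holds the BFS wall-clock time
# and index 1 the DFS wall-clock time, both in seconds.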
def go():
names = list()
bfs = list()
dfs = list()
for name in os.listdir("./graphs"):
names.append(name)
name = "./graphs/" + name
res = run(name)
bfs.append(res[0])
dfs.append(res[1])
for index in range(0, len(names)):
name = names[index]
b = bfs[index]
d = dfs[index]
first = "%s" % str(object=name).ljust(30, " ")
second = "%s" % str(object=b).rjust(18, " ")
third = "%s" % str(object=d).ljust(20, " ")
print("dfs: " + str(d) + " bfs: " + str(b))
        if d > b:
            print("bfs is faster on " + first + " by " + str(abs(b-d)) + " seconds")
        else:
            print("dfs is faster on " + first + " by " + str(abs(b-d)) + " seconds")
# print(first + " took " + second + " " + third)
go()
| mit | 3,221,412,540,978,325,000 | 17.818182 | 84 | 0.482416 | false |
Jackman3005/CUAutograding | CUAutogradingScripts/GradingScriptLibrary/LocalBatchGrading/BatchGrader.py | 1 | 3612 | #THESE LINES ARE NEEDED TO ADD THE GradingScriptLibrary path to the system path so they can be imported!!!
import os,sys,inspect
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../../")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
#DO NOT DELETE THE LINES ABOVE
import configparser
import importlib.machinery
from GradingScriptLibrary.LocalBatchGrading import Extraction
from GradingScriptLibrary.LocalBatchGrading import StudentGradingDictionary
from GradingScriptLibrary.LocalBatchGrading import StudentCSV
def deductionsToGradeAndComments(deductions):
grade = 100
comments = ""
for gradeDeduction, comment in deductions:
grade += gradeDeduction
comments += ("\n[%.1f] " % gradeDeduction) + comment + ", "
if (len(deductions)==0):
comments = "Great work!"
else:
comments = comments.rstrip(", ")
grade = max(round(grade),0)
return (grade,comments)
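# Worked example (illustrative values): deductionsToGradeAndComments([(-10, "late"), (-5, "style")])
# returns (85, "\n[-10.0] late, \n[-5.0] style") -- deductions are summed onto a base
# grade of 100, the result is rounded and clamped at 0, and each comment is prefixed
# with its point value.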
try:
configFileLocation = sys.argv[1]
config = configparser.ConfigParser()
config.read(configFileLocation)
zipFilesFolderLocation = config['Paths']['zip_submissions_folder']
outputCsvFileLocation = config['Paths']['grade_output_file']
outputLogFileLocation = config['Paths']['output_log_file']
universityRosterLocation = config['Paths']['university_roster']
crossReferenceRosterLocation = config['Paths'].get('cross_reference_roster',fallback=False)
gradingScriptFileLocation = config['Paths']['grading_script_file']
except Exception as e:
print("Problem Reading config file! make sure you pass one in when you call this script!")
    print(e)
else:
#import the grading script file dynamically
loader = importlib.machinery.SourceFileLoader("module.name", gradingScriptFileLocation)
autogradingScript = loader.load_module()
gradingScriptFolder = os.path.split(gradingScriptFileLocation)[0]
#redirect the stderr to the log file
sys.stderr = open(outputLogFileLocation,"w")
print ("extracting zip files from",zipFilesFolderLocation)
Extraction.ExtractSubmissions(zipFilesFolderLocation)
print ("Finished extracting")
Students = StudentGradingDictionary.ReturnStudentGradingDictionary(universityRosterLocation,crossReferenceRosterLocation,zipFilesFolderLocation)
studentIdKeys = Students.keys()
numOfStudents = len(studentIdKeys)
studentCount = 1
for studentId in studentIdKeys:
#if (studentCount >= 30):
#break
print("Grading student " + str(studentCount) + "/" + str(numOfStudents) + " " + Students[studentId]["FN"] + " " + Students[studentId]["LN"])
if (Students[studentId]['Directory'] != None):
deductions = autogradingScript.gradeSubmission(zipFilesFolderLocation + "/" + Students[studentId]['Directory'],gradingScriptFolder)
Students[studentId]['Grade'], Students[studentId]['Comments'] =deductionsToGradeAndComments(deductions)
print("\nSuccessfuly Graded student: " +Students[studentId]["FN"] + " " + Students[studentId]["LN"] + "\tGrade: " + str( Students[studentId]['Grade']) + "\tComments: " + Students[studentId]['Comments'] + "\n\n\n\n",file=sys.stderr)
else:
Students[studentId]['Grade'], Students[studentId]['Comments'] = (0,"[-100] Could not find a valid submission")
studentCount +=1
print ("Writing csv file",outputCsvFileLocation)
StudentCSV.makeCSV(Students,outputCsvFileLocation)
| agpl-3.0 | -1,942,210,317,177,647,000 | 43.04878 | 243 | 0.703765 | false |
keepkey/python-keepkey | tests/test_msg_signtx_xfer.py | 1 | 22824 | # This file is part of the TREZOR project.
#
# Copyright (C) 2012-2016 Marek Palatinus <[email protected]>
# Copyright (C) 2012-2016 Pavol Rusnak <[email protected]>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# The script has been modified for KeepKey Device.
import unittest
import common
import binascii
import itertools
import keepkeylib.messages_pb2 as proto
import keepkeylib.types_pb2 as proto_types
from keepkeylib.client import CallException
from keepkeylib import tx_api
class TestMsgSigntx(common.KeepKeyTest):
def test_xfer_node_error(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto_types.TxInputType(address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=binascii.unhexlify('d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882'),
prev_index=0,
)
# Transfer Output address
out1 = proto_types.TxOutputType(address_n=[0x8000002c, 0x80000000, 0x80000000, 1, 0 ],
#error -^-
amount=390000 - 10000,
script_type=proto_types.PAYTOADDRESS,
address_type=1,
)
# Change Output address
out2 = proto_types.TxOutputType(address_n=[0x8000002c, 0x80000000, 0x80000000, 1, 1 ],
amount=8000,
script_type=proto_types.PAYTOADDRESS,
address_type=2,
)
try:
self.client.sign_tx('Bitcoin', [inp1, ], [out1, out2])
except CallException as e:
self.assertEqual(e.args[0], proto_types.Failure_Other)
else:
self.assert_(False, "Failed to detect invalid Address node for transfer transaction")
def test_xfer_addressT_error(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto_types.TxInputType(address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=binascii.unhexlify('d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882'),
prev_index=0,
)
# Spend Output address
out1 = proto_types.TxOutputType(address='1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1',
amount=390000 - 80000 - 12000 - 10000,
script_type=proto_types.PAYTOADDRESS,
address_type=0,
)
# Transfer Output address
out2 = proto_types.TxOutputType(address='1EfKbQupktEMXf4gujJ9kCFo83k1iMqwqK',
#error -^-
amount=10000,
script_type=proto_types.PAYTOADDRESS,
address_type=1,
)
try:
self.client.sign_tx('Bitcoin', [inp1, ], [out1, out2])
except CallException as e:
self.assertEqual(e.args[0], proto_types.Failure_Other)
else:
self.assert_(False, "Failed to detect invalid Address Type for transfer transaction")
def test_xfer_change_addressT_error(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto_types.TxInputType(address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=binascii.unhexlify('d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882'),
prev_index=0,
)
# Spend Output address
out1 = proto_types.TxOutputType(address='1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1',
amount=390000 - 80000 - 12000 - 10000,
script_type=proto_types.PAYTOADDRESS,
address_type=0,
)
# Transfer Output address
out2 = proto_types.TxOutputType(address='1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz2',
#error -^-
amount=10000,
script_type=proto_types.PAYTOADDRESS,
address_type=2,
)
try:
self.client.sign_tx('Bitcoin', [inp1, ], [out1, out2])
except CallException as e:
self.assertEqual(e.args[0], proto_types.Failure_Other)
else:
self.assert_(False, "Failed to detect invalid address type for change transaction")
def test_xfer_spend_addressT_error(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto_types.TxInputType(address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=binascii.unhexlify('d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882'),
prev_index=0,
)
# Spend Output address
out1 = proto_types.TxOutputType(address='1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1',
amount=390000 - 80000 - 12000 - 10000,
script_type=proto_types.PAYTOADDRESS,
address_type=0,
)
# Transfer Output address
out2 = proto_types.TxOutputType(address_n=[0x8000002c, 0x80000000, 0x80000000, 1, 0 ],
#error -^-
amount=10000,
script_type=proto_types.PAYTOADDRESS,
address_type=0,
)
try:
self.client.sign_tx('Bitcoin', [inp1, ], [out1, out2])
except CallException as e:
self.assertEqual(e.args[0], proto_types.Failure_Other)
else:
self.assert_(False, "Failed to detect invalid address type for spend transaction")
def test_xfer_change_fee(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto_types.TxInputType(address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=binascii.unhexlify('d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882'),
prev_index=0,
)
# Transfer Output address
out1 = proto_types.TxOutputType(address_n=[0x8000002c, 0x80000000, 0x80000000, 0, 0 ],
amount=390000 - 10000,
script_type=proto_types.PAYTOADDRESS,
address_type=1,
)
# Change Output address
out2 = proto_types.TxOutputType(address_n=[0x8000002c, 0x80000000, 0x80000000, 1, 1 ],
amount=8000,
script_type=proto_types.PAYTOADDRESS,
address_type=2,
)
with self.client:
# self.client.set_tx_api(tx_api.TxApiTestnet)
self.client.set_expected_responses([
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXMETA, details=proto_types.TxRequestDetailsType(tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=0, tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=1, tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0, tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.ButtonRequest(code=proto_types.ButtonRequest_ConfirmTransferToAccount),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=1)),
proto.ButtonRequest(code=proto_types.ButtonRequest_ConfirmTransferToAccount),
proto.ButtonRequest(code=proto_types.ButtonRequest_SignTx),
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=1)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=1)),
proto.TxRequest(request_type=proto_types.TXFINISHED),
])
(signatures, serialized_tx) = self.client.sign_tx('Bitcoin', [inp1, ], [out1, out2])
self.assertEqual(binascii.hexlify(serialized_tx), '010000000182488650ef25a58fef6788bd71b8212038d7f2bbe4750bc7bcb44701e85ef6d5000000006a473044022029270ff6991d953cb89135dc43723a64f5be00a69db42efeb845f2918dec50c302201f6589bb44c9c4b6c8152966d5de9b56c2a6ca61172d0f63c6c62e48ad8975130121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff0260cc0500000000001976a9149c9d21f47382762df3ad81391ee0964b28dd951788ac401f0000000000001976a914db302d9f1dd36faa220d8dfd7ff18ff5e308a53688ac00000000')
def test_xfer_multi_account(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto_types.TxInputType(address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=binascii.unhexlify('d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882'),
prev_index=0,
)
# Transfer Output address1
out1 = proto_types.TxOutputType(address_n=[0x8000002c, 0x80000000, 0x80000001, 0, 0 ],
amount= 10000,
script_type=proto_types.PAYTOADDRESS,
address_type=1,
)
# Transfer Output address2
out2 = proto_types.TxOutputType(address_n=[0x8000002c, 0x80000000, 0x80000002, 0, 0 ],
amount= 10000,
script_type=proto_types.PAYTOADDRESS,
address_type=1,
)
# Transfer Output address3
out3 = proto_types.TxOutputType(address_n=[0x8000002c, 0x80000000, 0x80000003, 0, 0 ],
amount= 390000 - 20000 - 90000, #280000
script_type=proto_types.PAYTOADDRESS,
address_type=1,
)
# Change Output address
out4 = proto_types.TxOutputType(address_n=[0x8000002c, 0x80000000, 0x80000000, 1, 1 ],
amount=80000,
script_type=proto_types.PAYTOADDRESS,
address_type=2,
)
with self.client:
# self.client.set_tx_api(tx_api.TxApiTestnet)
self.client.set_expected_responses([
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXMETA, details=proto_types.TxRequestDetailsType(tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=0, tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=1, tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0, tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.ButtonRequest(code=proto_types.ButtonRequest_ConfirmTransferToAccount),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=1)),
proto.ButtonRequest(code=proto_types.ButtonRequest_ConfirmTransferToAccount),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=2)),
proto.ButtonRequest(code=proto_types.ButtonRequest_ConfirmTransferToAccount),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=3)),
proto.ButtonRequest(code=proto_types.ButtonRequest_ConfirmTransferToAccount),
proto.ButtonRequest(code=proto_types.ButtonRequest_SignTx),
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=1)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=2)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=3)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=1)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=2)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=3)),
proto.TxRequest(request_type=proto_types.TXFINISHED),
])
(signatures, serialized_tx) = self.client.sign_tx('Bitcoin', [inp1, ], [out1, out2, out3, out4])
self.assertEqual(binascii.hexlify(serialized_tx), '010000000182488650ef25a58fef6788bd71b8212038d7f2bbe4750bc7bcb44701e85ef6d5000000006a473044022001e609489d66ab9a96e26b23125e372e68990fe995a4026bbb0a2ee53dcf008b0220545ddfec0b6fd186623d793158437cdfcead11b6464507db533ca6a3f9c64a340121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff0410270000000000001976a914c5c94a31b13d1223520fa92f1e0a127dbfd82ed188ac10270000000000001976a9140d9d0435f01563c6f01b7c9a404590c21f4e2d0188acc0450400000000001976a914e68990fa670c2910e8eaec96c2db2568ec132c2288ac80380100000000001976a914db302d9f1dd36faa220d8dfd7ff18ff5e308a53688ac00000000')
def test_one_three_fee(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto_types.TxInputType(address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=binascii.unhexlify('d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882'),
prev_index=0,
)
# Spend Output address
out1 = proto_types.TxOutputType(address='1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1',
amount=390000 - 80000 - 12000 - 10000,
script_type=proto_types.PAYTOADDRESS,
address_type=0,
)
# Transfer Output address
out2 = proto_types.TxOutputType(address_n=[0x8000002c, 0x80000000, 0x80000000, 0, 0 ],
amount=10000,
script_type=proto_types.PAYTOADDRESS,
address_type=1,
)
# Change Output address
out3 = proto_types.TxOutputType(address_n=[0x8000002c, 0x80000000, 0x80000000, 1, 1 ],
amount=80000,
script_type=proto_types.PAYTOADDRESS,
address_type=2,
)
with self.client:
# self.client.set_tx_api(tx_api.TxApiTestnet)
self.client.set_expected_responses([
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXMETA, details=proto_types.TxRequestDetailsType(tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=0, tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=1, tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0, tx_hash=binascii.unhexlify("d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"))),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.ButtonRequest(code=proto_types.ButtonRequest_ConfirmOutput),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=1)),
proto.ButtonRequest(code=proto_types.ButtonRequest_ConfirmTransferToAccount),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=2)),
proto.ButtonRequest(code=proto_types.ButtonRequest_ConfirmTransferToAccount),
proto.ButtonRequest(code=proto_types.ButtonRequest_SignTx),
proto.TxRequest(request_type=proto_types.TXINPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=1)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=2)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=0)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=1)),
proto.TxRequest(request_type=proto_types.TXOUTPUT, details=proto_types.TxRequestDetailsType(request_index=2)),
proto.TxRequest(request_type=proto_types.TXFINISHED),
])
(signatures, serialized_tx) = self.client.sign_tx('Bitcoin', [inp1, ], [out1, out2, out3])
self.assertEqual(binascii.hexlify(serialized_tx), '010000000182488650ef25a58fef6788bd71b8212038d7f2bbe4750bc7bcb44701e85ef6d5000000006b483045022100c3ceed2bd2365ccdb978d605e9ae6347790b978e2747bbbec623364270be723c022017680d050c2bf0dae4bd249d83b43bc93a33661c1a0b18c7e8a05eb47815f9aa0121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff0300650400000000001976a914de9b2a8da088824e8fe51debea566617d851537888ac10270000000000001976a9149c9d21f47382762df3ad81391ee0964b28dd951788ac80380100000000001976a914db302d9f1dd36faa220d8dfd7ff18ff5e308a53688ac00000000')
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | -6,478,775,842,627,732,000 | 62.840909 | 647 | 0.637443 | false |
gnarula/eden_deployment | modules/s3db/asset.py | 1 | 56243 | # -*- coding: utf-8 -*-
""" Sahana Eden Assets Model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3AssetModel",
"S3AssetHRModel",
"S3AssetTeamModel",
#"asset_rheader",
"asset_types",
"asset_log_status",
"asset_controller",
"asset_AssetRepresent",
)
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
ASSET_TYPE_VEHICLE = 1 # => Extra Tab(s) for Registration Documents, Fuel Efficiency
ASSET_TYPE_RADIO = 2 # => Extra Tab(s) for Radio Channels/Frequencies
ASSET_TYPE_TELEPHONE = 3 # => Extra Tab(s) for Contact Details & Airtime Billing
ASSET_TYPE_OTHER = 4 # => No extra Tabs
# To pass to global scope
asset_types = {"VEHICLE" : ASSET_TYPE_VEHICLE,
"RADIO" : ASSET_TYPE_RADIO,
"TELEPHONE" : ASSET_TYPE_TELEPHONE,
"OTHER" : ASSET_TYPE_OTHER,
}
ASSET_LOG_SET_BASE = 1
ASSET_LOG_ASSIGN = 2
ASSET_LOG_RETURN = 3
ASSET_LOG_CHECK = 4
ASSET_LOG_REPAIR = 5
ASSET_LOG_DONATED = 32
ASSET_LOG_LOST = 33
ASSET_LOG_STOLEN = 34
ASSET_LOG_DESTROY = 35
# To pass to global scope
asset_log_status = {"SET_BASE" : ASSET_LOG_SET_BASE,
"ASSIGN" : ASSET_LOG_ASSIGN,
"RETURN" : ASSET_LOG_RETURN,
"CHECK" : ASSET_LOG_CHECK,
"REPAIR" : ASSET_LOG_REPAIR,
"DONATED" : ASSET_LOG_DONATED,
"LOST" : ASSET_LOG_LOST,
"STOLEN" : ASSET_LOG_STOLEN,
"DESTROY" : ASSET_LOG_DESTROY,
}
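# Lifecycle sketch: an asset typically receives a SET_BASE entry when it is first
# placed at a site, then ASSIGN/RETURN (plus CHECK/REPAIR) entries as it moves;
# DONATED, LOST, STOLEN and DESTROY act as terminal states -- asset_rheader() below
# stops offering the "Assign" actions once the current status is >= ASSET_LOG_DONATED.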
# =============================================================================
class S3AssetModel(S3Model):
"""
Asset Management
"""
names = ("asset_asset",
"asset_item",
"asset_log",
"asset_asset_id",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
s3 = current.response.s3
item_id = self.supply_item_id
item_entity_id = self.supply_item_entity_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
person_id = self.pr_person_id
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
settings = current.deployment_settings
org_site_label = settings.get_org_site_label()
vehicle = settings.has_module("vehicle")
# Shortcuts
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
#--------------------------------------------------------------------------
# Assets
#
asset_type_opts = {ASSET_TYPE_VEHICLE : T("Vehicle"),
#ASSET_TYPE_RADIO : T("Radio"),
#ASSET_TYPE_TELEPHONE : T("Telephone"),
ASSET_TYPE_OTHER : T("Other"),
}
asset_condition_opts = {1: T("Good Condition"),
2: T("Minor Damage"),
3: T("Major Damage"),
4: T("Un-Repairable"),
5: T("Needs Maintenance"),
}
ctable = self.supply_item_category
itable = self.supply_item
supply_item_represent = self.supply_item_represent
asset_items_set = db((ctable.can_be_asset == True) & \
(itable.item_category_id == ctable.id))
tablename = "asset_asset"
define_table(tablename,
# Instances
super_link("track_id", "sit_trackable"),
super_link("doc_id", "doc_entity"),
item_entity_id,
Field("number",
label = T("Asset Number"),
),
# @ToDo: We could set this automatically based on Item Category
Field("type", "integer",
default = ASSET_TYPE_OTHER,
label = T("Type"),
represent = lambda opt: \
asset_type_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(asset_type_opts),
readable = vehicle,
writable = vehicle,
),
item_id(represent = supply_item_represent,
requires = IS_ONE_OF(asset_items_set,
"supply_item.id",
supply_item_represent,
sort = True,
),
script = None, # No Item Pack Filter
widget = None,
),
Field("kit", "boolean",
default = False,
label = T("Kit?"),
represent = lambda opt: \
(opt and [T("Yes")] or [NONE])[0],
# Enable in template if-required
readable = False,
writable = False,
),
organisation_id(requires=self.org_organisation_requires(
updateable=True,
#required=True
),
required = True,
script = '''
S3OptionsFilter({
'triggerName':'organisation_id',
'targetName':'site_id',
'lookupResource':'site',
'lookupPrefix':'org',
'lookupField':'site_id',
'lookupURL':S3.Ap.concat('/org/sites_for_org/'),
})''',
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
default = auth.user.site_id if auth.is_logged_in() else None,
empty = False,
label = org_site_label,
ondelete = "RESTRICT",
readable = True,
writable = True,
represent = self.org_site_represent,
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3SiteAutocompleteWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Warehouse"),
# messages.AUTOCOMPLETE_HELP)),
),
Field("sn",
label = T("Serial Number"),
),
organisation_id("supply_org_id",
label = T("Supplier/Donor"),
ondelete = "SET NULL",
),
s3_date("purchase_date",
label = T("Purchase Date"),
),
Field("purchase_price", "double",
#default = 0.00,
represent = lambda v, row=None: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
),
s3_currency("purchase_currency"),
# Base Location, which should always be a Site & set via Log
location_id(readable = False,
writable = False,
),
# Populated onaccept of the log to make a component tab
person_id("assigned_to_id",
readable = False,
writable = False,
comment = self.pr_person_comment(child="assigned_to_id"),
),
# Populated onaccept of the log for reporting/filtering
Field("cond", "integer",
label = T("Condition"),
represent = lambda opt: \
asset_condition_opts.get(opt, UNKNOWN_OPT),
#readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Asset"),
title_display = T("Asset Details"),
title_list = T("Assets"),
title_update = T("Edit Asset"),
title_upload = T("Import Assets"),
label_list_button = T("List Assets"),
label_delete_button = T("Delete Asset"),
msg_record_created = T("Asset added"),
msg_record_modified = T("Asset updated"),
msg_record_deleted = T("Asset deleted"),
msg_list_empty = T("No Assets currently registered"))
asset_represent = asset_AssetRepresent(show_link=True)
# Reusable Field
asset_id = S3ReusableField("asset_id", "reference %s" % tablename,
label = T("Asset"),
ondelete = "CASCADE",
represent = asset_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "asset_asset.id",
asset_represent,
sort=True)),
sortby = "number",
)
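        # Usage note: this reusable field is what links the asset_item, asset_log,
        # asset_human_resource and asset_group component tables (and, via components,
        # the vehicle module) back to asset_asset.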
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
list_fields = ["id",
"item_id$item_category_id",
"item_id",
"number",
#"type",
#"purchase_date",
(T("Assigned To"), "assigned_to_id"),
"organisation_id",
"site_id",
]
report_fields = ["number",
(T("Category"), "item_id$item_category_id"),
(T("Item"), "item_id"),
"organisation_id",
"site_id",
"cond",
]
text_fields = ["number",
"item_id$name",
#"item_id$category_id$name",
"comments",
]
for level in levels:
lfield = "location_id$%s" % level
report_fields.append(lfield)
text_fields.append(lfield)
list_fields.append(lfield)
list_fields.extend(("cond",
"comments"))
filter_widgets = [
S3TextFilter(text_fields,
label = T("Search"),
comment = T("You can search by asset number, item description or comments. You may use % as wildcard. Press 'Search' without input to list all assets."),
#_class = "filter-search",
),
S3OptionsFilter("item_id$item_category_id",
),
S3OptionsFilter("organisation_id",
represent = "%(name)s",
hidden = True,
),
S3LocationFilter("location_id",
levels = levels,
hidden = True,
),
S3OptionsFilter("cond",
hidden = True,
),
]
report_options = Storage(
rows = report_fields,
cols = report_fields,
fact = [(T("Number of items"), "count(number)")],
defaults=Storage(cols = "location_id$%s" % levels[0], # Highest-level of hierarchy
fact = "count(number)",
rows = "item_id$item_category_id",
totals = True,
)
)
# Default summary
summary = [{"name": "addform",
"common": True,
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "report",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map",
"ajax_init": True}],
},
]
# Resource Configuration
configure(tablename,
# Open Tabs after creation
create_next = URL(c="asset", f="asset",
args=["[id]"]),
deduplicate = self.asset_duplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
mark_required = ["organisation_id"],
onaccept = self.asset_onaccept,
realm_components = ["log", "presence"],
report_options = report_options,
summary = summary,
super_entity = ("supply_item_entity", "sit_trackable"),
update_realm = True,
)
# Components
add_components(tablename,
asset_group = "asset_id",
asset_item = "asset_id",
asset_log = "asset_id",
asset_human_resource = "asset_id",
hrm_human_resource = {"link": "asset_human_resource",
"joinby": "asset_id",
"key": "human_resource_id",
"actuate": "hide",
},
vehicle_gps = "asset_id",
vehicle_vehicle = {"joinby": "asset_id",
"multiple": False,
},
)
# =====================================================================
# Asset Items
# - to allow building ad-hoc Kits
#
tablename = "asset_item"
define_table(tablename,
item_entity_id,
asset_id(ondelete="CASCADE"),
item_id(represent = supply_item_represent,
requires = IS_ONE_OF(asset_items_set,
"supply_item.id",
supply_item_represent,
sort = True,
),
script = None, # No Item Pack Filter
widget = None,
),
Field("quantity", "integer", notnull=True,
default = 1,
label = T("Quantity"),
requires = IS_INT_IN_RANGE(1, 1000),
),
Field("sn",
label = T("Serial Number")),
organisation_id("supply_org_id",
label = T("Supplier/Donor"),
ondelete = "SET NULL"),
s3_date("purchase_date",
label = T("Purchase Date")),
Field("purchase_price", "double",
#default=0.00,
represent=lambda v, row=None: \
IS_FLOAT_AMOUNT.represent(v, precision=2)),
s3_currency("purchase_currency"),
# Base Location, which should always be a Site & set via Log
location_id(readable=False,
writable=False),
s3_comments(comment=None),
*s3_meta_fields())
# =====================================================================
# Asset Log
#
asset_log_status_opts = {ASSET_LOG_SET_BASE : T("Base %(facility)s Set") % dict(facility = org_site_label),
ASSET_LOG_ASSIGN : T("Assigned"),
ASSET_LOG_RETURN : T("Returned"),
ASSET_LOG_CHECK : T("Checked"),
ASSET_LOG_REPAIR : T("Repaired"),
ASSET_LOG_DONATED : T("Donated"),
ASSET_LOG_LOST : T("Lost"),
ASSET_LOG_STOLEN : T("Stolen"),
ASSET_LOG_DESTROY : T("Destroyed"),
}
if auth.permission.format == "html":
# T isn't JSON serializable
site_types = auth.org_site_types
for key in site_types.keys():
site_types[key] = str(site_types[key])
site_types = json.dumps(site_types)
script = '''
S3OptionsFilter({
'triggerName':'organisation_id',
'targetName':'site_id',
'lookupPrefix':'org',
'lookupResource':'site',
'lookupField':'site_id',
'fncRepresent': function(record,PrepResult){
var InstanceTypeNice=%(instance_type_nice)s
return record.name+" ("+InstanceTypeNice[record.instance_type]+")"
}})''' % dict(instance_type_nice = site_types)
else:
script = None
tablename = "asset_log"
define_table(tablename,
asset_id(),
Field("status", "integer",
label = T("Status"),
represent = lambda opt: \
asset_log_status_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(asset_log_status_opts),
),
s3_datetime("datetime",
default = "now",
empty = False,
represent = "date",
),
s3_datetime("datetime_until",
label = T("Date Until"),
represent = "date",
),
person_id(label = T("Assigned To")),
Field("check_in_to_person", "boolean",
#label = T("Mobile"), # Relabel?
label = T("Track with this Person?"),
comment = DIV(_class="tooltip",
#_title="%s|%s" % (T("Mobile"),
_title="%s|%s" % (T("Track with this Person?"),
T("If selected, then this Asset's Location will be updated whenever the Person's Location is updated."))),
readable = False,
writable = False,
),
# The Organisation to whom the loan is made
organisation_id(readable = False,
widget = None,
writable = False,
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
label = org_site_label,
#filterby = "site_id",
#filter_opts = auth.permitted_facilities(redirect_on_error=False),
instance_types = auth.org_site_types,
updateable = True,
not_filterby = "obsolete",
not_filter_opts = (True,),
#default = user.site_id if is_logged_in() else None,
readable = True,
writable = True,
empty = False,
represent = self.org_site_represent,
#widget = S3SiteAutocompleteWidget(),
script = script,
),
self.org_room_id(),
#location_id(),
Field("cancel", "boolean",
default = False,
label = T("Cancel Log Entry"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Cancel Log Entry"),
T("'Cancel' will indicate an asset log entry did not occur")))
),
Field("cond", "integer", # condition is a MySQL reserved word
label = T("Condition"),
represent = lambda opt: \
asset_condition_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(asset_condition_opts,
zero = "%s..." % T("Please select")),
),
person_id("by_person_id",
default = auth.s3_logged_in_person(), # This can either be the Asset controller if signed-out from the store
label = T("Assigned By"), # or the previous owner if passed on directly (e.g. to successor in their post)
comment = self.pr_person_comment(child="by_person_id"),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_ASSIGN = T("New Entry in Asset Log")
crud_strings[tablename] = Storage(
label_create = ADD_ASSIGN,
title_display = T("Asset Log Details"),
title_list = T("Asset Log"),
title_update = T("Edit Asset Log Entry"),
label_list_button = T("Asset Log"),
label_delete_button = T("Delete Asset Log Entry"),
msg_record_created = T("Entry added to Asset Log"),
msg_record_modified = T("Asset Log Entry updated"),
msg_record_deleted = T("Asset Log Entry deleted"),
msg_list_empty = T("Asset Log Empty"))
# Resource configuration
configure(tablename,
listadd = False,
list_fields = ["id",
"datetime",
"status",
"datetime_until",
"organisation_id",
"site_id",
"room_id",
"person_id",
#"location_id",
"cancel",
"cond",
"comments",
],
onaccept = self.asset_log_onaccept,
orderby = "asset_log.datetime desc",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(asset_asset_id = asset_id,
asset_represent = asset_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Return safe defaults for names in case the model is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(asset_asset_id = lambda **attr: dummy("asset_id"),
)
# -------------------------------------------------------------------------
@staticmethod
def asset_duplicate(item):
"""
Deduplication of Assets
"""
if item.tablename != "asset_asset":
return
table = item.table
data = item.data
number = data.get("number", None)
query = (table.number == number)
organisation_id = data.get("organisation_id", None)
if organisation_id:
query &= (table.organisation_id == organisation_id)
site_id = data.get("site_id", None)
if site_id:
query &= (table.site_id == site_id)
_duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
item.id = _duplicate.id
item.data.id = _duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def asset_onaccept(form):
"""
After DB I/O
"""
if current.response.s3.bulk:
# Import or Sync
return
db = current.db
atable = db.asset_asset
form_vars = form.vars
kit = form_vars.get("kit", None)
site_id = form_vars.get("site_id", None)
if site_id:
stable = db.org_site
asset_id = form_vars.id
# Set the Base Location
location_id = db(stable.site_id == site_id).select(stable.location_id,
limitby=(0, 1)
).first().location_id
tracker = S3Tracker()
asset_tracker = tracker(atable, asset_id)
asset_tracker.set_base_location(location_id)
if kit:
# Also populate location_id field in component items
aitable = db.asset_item
db(aitable.asset_id == asset_id).update(location_id = location_id)
# Add a log entry for this
ltable = db.asset_log
ltable.insert(asset_id = asset_id,
status = ASSET_LOG_SET_BASE,
organisation_id = form_vars.get("organisation_id", None),
site_id = site_id,
cond = 1,
)
if kit:
# Empty any inappropriate fields
                db(atable.id == asset_id).update(supply_org_id = None,
purchase_date = None,
purchase_price = None,
purchase_currency = None,
)
else:
# Delete any component items
aitable = db.asset_item
ids = db(aitable.asset_id == asset_id).select(aitable.id).as_list()
if ids:
resource = current.s3db.resource("asset_item", id=ids)
resource.delete()
return
# -------------------------------------------------------------------------
@staticmethod
def asset_log_onaccept(form):
"""
After DB I/O
"""
request = current.request
get_vars = request.get_vars
status = get_vars.get("status", None)
if not status:
if not current.response.s3.asset_import:
# e.g. Record merger or Sync
return
# Import
db = current.db
form_vars = form.vars
asset_id = form_vars.asset_id
status = int(form_vars.status)
if status == ASSET_LOG_ASSIGN:
# Only type supported right now
# @ToDo: Support more types
type == "person"
new = True
else:
# Interactive
form_vars = form.vars
status = int(form_vars.status or status)
db = current.db
ltable = db.asset_log
row = db(ltable.id == form_vars.id).select(ltable.asset_id,
limitby=(0, 1)
).first()
try:
asset_id = row.asset_id
except:
return
current_log = asset_get_current_log(asset_id)
type = get_vars.get("type", None)
log_time = current_log.datetime
current_time = form_vars.get("datetime", None).replace(tzinfo=None)
new = log_time <= current_time
if new:
# This is a current assignment
atable = db.asset_asset
aitable = db.asset_item
tracker = S3Tracker()
asset_tracker = tracker(atable, asset_id)
if status == ASSET_LOG_SET_BASE:
# Set Base Location
site_id = form_vars.get("site_id", None)
stable = db.org_site
location_id = db(stable.site_id == site_id).select(stable.location_id,
limitby=(0, 1)
).first().location_id
asset_tracker.set_base_location(location_id)
# Also do component items
db(aitable.asset_id == asset_id).update(location_id = location_id)
elif status == ASSET_LOG_ASSIGN:
if type == "person":
if form_vars.check_in_to_person:
asset_tracker.check_in(db.pr_person, form_vars.person_id,
timestmp = request.utcnow)
# Also do component items
# @ToDo: Have these move when the person moves
locations = asset_tracker.get_location(_fields=[db.gis_location.id])
try:
db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
except:
pass
else:
location_id = asset_tracker.set_location(form_vars.person_id,
timestmp = request.utcnow)
# Also do component items
db(aitable.asset_id == asset_id).update(location_id = location_id)
# Update main record for component
db(atable.id == asset_id).update(assigned_to_id=form_vars.person_id)
elif type == "site":
asset_tracker.check_in(db.org_site, form_vars.site_id,
timestmp = request.utcnow)
# Also do component items
locations = asset_tracker.get_location(_fields=[db.gis_location.id])
try:
db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
except:
pass
elif type == "organisation":
site_id = form_vars.get("site_id", None)
if site_id:
asset_tracker.check_in(db.org_site, site_id,
timestmp = request.utcnow)
# Also do component items
locations = asset_tracker.get_location(_fields=[db.gis_location.id])
try:
db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
except:
pass
else:
# We can no longer track location
asset_tracker.check_out()
elif status == ASSET_LOG_RETURN:
# Set location to base location
location_id = asset_tracker.set_location(asset_tracker,
timestmp = request.utcnow)
# Also do component items
db(aitable.asset_id == asset_id).update(location_id = location_id)
# Update condition in main record
db(atable.id == asset_id).update(cond=form_vars.cond)
return
# =============================================================================
class S3AssetHRModel(S3Model):
"""
Optionally link Assets to Human Resources
- useful for staffing a vehicle
"""
names = ("asset_human_resource",)
def model(self):
#T = current.T
#--------------------------------------------------------------------------
# Assets <> Human Resources
#
tablename = "asset_human_resource"
self.define_table(tablename,
self.asset_asset_id(empty = False),
self.hrm_human_resource_id(empty = False,
ondelete = "CASCADE",
),
#s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict()
# =============================================================================
class S3AssetTeamModel(S3Model):
"""
Optionally link Assets to Teams
"""
names = ("asset_group",)
def model(self):
#T = current.T
#--------------------------------------------------------------------------
# Assets <> Groups
#
tablename = "asset_group"
self.define_table(tablename,
self.asset_asset_id(empty = False),
self.pr_group_id(comment = None,
empty = False,
),
#s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict()
# =============================================================================
def asset_get_current_log(asset_id):
"""
Get the current log entry for this asset
"""
table = current.s3db.asset_log
query = (table.asset_id == asset_id) & \
(table.cancel == False) & \
(table.deleted == False)
# Get the log with the maximum time
asset_log = current.db(query).select(table.id,
table.status,
table.datetime,
table.cond,
table.person_id,
table.organisation_id,
table.site_id,
#table.location_id,
orderby = ~table.datetime,
limitby=(0, 1)).first()
if asset_log:
return Storage(datetime = asset_log.datetime,
person_id = asset_log.person_id,
cond = int(asset_log.cond or 0),
status = int(asset_log.status or 0),
organisation_id = asset_log.organisation_id,
site_id = asset_log.site_id,
#location_id = asset_log.location_id
)
else:
return Storage()
# =============================================================================
def asset_log_prep(r):
"""
Called by Controller
"""
T = current.T
db = current.db
request = current.request
table = db.asset_log
if r.record:
asset = Storage(r.record)
else:
# This is a new record
asset = Storage()
table.cancel.readable = False
table.cancel.writable = False
# This causes an error with the dataTables paginate
# if used only in r.interactive & not also r.representation=="aadata"
if r.method != "read" and r.method != "update":
table.cancel.readable = False
table.cancel.writable = False
current_log = asset_get_current_log(asset.id)
if request.vars.status:
status = int(request.vars.status)
else:
status = 0
if status and status != "None":
field = table.status
field.default = status
field.readable = False
field.writable = False
elif current_log:
table.status.default = current_log.status
if current_log.organisation_id:
table.organisation_id.default = current_log.organisation_id
table.site_id.requires = IS_ONE_OF(db, "org_site.site_id",
table.site_id.represent,
filterby = "organisation_id",
filter_opts = (current_log.organisation_id,))
crud_strings = current.response.s3.crud_strings.asset_log
if status == ASSET_LOG_SET_BASE:
crud_strings.msg_record_created = T("Base Facility/Site Set")
table.by_person_id.label = T("Set By")
table.site_id.writable = True
table.datetime_until.readable = False
table.datetime_until.writable = False
table.person_id.readable = False
table.person_id.writable = False
table.organisation_id.readable = True
table.organisation_id.writable = True
table.site_id.requires = IS_ONE_OF(db, "org_site.site_id",
table.site_id.represent)
elif status == ASSET_LOG_RETURN:
crud_strings.msg_record_created = T("Returned")
table.person_id.label = T("Returned From")
table.person_id.default = current_log.person_id
table.site_id.readable = False
table.site_id.writable = False
elif status == ASSET_LOG_ASSIGN:
type = request.vars.type
# table["%s_id" % type].required = True
if type == "person":
crud_strings.msg_record_created = T("Assigned to Person")
table["person_id"].requires = IS_ONE_OF(db, "pr_person.id",
table.person_id.represent,
orderby="pr_person.first_name",
sort=True,
error_message="Person must be specified!")
table.check_in_to_person.readable = True
table.check_in_to_person.writable = True
table.site_id.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_site.site_id",
table.site_id.represent))
elif type == "site":
crud_strings.msg_record_created = T("Assigned to Facility/Site")
elif type == "organisation":
crud_strings.msg_record_created = T("Assigned to Organization")
table.organisation_id.readable = True
table.organisation_id.writable = True
table.organisation_id.requires = IS_ONE_OF(db, "org_organisation.id",
table.organisation_id.represent,
orderby="org_organisation.name",
sort=True)
table.site_id.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_site.site_id",
table.site_id.represent))
elif "status" in request.get_vars:
crud_strings.msg_record_created = T("Status Updated")
table.person_id.label = T("Updated By")
field = table.status
field.readable = True
field.writable = True
field.requires = IS_IN_SET({ASSET_LOG_CHECK : T("Check"),
ASSET_LOG_REPAIR : T("Repair"),
ASSET_LOG_DONATED : T("Donated"),
ASSET_LOG_LOST : T("Lost"),
ASSET_LOG_STOLEN : T("Stolen"),
ASSET_LOG_DESTROY : T("Destroyed"),
})
# =============================================================================
def asset_rheader(r):
""" Resource Header for Assets """
if r.representation == "html":
record = r.record
if record:
T = current.T
s3db = current.s3db
s3 = current.response.s3
NONE = current.messages["NONE"]
if record.type == ASSET_TYPE_VEHICLE:
STAFF = current.deployment_settings.get_hrm_staff_label()
tabs = [(T("Asset Details"), None, {"native": True}),
(T("Vehicle Details"), "vehicle"),
(STAFF, "human_resource"),
(T("Assign %(staff)s") % dict(staff=STAFF), "assign"),
(T("Check-In"), "check-in"),
(T("Check-Out"), "check-out"),
(T("GPS Data"), "gps"),
]
else:
tabs = [(T("Edit Details"), None)]
#elif record.type == s3.asset.ASSET_TYPE_RADIO:
# tabs.append((T("Radio Details"), "radio"))
#elif record.type == s3.asset.ASSET_TYPE_TELEPHONE:
# tabs.append((T("Telephone Details"), "phone"))
tabs.append((T("Log"), "log"))
tabs.append((T("Documents"), "document"))
rheader_tabs = s3_rheader_tabs(r, tabs)
if current.request.controller == "vehicle":
func = "vehicle"
else:
func = "asset"
# @ToDo: Check permissions before displaying buttons
asset_action_btns = [
A(T("Set Base Facility/Site"),
_href = URL(f=func,
args = [record.id, "log", "create"],
vars = dict(status = ASSET_LOG_SET_BASE)
),
_class = "action-btn",
)
]
current_log = asset_get_current_log(record.id)
status = current_log.status
#if record.location_id:
# A Base Site has been set
# Return functionality removed - as it doesn't set site_id & organisation_id in the logs
#if status == ASSET_LOG_ASSIGN:
# asset_action_btns += [ A( T("Return"),
# _href = URL(f=func,
# args = [record.id, "log", "create"],
# vars = dict(status = ASSET_LOG_RETURN)
# ),
# _class = "action-btn"
# )
# ]
if status < ASSET_LOG_DONATED:
# @ToDo: deployment setting to prevent assigning assets before returning them
# The Asset is available for assignment (not disposed)
asset_action_btns += [
A(T("Assign to Person"),
_href = URL(f=func,
args = [record.id, "log", "create"],
vars = dict(status = ASSET_LOG_ASSIGN,
type = "person")
),
_class = "action-btn",
),
A(T("Assign to Facility/Site"),
_href = URL(f=func,
args = [record.id, "log", "create"],
vars = dict(status = ASSET_LOG_ASSIGN,
type = "site")
),
_class = "action-btn",
),
A(T("Assign to Organization"),
_href = URL(f=func,
args = [record.id, "log", "create"],
vars = dict(status = ASSET_LOG_ASSIGN,
type = "organisation")
),
_class = "action-btn",
),
]
asset_action_btns += [
A(T("Update Status"),
_href = URL(f=func,
args = [record.id, "log", "create"],
vars = None
),
_class = "action-btn",
),
]
table = r.table
ltable = s3db.asset_log
rheader = DIV(TABLE(TR(TH("%s: " % table.number.label),
record.number,
TH("%s: " % table.item_id.label),
table.item_id.represent(record.item_id)
),
TR(TH("%s: " % ltable.cond.label),
ltable.cond.represent(current_log.cond),
TH("%s: " % ltable.status.label),
ltable.status.represent(status),
),
TR(TH("%s: " % ltable.person_id.label),
ltable.person_id.represent(current_log.person_id),
TH("%s: " % ltable.site_id.label),
ltable.site_id.represent(current_log.site_id),
),
),
DIV(_style = "margin-top:5px", # @ToDo: Move to CSS
*asset_action_btns
),
rheader_tabs)
return rheader
return None
# =============================================================================
def asset_controller():
""" RESTful CRUD controller """
s3db = current.s3db
s3 = current.response.s3
# Pre-process
def prep(r):
# Location Filter
current.s3db.gis_location_filter(r)
if r.component_name == "log":
asset_log_prep(r)
return True
s3.prep = prep
# Import pre-process
def import_prep(data):
"""
Flag that this is an Import (to distinguish from Sync)
@ToDo: Find Person records from their email addresses
"""
current.response.s3.asset_import = True
return
# @ToDo: get this working
ctable = s3db.pr_contact
ptable = s3db.pr_person
resource, tree = data
elements = tree.getroot().xpath("/s3xml//resource[@name='pr_person']/data[@field='first_name']")
persons = {}
for element in elements:
email = element.text
if email in persons:
# Replace email with uuid
element.text = persons[email]["uuid"]
# Don't check again
continue
query = (ctable.value == email) & \
(ctable.pe_id == ptable.pe_id)
person = db(query).select(ptable.uuid,
limitby=(0, 1)
).first()
if person:
# Replace email with uuid
uuid = person.uuid
else:
# Blank it
uuid = ""
element.text = uuid
# Store in case we get called again with same value
persons[email] = dict(uuid=uuid)
s3.import_prep = import_prep
# Post-processor
def postp(r, output):
if r.interactive and r.method != "import":
script = "/%s/static/scripts/S3/s3.asset.js" % r.application
s3.scripts.append(script)
S3CRUD.action_buttons(r, deletable=False)
#if not r.component:
#s3.actions.append({"url" : URL(c="asset", f="asset",
# args = ["[id]", "log", "create"],
# vars = {"status" : eden.asset.asset_log_status["ASSIGN"],
# "type" : "person"}),
# "_class" : "action-btn",
# "label" : str(T("Assign"))})
return output
s3.postp = postp
output = current.rest_controller("asset", "asset",
rheader = asset_rheader,
)
return output
# =============================================================================
class asset_AssetRepresent(S3Represent):
""" Representation of Assets """
def __init__(self,
fields = ("number",), # unused
show_link = False,
translate = False,
multiple = False,
):
# Need a custom lookup
self.lookup_rows = self.custom_lookup_rows
super(asset_AssetRepresent,
self).__init__(lookup="asset_asset",
fields=fields,
show_link=show_link,
translate=translate,
multiple=multiple)
# -------------------------------------------------------------------------
def custom_lookup_rows(self, key, values, fields=[]):
"""
Custom lookup method for organisation rows, does a
left join with the parent organisation. Parameters
key and fields are not used, but are kept for API
compatibility reasons.
@param values: the organisation IDs
"""
db = current.db
s3db = current.s3db
table = s3db.asset_asset
itable = db.supply_item
btable = db.supply_brand
qty = len(values)
if qty == 1:
query = (table.id == values[0])
limitby = (0, 1)
else:
query = (table.id.belongs(values))
limitby = (0, qty)
query &= (itable.id == table.item_id)
rows = db(query).select(table.id,
table.number,
table.type,
itable.name,
btable.name,
left=btable.on(itable.brand_id == btable.id),
limitby=limitby)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the asset_asset Row
"""
# Custom Row (with the item & brand left-joined)
number = row["asset_asset.number"]
item = row["supply_item.name"]
brand = row.get("supply_brand.name", None)
if not number:
return self.default
represent = "%s (%s" % (number, item)
if brand:
represent = "%s, %s)" % (represent, brand)
else:
represent = "%s)" % represent
return s3_unicode(represent)
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link.
@param k: the key (site_id)
@param v: the representation of the key
@param row: the row with this key
"""
if row:
type = row.get("asset_asset.type", None)
if type == 1:
return A(v, _href=URL(c="vehicle", f="vehicle", args=[k],
# remove the .aaData extension in paginated views
extension=""
))
k = s3_unicode(k)
return A(v, _href=self.linkto.replace("[id]", k) \
.replace("%5Bid%5D", k))
# END =========================================================================
| mit | 1,738,867,762,911,502,600 | 41.161169 | 178 | 0.406131 | false |
samw3/PyTweeps | pytweeps.py | 1 | 20531 | # PyTweeps: Simple Python program to help manage your twitter followers.
# https://github.com/samw3/PyTweeps
import pkg_resources
import tweepy
import webbrowser
import shelve
import pprint
import sys
import traceback
import time
import collections
from datetime import datetime
from datetime import timedelta
from config import *
import io
import urllib2
def isInt(s):
try:
int(s)
return True
except ValueError:
return False
def initData(data):
# Set up the data shelf
if 'following' not in data.keys():
data['following'] = set()
if 'wasFollowing' not in data.keys():
data['wasFollowing'] = set()
if 'followers' not in data.keys():
data['followers'] = set()
if 'wasFollowedBy' not in data.keys():
data['wasFollowedBy'] = set()
if 'lastTweet' not in data.keys():
data['lastTweet'] = dict()
if 'followedOn' not in data.keys():
data['followedOn'] = dict()
if 'wasFollowingOn' not in data.keys():
data['wasFollowingOn'] = dict()
data.sync()
def follow(api, data, user):
api.create_friendship(user.id)
data['followedOn'][user.id] = datetime.now()
def authenticate(auth, data):
redirect_url = auth.get_authorization_url()
webbrowser.open(redirect_url)
try:
verifier = raw_input('Verifier:')
data['request_token'] = auth.request_token
# auth.set_request_token(auth.request_token.key, auth.request_token.secret)
try:
auth.get_access_token(verifier)
data['access_token_key'] = auth.access_token
data['access_token_secret'] = auth.access_token_secret
data.sync()
auth.set_access_token(data['access_token_key'], data['access_token_secret'])
except tweepy.TweepError:
print 'Error! Failed to get access token.'
except tweepy.TweepError:
print 'Error! Failed to get request token.'
def usageMessage():
print "Usage: python", sys.argv[0], "command [params]\n"
print "Commands:"
print " update"
print " Updates your list of followers and followed"
print " bury daysSinceLastTweet numberToUnfollow"
print " Remove any 'dead' tweeps. i.e. followers who no longer use twitter"
print " requite daysSinceFollowed numberToUnfollow"
print " Remove any tweeps who do not continue to follow you after daysSinceFollowed days"
print " shotgun user numTweeps "
print " Add numTweeps followers from a user. Doesn't follow previously followed users."
print " copycat user numTweeps"
print " Add numTweeps from the list of tweeps user is following. Doesn't follow previously followed users."
print " copykids numKids numTweeps"
print " Add numKids from *every* person you follow's following list. Stop after adding (approximately) numTweeps total."
print " ignore user"
print " Ignore a particular user, never try to follow them and unfollow if we are following."
print " follow user"
print " Follow a particular user, even if we retired them already."
print " unfollowers filename"
print " prints a list of unfollowers to filename"
def error(message):
usageMessage()
print "ERROR: %s\n" % message
sys.exit(-1)
def info(message):
print message
def update(api, data):
newUsers = 0
totalUsers = 0
stillFollowing = set()
for id in api.friends_ids():
stillFollowing.add(id)
if id not in data['following']:
newUsers += 1
totalUsers += 1
if id not in data['followedOn']:
data['followedOn'][id] = datetime.now()
data['wasFollowing'] |= data['following']
data['wasFollowing'] |= stillFollowing
removed = len(data['following'] - stillFollowing)
data['following'] = stillFollowing
noLongerFollowing = data['wasFollowing'] - stillFollowing
data.sync()
print "Following %d, new %d, removed %d" % (totalUsers, newUsers, removed)
newUsers = 0
totalUsers = 0
stillFollowedBy = set()
for id in api.followers_ids():
stillFollowedBy.add(id)
if id not in data['followers']:
newUsers += 1
totalUsers += 1
data['wasFollowedBy'] |= data['followers']
data['wasFollowedBy'] |= stillFollowedBy
removed = len(data['followers'] - stillFollowedBy)
data['followers'] = stillFollowedBy
noLongerFollowedBy = data['wasFollowedBy'] - stillFollowedBy
data.sync()
print "Followers %d, new %d, removed %d" % (totalUsers, newUsers, removed)
print "No Longer Following %d" % len(noLongerFollowing)
print "No Longer Followed by %d" % len(noLongerFollowedBy)
def copycat(api, data, copycatUser, numTweeps):
c = 0
x = 0
for f in tweepy.Cursor(api.friends, copycatUser).items():
x += 1
id = f.id
if id in data['wasFollowing']:
info("%d '%s' following or was following." % (x, f.screen_name))
elif id in data['wasFollowedBy']:
info("%d '%s' followed by or was followed." % (x, f.screen_name))
elif f.protected:
info("%d '%s' is protected." % (x, f.screen_name))
elif f.followers_count <= shotgunTargetMinFollowers:
info("%d '%s' not enough followers." % (x, f.screen_name))
elif f.friends_count <= shotgunTargetMinFollowing:
info("%d '%s' not following enough." % (x, f.screen_name))
elif f.description == "":
info("%d '%s' empty description." % (x, f.screen_name))
elif f.statuses_count <= shotgunTargetMinTweets:
info("%d '%s' not enough tweets." % (x, f.screen_name))
elif f.screen_name == username:
info("%d '%s' can't follow yourself!" % (x, f.screen_name))
else:
api.create_friendship(f.id)
c += 1
info("%d '%s' FOLLOWED(%d)." % (x, f.screen_name, c))
time.sleep(3)
if (c == numTweeps):
break;
return c
def main(argv):
pp = pprint.PrettyPrinter(indent=4)
print "\nPyTweeps v0.1 - using tweepy v%s\n" % pkg_resources.get_distribution('tweepy').version
if len(argv) == 0:
usageMessage()
sys.exit(-1)
data = shelve.open('pytweeps', writeback=True)
initData(data)
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.secure = True
if ('access_token_key' not in data.keys()) or ('access_token_secret' not in data.keys()):
authenticate(auth, data)
auth.set_access_token(data['access_token_key'], data['access_token_secret'])
api = tweepy.API(auth)
command = argv[0]
if command == "update":
update(api, data)
elif command == "bury":
# Check params
if len(argv) < 3:
error("Missing params daysSinceLastTweet or numberToUnfollow")
if not isInt(argv[1]):
error("daysSinceLastTweet is not an integer")
daysSinceLastTweet = int(argv[1])
if not isInt(argv[2]):
error("numberToUnfollow is not an integer")
numberToUnfollow = int(argv[2])
delay = 0
if len(argv) >= 4 and isInt(argv[3]):
delay = argv[3]
# death date is the cut off. if they haven't tweeted since then, bury them
cutoffDate = datetime.now() - timedelta(days=daysSinceLastTweet)
# Check the lastTweet cache, if their last tweet isn't after the cutoffDate don't bother checking against twitter
last = data['lastTweet']
lastKeys = last.keys()
toScan = set()
for f in data['following']:
if f in lastKeys:
if last[f] < cutoffDate:
toScan.add(f)
# else don't bother checking
else:
# not in cache, so check
toScan.add(f)
x = 0
numUnfollowed = 0
try:
for f in toScan:
tweets = api.user_timeline(f, count=1)
if len(tweets) == 0:
# Never tweeted? bury.
user = api.get_user(f)
if user.screen_name not in neverBury:
api.destroy_friendship(f)
print ""
info("Buried '%s' R.I.P. (No Tweets)" % user.screen_name)
numUnfollowed += 1
else:
lastTweet = tweets[0]
if (lastTweet.created_at < cutoffDate):
if lastTweet.user.screen_name not in neverBury:
api.destroy_friendship(f)
print ""
info("Buried '%s' R.I.P. (Last: %s)" % (
lastTweet.user.screen_name, unicode(lastTweet.created_at)))
numUnfollowed += 1
else:
data['lastTweet'][f] = lastTweet.created_at
data.sync()
if numUnfollowed == numberToUnfollow:
break
sys.stdout.write('.')
x += 1
if x % 100 == 0:
sys.stdout.write("[" + str(x) + "]")
sys.stdout.flush()
if delay > 0:
time.sleep(float(delay))
except tweepy.error.TweepError, e:
print ""
if e.message[0]['message'] == u'Rate limit exceeded':
info("Rate limit exceeded")
else:
print traceback.format_exc()
raise e
print ""
update(api, data)
elif command == "requite":
# Check params
if len(argv) < 3:
error("Missing params daysSinceFollowed or numberToUnfollow")
if not isInt(argv[1]):
error("daysSinceFollowed is not an integer")
daysSinceFollowed = int(argv[1])
if not isInt(argv[2]):
error("numberToUnfollow is not an integer")
numberToUnfollow = int(argv[2])
delay = 0
if len(argv) >= 4 and isInt(argv[3]):
delay = argv[3]
# death date is the cut off. if they haven't tweeted since then, bury them
cutoffDate = datetime.now() - timedelta(days=daysSinceFollowed)
# Check the wasFollowingOn cache, if their last tweet isn't after the cutoffDate don't bother checking against twitter
last = data['wasFollowingOn']
lastKeys = last.keys()
followedOn = data['followedOn']
followedOnKeys = followedOn.keys()
toScan = set()
for f in data['following']:
if f in lastKeys:
if last[f] < cutoffDate:
toScan.add(f)
# else don't bother checking
elif f in followedOnKeys:
if followedOn[f] < cutoffDate:
toScan.add(f)
else:
# doesn't have a followedOn date, so check
data['followedOn'][f] = datetime.now()
data.sync()
toScan.add(f)
print "Requiting %d tweeps. %d IDs to scan" % (numberToUnfollow, len(toScan))
x = 0
numUnfollowed = 0
me = api.me()
try:
for f in toScan:
try:
user = api.get_user(f)
except tweepy.error.TweepError, e:
if isinstance(e.message, collections.Iterable):
if e.message[0]['message'] == u'User not found.':
info("User not found, skipping...")
else:
print traceback.format_exc()
raise e
ref = api.show_friendship(source_id=f, target_id=me.id)
if ref[0].following:
# User follows me
data['wasFollowingOn'][f] = datetime.now()
data.sync()
else:
# User not following me
user = api.get_user(f)
if user.screen_name not in neverBury:
api.destroy_friendship(f)
print ""
info("Requited '%s' (Followed On: %s)" % (user.screen_name, unicode(data['followedOn'][f])))
numUnfollowed += 1
# else still has time to follow
if numUnfollowed == numberToUnfollow:
break
sys.stdout.write('.')
x += 1
if x % 100 == 0:
sys.stdout.write("[" + str(x) + "]")
sys.stdout.flush()
if delay > 0:
time.sleep(float(delay))
except tweepy.error.TweepError, e:
print ""
pp.pprint(e)
if isinstance(e.message, collections.Iterable):
if e.message[0]['message'] == u'Rate limit exceeded':
info("Rate limit exceeded")
else:
print traceback.format_exc()
raise e
else:
print traceback.format_exc()
raise e
print ""
update(api, data)
elif command == "shotgun":
if len(argv) != 3:
error("Missing params shotgun user or numTweeps")
shotgunUser = argv[1]
if not isInt(argv[2]):
error("numTweeps is not an integer")
numTweeps = int(argv[2])
info("Shotgunning '%s' for %d followers" % (shotgunUser, numTweeps))
c = 0
x = 0
try:
for f in tweepy.Cursor(api.followers, shotgunUser).items():
x += 1
id = f.id
if id in data['wasFollowing']:
info("%d '%s' following or was following." % (x, f.screen_name))
elif id in data['wasFollowedBy']:
info("%d '%s' followed by or was followed." % (x, f.screen_name))
elif f.protected:
info("%d '%s' is protected." % (x, f.screen_name))
elif f.followers_count <= shotgunTargetMinFollowers:
info("%d '%s' not enough followers." % (x, f.screen_name))
elif f.friends_count <= shotgunTargetMinFollowing:
info("%d '%s' not following enough." % (x, f.screen_name))
elif f.description == "":
info("%d '%s' empty description." % (x, f.screen_name))
elif f.statuses_count <= shotgunTargetMinTweets:
info("%d '%s' not enough tweets." % (x, f.screen_name))
elif f.screen_name == username:
info("%d '%s' can't follow yourself!" % (x, f.screen_name))
else:
try:
api.create_friendship(f.id)
c += 1
info("%d '%s' FOLLOWED(%d)." % (x, f.screen_name, c))
except tweepy.error.TweepError, e:
print ""
if e.message[0]['code'] == 162:
info("%d '%s' blocked you." % (x, f.screen_name))
api.destroy_friendship(f.id)
data['wasFollowing'].add(f.id)
else:
print traceback.format_exc()
raise e
time.sleep(3)
if (c == numTweeps):
break;
except tweepy.error.TweepError, e:
print ""
if e.message[0]['message'] == u'Rate limit exceeded':
info("Rate limit exceeded.")
else:
print traceback.format_exc()
raise e
update(api, data)
elif command == "copycat":
if len(argv) != 3:
error("Missing params copycat user or numTweeps")
copycatUser = argv[1]
if not isInt(argv[2]):
error("numTweeps is not an integer")
numTweeps = int(argv[2])
info("Copycatting '%s' for %d followers" % (copycatUser, numTweeps))
try:
copycat(api, data, copycatUser, numTweeps)
except tweepy.RateLimitError as err:
print ""
info("Rate limit exceeded")
except tweepy.error.TweepError, e:
print ""
print (e.api_code)
print traceback.format_exc()
raise e
update(api, data)
elif command == "copykids":
if len(argv) != 3:
error("Missing params numKids or numTweeps")
if not isInt(argv[1]):
error("numKids is not an integer")
numKids = int(argv[1])
if not isInt(argv[2]):
error("numTweeps is not an integer")
numTweeps = int(argv[2])
info("Copykidding %d follwers from each of your followers. %d followers total." % (numKids, numTweeps))
try:
c = 0
for f in tweepy.Cursor(api.followers).items():
info("********")
print("Copying %s's kids..." % (f.screen_name))
c += copycat(api, data, f.screen_name, numKids)
if (c >= numTweeps):
break;
except tweepy.RateLimitError as err:
print ""
info("Rate limit exceeded")
except tweepy.error.TweepError, e:
print ""
print (e.api_code)
print traceback.format_exc()
raise e
update(api, data)
elif command == "ignore":
if len(argv) != 2:
error("Missing params user")
user = api.get_user(argv[1])
api.destroy_friendship(user.id)
data['wasFollowing'].add(user.id)
print "'%s' ignored." % (user.screen_name)
elif command == "follow":
if len(argv) != 2:
error("Missing params user")
user = api.get_user(argv[1])
follow(api, data, user)
if (user.id in data['wasFollowing']):
data['wasFollowing'].remove(user.id)
print "'%s' FOLLOWED." % (user.screen_name)
elif command == "unfollow":
if len(argv) != 2:
error("Missing param fileName")
with io.open(argv[1], 'r', encoding='utf8') as f:
for line in f:
s = line.split("|",3)
if s[0] == 'x':
api.destroy_friendship(s[1])
print "Unfollowed", s[2]
elif command == "unfollowers":
if len(argv) != 2:
error("Missing param fileName")
old = []
ids = set()
try:
with io.open(argv[1], 'r', encoding='utf8') as f:
for line in f:
s = line.split("|",3)
old.append(s)
ids.add(int(s[1]))
except:
pass
print "Creating a list of unfollowers to %s" % argv[1]
me = api.me()
c = 0
with io.open(argv[1], 'a', encoding='utf8') as f:
for id in api.friends_ids():
print [id], id in ids
if id not in ids:
ref = api.show_friendship(source_id=id, target_id=me.id)
if not ref[0].following:
# User doesn't follow me
user = api.get_user(id)
desc = user.description.replace("\n",'').replace("\r",'')
try:
if user.url:
req = urllib2.urlopen(user.url)
url = req.url
else:
url = ""
except:
url = ""
f.write("|%s|%s|%s|%s|%s\n" % (id, user.screen_name, user.name, desc, url))
f.flush()
time.sleep(3)
c += 1
sys.stdout.write('.')
if c % 100 == 0:
sys.stdout.write("[" + str(c) + "]")
sys.stdout.flush()
else:
error("Unknown command '%s'" % command)
#print api.me().name
rate = api.rate_limit_status()
#pp.pprint(rate)
print ""
data.close()
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-2.0 | 3,758,599,949,243,339,000 | 35.859964 | 132 | 0.509814 | false |
3dfxsoftware/cbss-addons | invoice_report_per_journal/report/invoice_report_demo.py | 1 | 2131 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2014 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo ([email protected])
############################################################################
# Coded by: Luis Torres ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report import report_sxw
from openerp import pooler
from openerp.tools.translate import _
from openerp import tools
from openerp import tests
from openerp.osv import osv
from openerp import netsvc
import openerp
from report_webkit import webkit_report
import datetime
class invoice_report_demo_html(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context=None):
if context is None:
context = {}
super(invoice_report_demo_html, self).__init__(
cr, uid, name, context=context)
self.localcontext.update({
})
webkit_report.WebKitParser('report.invoice.report.demo.webkit',
'account.invoice',
'addons/invoice_report_per_journal/report/invoice_report_demo.mako',
parser=invoice_report_demo_html)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 | 633,159,658,367,753,600 | 39.980769 | 80 | 0.604411 | false |
dataplumber/edge | src/main/python/libraries/edge/dateutility.py | 2 | 1665 | from datetime import date, datetime, timedelta
import dateutil.parser
import calendar
"""
Utility class for date and time conversion.
"""
class DateUtility(object):
RFC_822_GMT_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
@staticmethod
def convertTimeLongToIso(time):
isoTime = ''
try:
isoTime = datetime.utcfromtimestamp(float(time) / 1000).isoformat() + 'Z'
except ValueError:
pass
return isoTime
@staticmethod
def convertISOToUTCTimestamp(isoTime):
try:
#parse ISO date to datetime object
dt = dateutil.parser.parse(isoTime)
#return timestamp in milliseconds
return calendar.timegm(dt.utctimetuple()) * 1000
except:
return None
@staticmethod
def pastDateRFC822(hoursAgo):
return (datetime.utcnow() - timedelta(hours=hoursAgo)).strftime(DateUtility.RFC_822_GMT_FORMAT)
@staticmethod
def convertTimeLongToRFC822(time):
return DateUtility.convertTimeLong(time, DateUtility.RFC_822_GMT_FORMAT)
@staticmethod
def convertTimeLong(time, format):
strTime = ''
try:
strTime = datetime.utcfromtimestamp(float(time) / 1000).strftime(format)
except ValueError:
pass
return strTime
@staticmethod
def convertISOTime(isoTime, format):
try:
#parse ISO date to datetime object
dt = dateutil.parser.parse(isoTime)
#return timestamp in specified format
return dt.strftime(format)
except:
return None
| apache-2.0 | -214,491,296,849,177,860 | 28.210526 | 103 | 0.606006 | false |
MikeFair/www.gittip.com | gittip/csrf.py | 1 | 6543 | """Cross Site Request Forgery middleware, borrowed from Django.
See also:
https://github.com/django/django/blob/master/django/middleware/csrf.py
https://docs.djangoproject.com/en/dev/ref/contrib/csrf/
https://github.com/zetaweb/www.gittip.com/issues/88
"""
import rfc822
import re
import time
import urlparse
#from django.utils.cache import patch_vary_headers
cc_delim_re = re.compile(r'\s*,\s*')
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if 'Vary' in response.headers:
vary_headers = cc_delim_re.split(response.headers['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set([header.lower() for header in vary_headers])
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response.headers['Vary'] = ', '.join(vary_headers + additional_headers)
#from django.utils.http import same_origin
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
from aspen import Response
from crypto import constant_time_compare, get_random_string
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
TOKEN_LENGTH = 32
TIMEOUT = 60 * 60 * 24 * 7 * 52
def _get_new_csrf_key():
return get_random_string(TOKEN_LENGTH)
def _sanitize_token(token):
# Allow only alphanum, and ensure we return a 'str' for the sake
# of the post processing middleware.
if len(token) > TOKEN_LENGTH:
return _get_new_csrf_key()
token = re.sub('[^a-zA-Z0-9]+', '', str(token.decode('ascii', 'ignore')))
if token == "":
# In case the cookie has been truncated to nothing at some point.
return _get_new_csrf_key()
return token
def _is_secure(request):
import gittip
return gittip.canonical_scheme == 'https'
def _get_host(request):
"""Returns the HTTP host using the request headers.
"""
return request.headers.get('X-Forwarded-Host', request.headers['Host'])
def inbound(request):
"""Given a Request object, reject it if it's a forgery.
"""
try:
csrf_token = request.headers.cookie.get('csrf_token')
csrf_token = '' if csrf_token is None else csrf_token.value
csrf_token = _sanitize_token(csrf_token)
# Use same token next time
request.context['csrf_token'] = csrf_token
except KeyError:
csrf_token = None
# Generate token and store it in the request, so it's
# available to the view.
request.context['csrf_token'] = _get_new_csrf_key()
# Assume that anything not defined as 'safe' by RC2616 needs protection
if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if _is_secure(request):
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent
# nonce we're using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = request.headers.get('Referer')
if referer is None:
raise Response(403, REASON_NO_REFERER)
# Note that get_host() includes the port.
good_referer = 'https://%s/' % _get_host(request)
if not same_origin(referer, good_referer):
reason = REASON_BAD_REFERER % (referer, good_referer)
raise Response(403, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
raise Response(403, REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.line.method == "POST":
request_csrf_token = request.body.get('csrf_token', '')
if request_csrf_token == "":
# Fall back to X-CSRF-TOKEN, to make things easier for AJAX,
# and possible for PUT/DELETE.
request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')
if not constant_time_compare(request_csrf_token, csrf_token):
raise Response(403, REASON_BAD_TOKEN)
def outbound(response):
csrf_token = response.request.context.get('csrf_token')
# If csrf_token is unset, then inbound was never called, probaby because
# another inbound hook short-circuited.
if csrf_token is None:
return response
# Set the CSRF cookie even if it's already set, so we renew
# the expiry timer.
response.headers.cookie['csrf_token'] = csrf_token
cookie = response.headers.cookie['csrf_token']
# I am not setting domain, because it is supposed to default to what we
# want: the domain of the object requested.
#cookie['domain']
cookie['path'] = '/'
cookie['expires'] = rfc822.formatdate(time.time() + TIMEOUT)
#cookie['httponly'] = "Yes, please." Want js access for this.
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
| cc0-1.0 | -2,196,269,443,329,078,800 | 36.388571 | 81 | 0.643741 | false |
ksang/error-extractor | lib/markdown.py | 1 | 2193 | '''
MarkDown format generator
'''
class MarkDown:
'convert raw text to markdown syntax'
def __init__(self):
self.escape_table = {"\\": "\\\\", "`": "\`",
"*": "\*", "_": "\_",
"{": "\{", "}": "\}",
"[": "\[", "]": "\]",
"(": "\(", ")": "\)",
"#": "\#", "+": "\+",
"-": "\-", ".": "\.",
"|": "\|"
}
def __escape(self, data):
return "".join(self.escape_table.get(c,c) for c in data)
def __convert_lines(self, text='', prefix='', suffix='', olist=False):
if type(text) is str:
if olist:
return '1. ' + self.__escape(text)
else:
return prefix + self.__escape(text) + suffix
elif type(text) is list:
for idx, t in enumerate(text):
if olist:
nt = str(idx+1) + '. ' + self.__escape(t)
else:
nt = prefix + self.__escape(t) + suffix
text[idx] = nt
return text
return ''
def text(self, text):
return self.__convert_lines(text)
def error(self, text):
return self.__convert_lines(text)
def title(self, text):
return self.__convert_lines(text, '##')
def subtitle(self, text):
return self.__convert_lines(text, '###')
def ssubtitle(self, text):
return self.__convert_lines(text, '####')
def bold(self, text):
return self.__convert_lines(text, '**', '**')
def line_breaker(self, count=1):
if count > 1:
ret = []
for i in range(0,count):
ret.append("-------------")
return ret
return "-------------"
def reference(self, text):
return self.__convert_lines(text, '>')
def ordered_list(self, data):
return self.__convert_lines(data, olist=True)
def unordered_list(self, data):
return self.__convert_lines(data, '- ') | mit | 8,219,994,262,008,899,000 | 29.472222 | 74 | 0.401277 | false |
levilucio/SyVOLT | ECore_Copier_MM/transformation-Large/HeoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation.py | 1 | 5078 |
from core.himesis import Himesis
class HeoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HeoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HeoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation, self).__init__(name='HeoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation', num_nodes=27, edges=[])
# Add the edges
self.add_edges([[0, 6], [6, 5], [0, 8], [8, 7], [1, 10], [10, 9], [1, 12], [12, 11], [5, 3], [3, 7], [9, 4], [4, 11], [9, 13], [13, 5], [11, 14], [14, 7], [9, 15], [15, 16], [17, 18], [18, 16], [17, 19], [19, 20], [11, 21], [21, 22], [23, 24], [24, 22], [23, 25], [25, 26], [0, 2], [2, 1]])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """eoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation"""
self["GUID__"] = 5816395996192583717
# Set the node attributes
self.vs[0]["mm__"] = """MatchModel"""
self.vs[0]["GUID__"] = 8044970359314201378
self.vs[1]["mm__"] = """ApplyModel"""
self.vs[1]["GUID__"] = 1048396254969054700
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["GUID__"] = 4558494274367220420
self.vs[3]["associationType"] = """eAnnotations"""
self.vs[3]["mm__"] = """directLink_S"""
self.vs[3]["GUID__"] = 2115966609539548178
self.vs[4]["associationType"] = """eAnnotations"""
self.vs[4]["mm__"] = """directLink_T"""
self.vs[4]["GUID__"] = 1458168688512188010
self.vs[5]["name"] = """"""
self.vs[5]["classtype"] = """EOperation"""
self.vs[5]["mm__"] = """EOperation"""
self.vs[5]["cardinality"] = """+"""
self.vs[5]["GUID__"] = 3498868833057656827
self.vs[6]["mm__"] = """match_contains"""
self.vs[6]["GUID__"] = 1307123802579665829
self.vs[7]["name"] = """"""
self.vs[7]["classtype"] = """EAnnotation"""
self.vs[7]["mm__"] = """EAnnotation"""
self.vs[7]["cardinality"] = """+"""
self.vs[7]["GUID__"] = 5438034355437875093
self.vs[8]["mm__"] = """match_contains"""
self.vs[8]["GUID__"] = 347179529733664915
self.vs[9]["name"] = """"""
self.vs[9]["classtype"] = """EOperation"""
self.vs[9]["mm__"] = """EOperation"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = 2062346932891848348
self.vs[10]["mm__"] = """apply_contains"""
self.vs[10]["GUID__"] = 7369516320927345833
self.vs[11]["name"] = """"""
self.vs[11]["classtype"] = """EAnnotation"""
self.vs[11]["mm__"] = """EAnnotation"""
self.vs[11]["cardinality"] = """1"""
self.vs[11]["GUID__"] = 8754100728367131831
self.vs[12]["mm__"] = """apply_contains"""
self.vs[12]["GUID__"] = 7003937250653372044
self.vs[13]["mm__"] = """backward_link"""
self.vs[13]["type"] = """ruleDef"""
self.vs[13]["GUID__"] = 1875949330034786489
self.vs[14]["mm__"] = """backward_link"""
self.vs[14]["type"] = """ruleDef"""
self.vs[14]["GUID__"] = 5523630539087955496
self.vs[15]["mm__"] = """hasAttribute_T"""
self.vs[15]["GUID__"] = 2583131276534883053
self.vs[16]["name"] = """ApplyAttribute"""
self.vs[16]["Type"] = """'String'"""
self.vs[16]["mm__"] = """Attribute"""
self.vs[16]["GUID__"] = 1181219036459105099
self.vs[17]["name"] = """eq_"""
self.vs[17]["mm__"] = """Equation"""
self.vs[17]["GUID__"] = 1530653583095677969
self.vs[18]["mm__"] = """leftExpr"""
self.vs[18]["GUID__"] = 40237161015443598
self.vs[19]["mm__"] = """rightExpr"""
self.vs[19]["GUID__"] = 7359435342082954621
self.vs[20]["name"] = """solveRef"""
self.vs[20]["Type"] = """'String'"""
self.vs[20]["mm__"] = """Constant"""
self.vs[20]["GUID__"] = 6720296362885197874
self.vs[21]["mm__"] = """hasAttribute_T"""
self.vs[21]["GUID__"] = 7435363414672850123
self.vs[22]["name"] = """ApplyAttribute"""
self.vs[22]["Type"] = """'String'"""
self.vs[22]["mm__"] = """Attribute"""
self.vs[22]["GUID__"] = 206401628991295002
self.vs[23]["name"] = """eq_"""
self.vs[23]["mm__"] = """Equation"""
self.vs[23]["GUID__"] = 3235173079800635441
self.vs[24]["mm__"] = """leftExpr"""
self.vs[24]["GUID__"] = 7728551407519580789
self.vs[25]["mm__"] = """rightExpr"""
self.vs[25]["GUID__"] = 98859355129756548
self.vs[26]["name"] = """solveRef"""
self.vs[26]["Type"] = """'String'"""
self.vs[26]["mm__"] = """Constant"""
self.vs[26]["GUID__"] = 6740085100061687672
| mit | 2,488,468,065,536,911,000 | 48.300971 | 298 | 0.522844 | false |
zrhans/pythonanywhere | pyscripts/ply_wrose.py | 1 | 1678 | """
DATA,Chuva,Chuva_min,Chuva_max,VVE,VVE_min,VVE_max,DVE,DVE_min,DVE_max,Temp.,Temp._min,Temp._max,Umidade,Umidade_min,Umidade_max,Rad.,Rad._min,Rad._max,Pres.Atm.,Pres.Atm._min,Pres.Atm._max,Temp.Int.,Temp.Int._min,Temp.Int._max,CH4,CH4_min,CH4_max,HCnM,HCnM_min,HCnM_max,HCT,HCT_min,HCT_max,SO2,SO2_min,SO2_max,O3,O3_min,O3_max,NO,NO_min,NO_max,NO2,NO2_min,NO2_max,NOx,NOx_min,NOx_max,CO,CO_min,CO_max,MP10,MP10_min,MP10_max,MPT,MPT_min,MPT_max,Fin,Fin_min,Fin_max,Vin,Vin_min,Vin_max,Vout,Vout_min,Vout_max
"""
import plotly.plotly as py # Every function in this module will communicate with an external plotly server
import plotly.graph_objs as go
import pandas as pd
DATAFILE = r'/home/zrhans/w3/bns/bns_2016-1.csv'
df = pd.read_csv(DATAFILE, parse_dates=True, sep=',', header=0, index_col='DATA')
x = df.DVE
y = df.VVE
#print(y)
# Definindo as series dedados
trace1 = go.Area(
r = y,#["2015-12-01","2015-12-01 01:00:00","2015-12-01 02:00:00","2015-12-01 03:00:00","2015-12-01 04:00:00","2015-12-01 05:00:00"],
t = x,#[74.73,76.59,76.5,79.03,77.89,81.9,],
name='Vento m/s',
marker=dict(
color='rgb(158,154,200)'
)
)
# Edit the layout
layout = go.Layout(
title='Distribuição da Velocidade do Vento no diagrama Laurel',
font = dict(size=16),
radialaxis=dict(
ticksuffix='m/s'
),
orientation=270
)
data = [trace1]
fig = go.Figure(data=data, layout=layout)
# Tracando o objeto
py.plot(
fig,
filename='hans/oi_wrose', # name of the file as saved in your plotly account
sharing='public'
) # 'public' | 'private' | 'secret': Learn more: https://plot.ly/python/privacy
| apache-2.0 | 3,732,984,452,064,682,000 | 32.52 | 507 | 0.665274 | false |
Travelport-Czech/apila | tasks/Lambda.py | 1 | 8808 | import zipfile
import tempfile
import shutil
import os
import os.path
import hashlib
import base64
import json
import logging
import subprocess
import re
import botocore
import tasks.name_constructor as name_constructor
import tasks.bototools as bototools
from tasks.Task import Task
class Lambda(Task):
"""Create a lambda function and upload the code from given folder"""
known_params = {
'name': 'function name',
'code': "path to the folder with function's source code",
'role': 'name of a role for the execution of the function',
'runtime': "name and a version of interpret for the execution i.e.: 'nodejs4.3'",
'handler': 'entrypoint to the function code',
'description': 'short description of the function',
'timeout': 'maximal time for the execution of the function',
'memory_size': 'amount of memory reserved for the execution of the function',
'publish': "I'm not sure, give always True ;-)",
'babelize': "flag if the source must be converted by babel (default True)",
'babelize_skip': "list of modules to be skipped by babel"
}
required_params = ('name', 'code', 'role', 'runtime', 'handler')
required_configs = ('user', 'branch')
task_name = 'lambda'
def __str__(self):
if self.name:
return self.name
else:
return 'Create a lambda function %s' % (self.params['description'] if 'description' in self.params else self.params['name'])
def get_files(self, path, rel_part):
out = []
for root, dirs, files in os.walk(os.path.join(path, rel_part)):
rel_root = root[len(path):].lstrip('/')
for filename in files:
out.append((os.path.join(root, filename), os.path.join(rel_root, filename)))
return sorted(out)
def create_zip(self, files):
zip_name = tempfile.mkstemp(suffix='.zip', prefix='lambda_')[1]
with zipfile.ZipFile(zip_name, 'w') as myzip:
for filedef in files:
os.utime(filedef[0], (946681200, 946681200)) # date '+%s' -d '2000-01-01'
myzip.write(filedef[0], filedef[1])
zip_data = open(zip_name, 'rb').read()
os.unlink(zip_name)
return zip_data
def run_npm_install(self, path):
cwd = os.getcwd()
os.chdir(path)
try:
npm_out = subprocess.check_output(['npm', 'install', '--production'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logging.error(e.output)
raise e
finally:
os.chdir(cwd)
def babelize(self, base_path, clean_dir, babelized_dir):
cwd = os.getcwd()
if os.path.exists('../node_modules/.bin/babel'):
os.chdir('..')
if not os.path.exists('node_modules/.bin/babel'):
os.chdir(base_path)
preset_base = os.getcwd()
try:
babel_out = subprocess.check_output(' '.join(['node_modules/.bin/babel', '--no-babelrc --presets', os.path.join(preset_base, 'node_modules', 'babel-preset-es2015-node4'), '--copy-files', '--out-dir', babelized_dir, clean_dir]), stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
logging.error('cwd: '+os.getcwd())
logging.error(e.output)
raise e
finally:
os.chdir(cwd)
def clean_packages(self, files, path_to_remove):
r_shasum = re.compile(r'"_shasum"[^,]+,')
for filename, rel in files:
if filename.endswith('package.json'):
with open(filename) as fin:
text = fin.read()
new_text = r_shasum.sub('', text.replace(path_to_remove, '/tmp'))
with open(filename, 'w') as fout:
fout.write(new_text)
def prepare_zipped_code(self, code_path, babelize):
excluded_mods = self.params['babelize_skip'] if 'babelize_skip' in self.params else set()
work_dir = tempfile.mkdtemp(prefix='lambda_')
clean_dir = os.path.join(work_dir, 'clean')
os.mkdir(clean_dir)
shutil.copytree(os.path.join(code_path, 'app'), os.path.join(clean_dir, 'app'))
shutil.copy(os.path.join(code_path, 'package.json'), os.path.join(clean_dir, 'package.json'))
self.run_npm_install(clean_dir)
if babelize:
babelized_dir = os.path.join(work_dir, 'babelized')
babelized_app_dir = os.path.join(babelized_dir, 'app')
babelized_mod_dir = os.path.join(babelized_dir, 'node_modules')
clean_mod_dir = os.path.join(clean_dir, 'node_modules')
os.mkdir(babelized_dir)
os.mkdir(babelized_app_dir)
os.mkdir(babelized_mod_dir)
self.babelize(code_path, os.path.join(clean_dir, 'app'), babelized_app_dir)
for module_name in os.listdir(clean_mod_dir):
src = os.path.join(clean_mod_dir, module_name)
dest = os.path.join(babelized_mod_dir, module_name)
if module_name in excluded_mods:
shutil.copytree(src, dest)
else:
os.mkdir(dest)
self.babelize(code_path, src, dest)
files = self.get_files(babelized_app_dir, '') + self.get_files(babelized_dir, 'node_modules')
else:
files = self.get_files(os.path.join(clean_dir, 'app'), '') + self.get_files(clean_dir, 'node_modules')
self.clean_packages(files, work_dir)
files_to_zip = [file_name for file_name in files if not file_name[0].endswith('.SAMPLE')]
zip_data = self.create_zip(files_to_zip)
shutil.rmtree(work_dir)
return zip_data
def run(self, clients, cache):
client = clients.get('lambda')
iam_client = clients.get('iam')
function_name = name_constructor.lambda_name(self.params['name'], self.config['user'], self.config['branch'])
role_arn = bototools.get_role_arn(iam_client, self.params['role'])
description = (self.params['description'] if 'description' in self.params else '') + self.get_version_description()
try:
zip_data = self.prepare_zipped_code(self.params['code'], True if 'babelize' not in self.params else self.params['babelize'])
except Exception as e:
logging.exception(str(e))
return (False, str(e))
if role_arn is None:
return (False, "Required role '%s' not found" % self.params['role'])
try:
function_conf = client.get_function_configuration(FunctionName=function_name)
except botocore.exceptions.ClientError:
return self.create(client, cache, function_name, role_arn, zip_data, description)
if role_arn == function_conf['Role'] and \
self.params['runtime'] == function_conf['Runtime'] and \
self.params['handler'] == function_conf['Handler'] and \
(description == function_conf['Description']) and \
('timeout' not in self.params or self.params['timeout'] == function_conf['Timeout']) and \
('memory_size' not in self.params or self.params['memory_size'] == function_conf['MemorySize']):
result = ''
else:
self.update(client, function_name, role_arn, description)
result = self.CHANGED
sha256_sumator = hashlib.sha256()
sha256_sumator.update(zip_data)
sha256_sum = sha256_sumator.digest()
sha256_sum_encoded = base64.b64encode(sha256_sum)
if sha256_sum_encoded != function_conf['CodeSha256']:
client.update_function_code(FunctionName=function_name, ZipFile=zip_data, Publish=self.params['publish'] if 'publish' in self.params else None)
result = self.CHANGED
cache.put('lambda', function_name, function_conf['FunctionArn'])
return (True, result)
def update(self, client, function_name, role_arn, description):
lambda_def = {
'FunctionName': function_name,
'Runtime': self.params['runtime'],
'Role': role_arn,
'Handler': self.params['handler']
}
lambda_def['Description'] = description
if 'timeout' in self.params:
lambda_def['Timeout'] = self.params['timeout']
if 'memory_size' in self.params:
lambda_def['MemorySize'] = self.params['memory_size']
client.update_function_configuration(**lambda_def)
def create(self, client, cache, function_name, role_arn, zip_data, description):
lambda_def = {
'FunctionName': function_name,
'Runtime': self.params['runtime'],
'Role': role_arn,
'Handler': self.params['handler'],
'Code': {'ZipFile': zip_data}
}
lambda_def['Description'] = description
if 'timeout' in self.params:
lambda_def['Timeout'] = self.params['timeout']
if 'memory_size' in self.params:
lambda_def['MemorySize'] = self.params['memory_size']
if 'publish' in self.params:
lambda_def['Publish'] = self.params['publish']
response = client.create_function(**lambda_def)
cache.put('lambda', function_name, response['FunctionArn'])
return (True, self.CREATED)
def get_version_description(self):
manifest_path = os.path.join(self.params['code'], 'package.json')
if os.path.exists(manifest_path):
manifest = json.load(open(manifest_path))
if 'version' in manifest:
return ' (v%s)' % manifest['version']
return ''
| mit | 2,048,214,956,815,787,300 | 41.346154 | 271 | 0.655654 | false |
kyuupichan/electrumx | docs/conf.py | 1 | 5054 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from electrumx import version
# -- Project information -----------------------------------------------------
project = 'ElectrumX'
copyright = '2016-2020, Neil Booth'
author = 'Neil Booth'
# The full version including branding
release = version
# The short X.Y version
version = version.split()[-1]
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'description': 'Lightweight Electrum Server in Python',
'github_user': 'kyuupichan',
'github_repo': 'electrumx',
'github_button': True,
'github_type': 'star',
'github_banner': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html',
]
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ElectrumXdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ElectrumX.tex', 'ElectrumX Documentation',
'Neil Booth', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'electrumx', 'ElectrumX Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ElectrumX', 'ElectrumX Documentation',
author, 'ElectrumX', 'One line description of project.',
'Miscellaneous'),
]
| mit | 5,676,769,521,636,768,000 | 29.263473 | 79 | 0.647804 | false |
gcallah/Indra | indraV1/models/markov_attempts/two_pop_markov_model.py | 1 | 1534 | """
You can clone this file and its companion two_pop_m_run.py
to easily get started on a new two pop markov model.
It also is a handy tool to have around for testing
new features added to the base system. The agents
don't move. They have 50% chance of changing color
from red to blue, or from blue to red.
"""
import indra.two_pop_markov as itpm
R = 0
B = 1
STATE_MAP = { R: "Red", B: "Blue" }
class TestFollower(itpm.Follower):
"""
An agent that prints its neighbors in preact
and also jumps to an empty cell: defaut behavior
from our ancestor.
Attributes:
state: Red or Blue ... whichever it is the agent
will appear to be this on the scatter plot.
ntype: node type
next_state: the next color the agent will be
"""
def __init__(self, name, goal):
super().__init__(name, goal)
self.state = R
self.ntype = STATE_MAP[R]
self.next_state = None
def postact(self):
"""
Set our type to next_state.
"""
if self.next_state is not None and self.next_state != self.state:
# print("Setting state to " + str(self.next_state))
self.set_state(self.next_state)
self.next_state = None
return self.pos
def set_state(self, new_state):
"""
Set agent's new type.
"""
old_type = self.ntype
self.state = new_state
self.ntype = STATE_MAP[new_state]
self.env.change_agent_type(self, old_type, self.ntype)
| gpl-3.0 | 7,503,514,805,912,917,000 | 26.890909 | 73 | 0.602999 | false |
imankulov/android2po | tests/test_commands.py | 3 | 6703 | """TODO: We need to test the basic command functionality, ensuring that
at its core, import, export and init are operative, create the files they
should create, skip the files they should skip when they should be skipped,
etc. In particular, we should test both the case of multiple XML input files
(strings.xml, arrays.xml), and the case of only a single source.
"test_options" tests the commands in combination with specific options and
will thus ensure that commands run, but does not check that they do the
right thing.
"""
from nose.tools import assert_raises
from babel.messages import Catalog
from android2po.convert import StringArray
from .helpers import ProgramTest
class TestInit(ProgramTest):
def test_init_with_nondefault_strings(self):
p = self.setup_project()
p.write_xml(data={'s1': 'foo'})
p.write_xml(data={'s1': 'bar', 'de_only': 'no_default'}, lang='de')
assert 'de_only' in p.program('init', {'-v': ''})
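    # A hedged sketch of init with an array resource: it assumes write_xml()
    # accepts StringArray values (as test_empty_stringarray does) and that
    # each array item becomes its own message in the generated template.
    # The resource name 'colors' and its items are made-up example data.
    def test_init_with_string_array(self):
        """Each item of a string-array should yield one message."""
        p = self.setup_project()
        p.write_xml(data={'colors': StringArray(['red', 'blue'])})
        p.program('init')
        assert len(p.get_po('template.pot')) == 2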
class TestExport(ProgramTest):
def test_export_with_empty_master_xml(self):
"""[Regression] Test that export works fine if the master
resource is empty."""
p = self.setup_project(xml_langs=['de'])
p.write_xml(data="""<resources></resources>""", lang='de')
p.write_po(Catalog('de'))
        assert '[failed]' not in p.program('export')
def test_export_with_non_existent_po(self):
p = self.setup_project()
p.write_xml(data="""<resources></resources>""", lang='de')
assert '[skipped]' in p.program('export', {'de': ''})
class TestImport(ProgramTest):
pass
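    # Hedged sketches of basic import behaviour, assuming the ProgramTest
    # helpers used elsewhere in this file (setup_project, write_po, program,
    # get_xml) and the 'name' / 'name:index' context convention seen in
    # TestDealWithBrokenInput; resource names and translations here are
    # made-up example data.
    def test_import_basic(self):
        """A translated message should end up in the language XML."""
        p = self.setup_project()
        c = Catalog(locale='de')
        c.add('english', 'deutsch', context='s1')
        p.write_po(c, 'de.po')
        p.program('import')
        assert p.get_xml('de')['s1'].text == 'deutsch'
    def test_import_array(self):
        """Array items addressed by 'name:index' contexts are merged back."""
        p = self.setup_project()
        c = Catalog(locale='de')
        c.add('red', 'rot', context='colors:0')
        c.add('blue', 'blau', context='colors:1')
        p.write_po(c, 'de.po')
        p.program('import')
        assert len(p.get_xml('de')['colors']) == 2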
class TestPlurals(ProgramTest):
"""Test plural support on the program level.
Low-level plural tests are in convert/
"""
def test_init(self):
"""Test that the init command generates the proper plural form."""
p = self.setup_project()
p.write_xml(data="""<resources></resources>""")
p.write_xml(data="""<resources></resources>""", lang='ja')
p.program('init')
catalog = p.get_po('ja.po')
assert catalog.num_plurals == 1
assert catalog.plural_expr == '(0)'
def test_export(self):
"""Test that the export command maintains the proper plural form,
and actually replaces an incorrect one."""
p = self.setup_project()
p.write_xml(data="""<resources></resources>""")
p.write_xml(data="""<resources></resources>""", lang='ja')
# Generate a catalog with different plural rules than we expect
catalog = Catalog('ja')
catalog._num_plurals, catalog._plural_expr = 2, '(n < 2)'
p.write_po(catalog)
# Export should override the info
assert 'Plural-Forms header' in p.program('export')
catalog = p.get_po('ja.po')
assert catalog.num_plurals == 1
assert catalog.plural_expr == '(0)'
class TestDealWithBrokenInput(ProgramTest):
"""Make sure we can handle broken input.
"""
    def mkcatalog(self, locale='de'):
"""Helper that returns a gettext catalog with one message
already added.
Tests can add a broken message and then ensure that at least
the valid message still was processed.
"""
        c = Catalog(locale=locale)
c.add('valid_message', 'valid_value', context='valid_message')
return c
    def runprogram(self, project, command, args=None, **kw):
"""Helper to run the given command in quiet mode. The warnings
we test for here should appear even there.
"""
        args = dict(args or {})
        args['--quiet'] = True
return project.program(command, args, **kw)
def test_nocontext(self):
"""Some strings in the .po file do not have a context set.
"""
p = self.setup_project()
c = self.mkcatalog()
c.add('s', 'v',) # no context!
p.write_po(c, 'de.po')
assert 'no context' in self.runprogram(p, 'import', expect=1)
assert len(p.get_xml('de')) == 1
    def test_duplicate_array_index(self):
"""An encoded array in the .po file has the same index twice.
"""
p = self.setup_project()
c = self.mkcatalog()
c.add('t1', 'v1', context='myarray:1')
c.add('t2', 'v2', context='myarray:1')
p.write_po(c, 'de.po')
assert 'Duplicate index' in self.runprogram(p, 'import', expect=1)
xml = p.get_xml('de')
assert len(xml) == 2
assert len(xml['myarray']) == 1
def test_invalid_xhtml(self):
"""XHTML in .po files may be invalid; a forgiving parser will be
used as a fallback.
"""
p = self.setup_project()
c = self.mkcatalog()
c.add('s', 'I am <b>bold', context='s')
p.write_po(c, 'de.po')
assert 'invalid XHTML' in self.runprogram(p, 'import')
assert p.get_xml('de')['s'].text == 'I am <b>bold</b>'
# XXX test_duplicate_context
def test_duplicate_resource_string(self):
"""A resource XML file could contain a string twice.
"""
p = self.setup_project()
p.write_xml(data="""<resources><string name="s1">foo</string><string name="s1">bar</string></resources>""")
assert 'Duplicate resource' in self.runprogram(p, 'init')
assert len(p.get_po('template.pot')) == 1
def test_empty_stringarray(self):
"""A warning is shown if a string array is empty.
"""
p = self.setup_project()
p.write_xml(data={'s1': StringArray([])})
assert 'is empty' in self.runprogram(p, 'init')
assert len(p.get_po('template.pot')) == 0
def test_type_mismatch(self):
"""A resource name is string-array in the reference file, but a
normal string in the translation.
"""
p = self.setup_project(xml_langs=['de'])
p.write_xml(data={'s1': StringArray(['value'])})
p.write_xml(data={'s1': 'value'}, lang='de')
assert 'string-array in the reference' in self.runprogram(p, 'init')
assert len(p.get_po('template.pot')) == 1
def test_invalid_resource_xml(self):
"""Resource xml files are so broken we can't parse them.
"""
# Invalid language resource
p = self.setup_project(xml_langs=['de'])
p.write_xml(data="""<resources><string name="s1"> ...""", lang='de')
assert 'Failed parsing' in self.runprogram(p, 'init', expect=1)
assert_raises(IOError, p.get_po, 'de.po')
# Invalid default resource
p = self.setup_project()
p.write_xml(data="""<resources><string name="s1"> ...""")
assert 'Failed parsing' in self.runprogram(p, 'init', expect=1)
assert_raises(IOError, p.get_po, 'template.pot')
| bsd-2-clause | 8,881,129,003,231,269,000 | 36.238889 | 115 | 0.603312 | false |
parksandwildlife/biosys | biosys/apps/main/tests/api/test_species_observation.py | 1 | 71256 | import datetime
import re
from os import path
import json
from django.contrib.gis.geos import Point
from django.core.urlresolvers import reverse
from django.utils import timezone, six
from openpyxl import load_workbook
from rest_framework import status
from main.models import Dataset, Record
from main.tests.api import helpers
from main.tests.test_data_package import clone
from main.utils_species import NoSpeciesFacade
class TestPermissions(helpers.BaseUserTestCase):
"""
Test Permissions
Get: authenticated
Update: admin, custodians
Create: admin, custodians
Delete: admin, custodians
"""
species_facade_class = NoSpeciesFacade
@staticmethod
def schema_with_species_name():
schema_fields = [
{
"name": "Species Name",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
def _more_setup(self):
# set the HerbieFacade class
from main.api.views import SpeciesMixin
SpeciesMixin.species_facade_class = self.species_facade_class
project = self.project_1
client = self.data_engineer_1_client
schema = self.schema_with_species_name()
self.ds_1 = self._create_dataset_with_schema(project, client, schema,
dataset_type=Dataset.TYPE_SPECIES_OBSERVATION)
self.record_1 = self._create_default_record()
def _create_default_record(self):
ds = self.ds_1
client = self.custodian_1_client
data = {
'Species Name': 'Chubby Bat',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-list')
ds.record_queryset.delete()
self.assertEqual(
client.post(url, data=payload, format='json').status_code,
status.HTTP_201_CREATED
)
return ds.record_queryset.first()
def test_get(self):
urls = [
reverse('api:record-list'),
reverse('api:record-detail', kwargs={'pk': self.record_1.pk})
]
access = {
"forbidden": [self.anonymous_client],
"allowed": [self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.admin_client]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.get(url).status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
self.assertEqual(
client.get(url).status_code,
status.HTTP_200_OK
)
def test_create(self):
"""
Admin and custodians
:return:
"""
urls = [reverse('api:record-list')]
ds = self.ds_1
rec = self.record_1
data = {
"dataset": rec.dataset.pk,
"data": rec.data,
}
access = {
"forbidden": [self.anonymous_client, self.readonly_client, self.custodian_2_client],
"allowed": [self.admin_client, self.custodian_1_client]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.post(url, data, format='json').status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
count = ds.record_queryset.count()
self.assertEqual(
client.post(url, data, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), count + 1)
def test_bulk_create(self):
"""
Cannot create bulk with this end point
:return:
"""
urls = [reverse('api:record-list')]
rec = self.record_1
ds = self.ds_1
data = [
{
"dataset": rec.dataset.pk,
"data": rec.data
},
{
"dataset": rec.dataset.pk,
"data": rec.data
}
]
access = {
"forbidden": [self.anonymous_client, self.readonly_client, self.custodian_2_client,
self.admin_client, self.custodian_1_client],
"allowed": []
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.post(url, data, format='json').status_code,
[status.HTTP_400_BAD_REQUEST, status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
count = ds.record_queryset.count()
self.assertEqual(
client.post(url, data, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), count + len(data))
def test_update(self):
"""
admin + custodian of project for site 1
:return:
"""
rec = self.record_1
previous_data = clone(rec.data)
updated_data = clone(previous_data)
updated_data['Longitude'] = '118.78'
urls = [reverse('api:record-detail', kwargs={'pk': rec.pk})]
data = {
"data": updated_data,
}
access = {
"forbidden": [self.anonymous_client, self.readonly_client, self.custodian_2_client],
"allowed": [self.admin_client, self.custodian_1_client]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.patch(url, data, format='json').status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
rec.data = previous_data
rec.save()
self.assertEqual(
client.patch(url, data, format='json').status_code,
status.HTTP_200_OK
)
rec.refresh_from_db()
self.assertEqual(rec.data, updated_data)
def test_delete(self):
"""
Currently admin + custodian
:return:
"""
rec = self.record_1
urls = [reverse('api:record-detail', kwargs={'pk': rec.pk})]
data = None
access = {
"forbidden": [self.anonymous_client, self.readonly_client, self.custodian_2_client],
"allowed": [self.admin_client, self.custodian_1_client]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.delete(url, data, format='json').status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
rec.save()
count = Dataset.objects.count()
self.assertEqual(
client.delete(url, data, format='json').status_code,
status.HTTP_204_NO_CONTENT
)
self.assertTrue(Dataset.objects.count(), count - 1)
def test_options(self):
urls = [
reverse('api:record-list'),
reverse('api:record-detail', kwargs={'pk': 1})
]
access = {
"forbidden": [self.anonymous_client],
"allowed": [self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.admin_client]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.options(url).status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
# authenticated
for client in access['allowed']:
for url in urls:
self.assertEqual(
client.options(url).status_code,
status.HTTP_200_OK
)
class TestDataValidation(helpers.BaseUserTestCase):
species_facade_class = NoSpeciesFacade
@staticmethod
def schema_with_species_name():
schema_fields = [
{
"name": "Species Name",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
def _more_setup(self):
# set the HerbieFacade class
from main.api.views import SpeciesMixin
SpeciesMixin.species_facade_class = self.species_facade_class
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_species_name()
self.ds_1 = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema,
dataset_type=Dataset.TYPE_SPECIES_OBSERVATION)
def _create_default_record(self):
ds = self.ds_1
client = self.custodian_1_client
data = {
'Species Name': 'Chubby Bat',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-list')
ds.record_queryset.delete()
self.assertEqual(
client.post(url, data=payload, format='json').status_code,
status.HTTP_201_CREATED
)
return ds.record_queryset.first()
def test_create_one_happy_path(self):
"""
Test the create of one record
:return:
"""
ds = self.ds_1
client = self.custodian_1_client
data = {
'Species Name': 'Chubby Bat',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-list')
ds.record_queryset.delete()
self.assertEqual(
client.post(url, payload, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), 1)
def test_empty_not_allowed(self):
ds = self.ds_1
client = self.custodian_1_client
payload = {
"dataset": ds.pk,
"data": {}
}
url = reverse('api:record-list')
count = ds.record_queryset.count()
self.assertEqual(
client.post(url, payload, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
def test_create_column_not_in_schema(self):
"""
Test that if we introduce a column not in the schema it will not validate in strict mode
"""
ds = self.ds_1
client = self.custodian_1_client
data = {
'Species Name': 'Chubby Bat',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31',
'Extra Column': 'Extra Value'
}
payload = {
"dataset": ds.pk,
"data": data
}
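        # strict mode: the extra column not declared in the schema should cause a 400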
url = helpers.set_strict_mode(reverse('api:record-list'))
ds.record_queryset.delete()
self.assertEqual(
client.post(url, data=payload, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), 0)
def test_update_column_not_in_schema(self):
"""
        Test that updating a record with a column not in the schema will not validate in strict mode
:return:
"""
ds = self.ds_1
client = self.custodian_1_client
record = self._create_default_record()
incorrect_data = clone(record.data)
incorrect_data['Extra Column'] = "Extra Value"
data = {
"dataset": record.dataset.pk,
"data": incorrect_data
}
url = reverse('api:record-detail', kwargs={"pk": record.pk})
# set strict mode
url = helpers.set_strict_mode(url)
count = ds.record_queryset.count()
self.assertEqual(
client.put(url, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
self.assertEqual(
client.patch(url, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
def test_date_error(self):
"""
An observation must have a date
:return:
"""
ds = self.ds_1
record = self._create_default_record()
date_column = ds.schema.observation_date_field.name
new_data = clone(record.data)
url_post = reverse('api:record-list')
url_update = reverse('api:record-detail', kwargs={'pk': record.pk})
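        # the date field uses format 'any', so a day/month/year string is accepted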
valid_values = ['15/08/2008']
for value in valid_values:
new_data[date_column] = value
data = {
"dataset": record.dataset.pk,
"data": new_data
}
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url_post, data, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), count + 1)
invalid_values = [None, '', 'not a date']
for value in invalid_values:
new_data[date_column] = value
data = {
"dataset": record.dataset.pk,
"data": new_data
}
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url_post, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
client.put(url_update, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
client.patch(url_update, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
def test_geometry_error(self):
"""
An observation must have a valid geometry
:return:
"""
ds = self.ds_1
record = self._create_default_record()
lat_column = ds.schema.latitude_field.name
new_data = clone(record.data)
url_post = reverse('api:record-list')
url_update = reverse('api:record-detail', kwargs={'pk': record.pk})
valid_values = [-34.125]
for value in valid_values:
new_data[lat_column] = value
data = {
"dataset": record.dataset.pk,
"data": new_data
}
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url_post, data, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), count + 1)
invalid_values = [None, '', 'not a valid latitude']
for value in invalid_values:
new_data[lat_column] = value
data = {
"dataset": record.dataset.pk,
"data": new_data
}
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url_post, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
client.put(url_update, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
client.patch(url_update, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
def test_species_name(self):
ds = self.ds_1
record = self._create_default_record()
column = ds.schema.species_name_parser.species_name_field.name
new_data = clone(record.data)
url_post = reverse('api:record-list')
url_update = reverse('api:record-detail', kwargs={'pk': record.pk})
valid_values = ['Canis Lupus', 'chubby bat', 'anything']
for value in valid_values:
new_data[column] = value
data = {
"dataset": record.dataset.pk,
"data": new_data
}
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url_post, data, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), count + 1)
invalid_values = [None, '', 125]
for value in invalid_values:
new_data[column] = value
data = {
"dataset": record.dataset.pk,
"data": new_data
}
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url_post, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
client.put(url_update, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
client.patch(url_update, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
class TestDateTimeAndGeometryExtraction(helpers.BaseUserTestCase):
species_facade_class = NoSpeciesFacade
@staticmethod
def schema_with_species_name():
schema_fields = [
{
"name": "Species Name",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
def _more_setup(self):
# set the HerbieFacade class
from main.api.views import SpeciesMixin
SpeciesMixin.species_facade_class = self.species_facade_class
def test_create(self):
"""
Test that the date and geometry are extracted from the data
and saved in DB
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_species_name()
ds = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
data = {
'Species Name': 'Chubby Bat',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
# clear all records
ds.record_queryset.delete()
self.assertEqual(ds.record_queryset.count(), 0)
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-list')
self.assertEqual(
client.post(url, data=payload, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), 1)
record = ds.record_queryset.first()
expected_date = datetime.date(2018, 1, 31)
self.assertEqual(timezone.localtime(record.datetime).date(), expected_date)
geometry = record.geometry
self.assertIsInstance(geometry, Point)
self.assertEqual((115.75, -32.0), (geometry.x, geometry.y))
def test_update(self):
"""
Test that the date and geometry are extracted from the data
and saved in DB after a PATCH of the record data
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_species_name()
ds = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
data = {
'Species Name': 'Chubby Bat',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
# clear all records
ds.record_queryset.delete()
self.assertEqual(ds.record_queryset.count(), 0)
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-list')
self.assertEqual(
client.post(url, data=payload, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), 1)
record = ds.record_queryset.first()
# date and lat/lon
# change lat/lon
data = {
'Species Name': 'Chubby Bat',
'Latitude': 22.222,
'Longitude': 111.111,
'When': '2017-12-24'
}
payload = {
"data": data
}
url = reverse('api:record-detail', kwargs={"pk": record.pk})
self.assertEqual(
client.patch(url, data=payload, format='json').status_code,
status.HTTP_200_OK
)
record.refresh_from_db()
expected_date = datetime.date(2017, 12, 24)
self.assertEqual(timezone.localtime(record.datetime).date(), expected_date)
geometry = record.geometry
self.assertIsInstance(geometry, Point)
self.assertEqual((111.111, 22.222), (geometry.x, geometry.y))
class TestSpeciesNameExtraction(helpers.BaseUserTestCase):
species_facade_class = NoSpeciesFacade
@staticmethod
def schema_with_species_name():
schema_fields = [
{
"name": "Species Name",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
def _more_setup(self):
# set the HerbieFacade class
from main.api.views import SpeciesMixin
SpeciesMixin.species_facade_class = self.species_facade_class
def test_create(self):
"""
Test that the species name is extracted from the data and saved in DB even if the species is not valid
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_species_name()
ds = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
data = {
'Species Name': 'Chubby Bat',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
# clear all records
ds.record_queryset.delete()
self.assertEqual(ds.record_queryset.count(), 0)
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-list')
self.assertEqual(
client.post(url, data=payload, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), 1)
self.assertEqual(ds.record_queryset.first().species_name, 'Chubby Bat')
def test_update(self):
"""
        Test the species name extraction after a PUT method
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_species_name()
ds = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
data = {
'Species Name': 'Chubby Bat',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-list')
self.assertEqual(
client.post(url, payload, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), 1)
record = ds.record_queryset.first()
self.assertEqual(record.species_name, 'Chubby Bat')
# update the species_name
data = {
'Species Name': ' Canis lupus ',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-detail', kwargs={"pk": record.pk})
self.assertEqual(
client.put(url, payload, format='json').status_code,
status.HTTP_200_OK
)
record.refresh_from_db()
self.assertEqual(record.species_name, 'Canis lupus')
def test_patch(self):
"""
        Test the species name extraction after a PATCH method
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_species_name()
ds = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
data = {
'Species Name': 'Chubby Bat',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-list')
self.assertEqual(
client.post(url, payload, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), 1)
record = ds.record_queryset.first()
self.assertEqual(record.species_name, 'Chubby Bat')
# update the species_name
data = {
'Species Name': 'Canis lupus ',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
payload = {
"data": data
}
url = reverse('api:record-detail', kwargs={"pk": record.pk})
self.assertEqual(
client.patch(url, payload, format='json').status_code,
status.HTTP_200_OK
)
record.refresh_from_db()
self.assertEqual(record.species_name, 'Canis lupus')
class TestNameIDFromSpeciesName(helpers.BaseUserTestCase):
"""
Test that we retrieve the name id from the species facade
"""
species_facade_class = helpers.LightSpeciesFacade
@staticmethod
def schema_with_species_name():
schema_fields = [
{
"name": "Species Name",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
def _more_setup(self):
# set the HerbieFacade class
from main.api.views import SpeciesMixin
SpeciesMixin.species_facade_class = self.species_facade_class
def test_create(self):
"""
        Test that the name_id is retrieved from the species facade based on the species_name
:return:
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_species_name()
ds = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
data = {
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
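        # check the first two (species name -> name id) pairs from the mock species facade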
for species_name, name_id in list(helpers.LightSpeciesFacade().name_id_by_species_name().items())[:2]:
ds.record_queryset.delete()
self.assertEqual(ds.record_queryset.count(), 0)
data['Species Name'] = species_name
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-list')
self.assertEqual(
client.post(url, payload, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), 1)
self.assertEqual(ds.record_queryset.first().name_id, name_id)
def test_update(self):
"""
        Test that the name_id is retrieved from the species facade based on the species_name
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_species_name()
ds = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
# create a record with a wrong species name. Should have name_id = -1
data = {
'Species Name': 'Chubby Bat',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-list')
self.assertEqual(
client.post(url, payload, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), 1)
record = ds.record_queryset.first()
self.assertEqual(record.name_id, -1)
# update the species_name
data = {
'Species Name': 'Canis lupus',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
payload = {
"dataset": ds.pk,
"data": data
}
expected_name_id = 25454
url = reverse('api:record-detail', kwargs={"pk": record.pk})
self.assertEqual(
client.put(url, payload, format='json').status_code,
status.HTTP_200_OK
)
record.refresh_from_db()
self.assertEqual(record.name_id, expected_name_id)
def test_patch(self):
"""
        Same as above but with a patch method instead of put
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_species_name()
ds = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
# create a record with a wrong species name. Should have name_id = -1
data = {
'Species Name': 'Chubby Bat',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
payload = {
"dataset": ds.pk,
"data": data
}
url = reverse('api:record-list')
self.assertEqual(
client.post(url, payload, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), 1)
record = ds.record_queryset.first()
self.assertEqual(record.name_id, -1)
# update the species_name
data = {
'Species Name': 'Canis lupus',
'Latitude': -32.0,
'Longitude': 115.75,
'When': '2018-01-31'
}
payload = {
"data": data
}
expected_name_id = 25454
url = reverse('api:record-detail', kwargs={"pk": record.pk})
self.assertEqual(
client.patch(url, payload, format='json').status_code,
status.HTTP_200_OK
)
record.refresh_from_db()
self.assertEqual(record.name_id, expected_name_id)
class TestExport(helpers.BaseUserTestCase):
def setUp(self):
super(TestExport, self).setUp()
rows = [
['When', 'Species Name', 'How Many', 'Latitude', 'Longitude', 'Comments'],
['2018-02-07', 'Canis lupus', 1, -32.0, 115.75, ''],
['2018-01-12', 'Chubby bat', 10, -32.0, 115.75, 'Awesome'],
['2018-02-02', 'Canis dingo', 2, -32.0, 115.75, 'Watch out kids'],
['2018-02-10', 'Unknown', 3, -32.0, 115.75, 'Canis?'],
]
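        # the helper builds a dataset from these rows; it should come out as a species observation dataset (asserted below)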
self.ds_1 = self._create_dataset_and_records_from_rows(rows)
self.assertEqual(self.ds_1.type, Dataset.TYPE_SPECIES_OBSERVATION)
def test_happy_path_no_filter(self):
client = self.custodian_1_client
dataset = self.ds_1
all_records = Record.objects.filter(dataset=dataset)
self.assertTrue(all_records.count() > 0)
url = reverse('api:record-list')
query = {
'dataset__id': dataset.pk,
'output': 'xlsx'
}
try:
resp = client.get(url, query)
except Exception as e:
self.fail("Export should not raise an exception: {}".format(e))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
# check headers
self.assertEqual(resp.get('content-type'),
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
content_disposition = resp.get('content-disposition')
# should be something like:
# 'attachment; filename=DatasetName_YYYY_MM_DD-HHMMSS.xlsx
match = re.match('attachment; filename=(.+)', content_disposition)
self.assertIsNotNone(match)
filename, ext = path.splitext(match.group(1))
self.assertEqual(ext, '.xlsx')
        self.assertTrue(filename.startswith(dataset.name))
# read content
wb = load_workbook(six.BytesIO(resp.content), read_only=True)
# one datasheet named from dataset
sheet_names = wb.sheetnames
self.assertEqual(1, len(sheet_names))
self.assertEqual(dataset.name, sheet_names[0])
ws = wb[dataset.name]
rows = list(ws.rows)
expected_records = Record.objects.filter(dataset=dataset)
self.assertEqual(len(rows), expected_records.count() + 1)
headers = [c.value for c in rows[0]]
schema = dataset.schema
# all the columns of the schema should be in the excel
self.assertEqual(schema.headers, headers)
def test_permission_ok_for_not_custodian(self):
"""Export is a read action. Should be authorised for every logged-in user."""
client = self.custodian_2_client
dataset = self.ds_1
url = reverse('api:record-list')
query = {
'dataset__id': dataset.pk,
'output': 'xlsx'
}
try:
resp = client.get(url, query)
except Exception as e:
self.fail("Export should not raise an exception: {}".format(e))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_permission_denied_if_not_logged_in(self):
"""Must be logged-in."""
client = self.anonymous_client
dataset = self.ds_1
url = reverse('api:record-list')
query = {
'dataset__id': dataset.pk,
'output': 'xlsx'
}
try:
resp = client.get(url, query)
except Exception as e:
self.fail("Export should not raise an exception: {}".format(e))
self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
class TestSpeciesNameFromNameID(helpers.BaseUserTestCase):
"""
Use case:
The schema doesn't include a Species Name but just a Name Id column.
Test that using the upload (excel) or API the species name is collected from herbie and populated.
The test suite uses a mock herbie facade with a static species_name -> nameId dict
@see helpers.SOME_SPECIES_NAME_NAME_ID_MAP
"""
species_facade_class = helpers.LightSpeciesFacade
def _more_setup(self):
# set the HerbieFacade class
from main.api.views import SpeciesMixin
SpeciesMixin.species_facade_class = self.species_facade_class
@staticmethod
def schema_with_name_id():
schema_fields = [
{
"name": "Name Id",
"type": "integer",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
def test_species_name_collected_upload(self):
"""
Happy path: upload excel with a valid Name Id.
:return:
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
# data
csv_data = [
['Name Id', 'When', 'Latitude', 'Longitude'],
[25454, '01/01/2017', -32.0, 115.75], # "Canis lupus"
['24204', '02/02/2017', -33.0, 116.0] # "Vespadelus douglasorum"
]
file_ = helpers.rows_to_xlsx_file(csv_data)
self.assertEqual(0, Record.objects.filter(dataset=dataset).count())
url = reverse('api:dataset-upload', kwargs={'pk': dataset.pk})
with open(file_, 'rb') as fp:
payload = {
'file': fp
}
resp = client.post(url, payload, format='multipart')
self.assertEqual(status.HTTP_200_OK, resp.status_code)
records = Record.objects.filter(dataset=dataset)
self.assertEqual(records.count(), len(csv_data) - 1)
for r in records:
self.assertTrue(r.name_id > 0)
self.assertIsNotNone(r.species_name)
canis_lupus = records.filter(name_id=25454).first()
self.assertIsNotNone(canis_lupus)
self.assertEqual(canis_lupus.species_name, "Canis lupus")
vespadelus = records.filter(name_id=24204).first()
self.assertIsNotNone(vespadelus)
self.assertEqual(vespadelus.species_name, "Vespadelus douglasorum")
def test_species_name_collected_api_create(self):
"""
Same as above: testing that the species name is collected when using the API create
:return:
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
record_data = {
'Name Id': 25454, # "Canis lupus"
'When': '12/12/2017',
'Latitude': -32.0,
'Longitude': 115.756
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
self.assertEqual(record.name_id, 25454)
self.assertEqual(record.species_name, "Canis lupus")
def test_species_name_collected_api_update(self):
"""
Updating the Name Id should update the species name
:return:
"""
# create record
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
record_data = {
'Name Id': 25454, # "Canis lupus"
'When': '12/12/2017',
'Latitude': -32.0,
'Longitude': 115.756
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
self.assertEqual(record.name_id, 25454)
self.assertEqual(record.species_name, "Canis lupus")
# patch Name Id
new_name_id = 24204
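        # in the mock facade, name id 24204 maps to 'Vespadelus douglasorum'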
record_data['Name Id'] = new_name_id
expected_species_name = 'Vespadelus douglasorum'
url = reverse('api:record-detail', kwargs={'pk': record.pk})
payload = {
'data': record_data
}
resp = client.patch(url, payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
record.refresh_from_db()
self.assertEqual(record.name_id, new_name_id)
self.assertEqual(record.species_name, expected_species_name)
def test_wrong_id_rejected_upload(self):
"""
        If a wrong Name Id is provided the system assumes it is an error
:return:
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
# data
csv_data = [
['Name Id', 'When', 'Latitude', 'Longitude'],
[99934, '01/01/2017', -32.0, 115.75], # wrong
['24204', '02/02/2017', -33.0, 116.0] # "Vespadelus douglasorum"
]
file_ = helpers.rows_to_xlsx_file(csv_data)
self.assertEqual(0, Record.objects.filter(dataset=dataset).count())
url = reverse('api:dataset-upload', kwargs={'pk': dataset.pk})
with open(file_, 'rb') as fp:
payload = {
'file': fp
}
resp = client.post(url, payload, format='multipart')
self.assertEqual(status.HTTP_400_BAD_REQUEST, resp.status_code)
records = Record.objects.filter(dataset=dataset)
# should be only one record (the good one)
self.assertEqual(records.count(), 1)
vespadelus = records.filter(name_id=24204).first()
self.assertIsNotNone(vespadelus)
self.assertEqual(vespadelus.species_name, "Vespadelus douglasorum")
def test_wrong_id_rejected_api_create(self):
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
record_data = {
'Name Id': 9999, # wrong
'When': '12/12/2017',
'Latitude': -32.0,
'Longitude': 115.756
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(Record.objects.filter(dataset=dataset).count(), 0)
class TestSpeciesNameAndNameID(helpers.BaseUserTestCase):
"""
Use case:
The schema includes a Species Name and a Name Id column.
Test that the Name Id takes precedence
The test suite uses a mock herbie facade with a static species_name -> Name Id dict
@see helpers.SOME_SPECIES_NAME_NAME_ID_MAP
"""
species_facade_class = helpers.LightSpeciesFacade
def _more_setup(self):
# set the HerbieFacade class
from main.api.views import SpeciesMixin
SpeciesMixin.species_facade_class = self.species_facade_class
@staticmethod
def schema_with_name_id_and_species_name():
schema_fields = [
{
"name": "Name Id",
"type": "integer",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS
},
{
"name": "Species Name",
"type": "string",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
def test_species_name_collected_upload(self):
"""
Happy path: upload excel with a valid Name Id.
:return:
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id_and_species_name()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
# data
csv_data = [
['Name Id', 'Species Name', 'When', 'Latitude', 'Longitude'],
[25454, 'Chubby Bat', '01/01/2017', -32.0, 115.75], # "Canis lupus"
['24204', 'French Frog', '02/02/2017', -33.0, 116.0] # "Vespadelus douglasorum"
]
file_ = helpers.rows_to_xlsx_file(csv_data)
self.assertEqual(0, Record.objects.filter(dataset=dataset).count())
url = reverse('api:dataset-upload', kwargs={'pk': dataset.pk})
with open(file_, 'rb') as fp:
payload = {
'file': fp
}
resp = client.post(url, payload, format='multipart')
self.assertEqual(status.HTTP_200_OK, resp.status_code)
records = Record.objects.filter(dataset=dataset)
self.assertEqual(records.count(), len(csv_data) - 1)
for r in records:
self.assertTrue(r.name_id > 0)
self.assertIsNotNone(r.species_name)
canis_lupus = records.filter(name_id=25454).first()
self.assertIsNotNone(canis_lupus)
self.assertEqual(canis_lupus.species_name, "Canis lupus")
vespadelus = records.filter(name_id=24204).first()
self.assertIsNotNone(vespadelus)
self.assertEqual(vespadelus.species_name, "Vespadelus douglasorum")
def test_nameId_collected_upload(self):
"""
Test that if Name Id is not provided it is collected from the species list
:return:
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id_and_species_name()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
# data
csv_data = [
['Name Id', 'Species Name', 'When', 'Latitude', 'Longitude'],
['', 'Canis lupus', '01/01/2017', -32.0, 115.75], # "Canis lupus"
['', 'Vespadelus douglasorum', '02/02/2017', -33.0, 116.0] # "Vespadelus douglasorum"
]
file_ = helpers.rows_to_xlsx_file(csv_data)
self.assertEqual(0, Record.objects.filter(dataset=dataset).count())
url = reverse('api:dataset-upload', kwargs={'pk': dataset.pk})
with open(file_, 'rb') as fp:
payload = {
'file': fp
}
resp = client.post(url, payload, format='multipart')
self.assertEqual(status.HTTP_200_OK, resp.status_code)
records = Record.objects.filter(dataset=dataset)
self.assertEqual(records.count(), len(csv_data) - 1)
for r in records:
self.assertTrue(r.name_id > 0)
self.assertIsNotNone(r.species_name)
canis_lupus = records.filter(name_id=25454).first()
self.assertIsNotNone(canis_lupus)
self.assertEqual(canis_lupus.species_name, "Canis lupus")
vespadelus = records.filter(name_id=24204).first()
self.assertIsNotNone(vespadelus)
self.assertEqual(vespadelus.species_name, "Vespadelus douglasorum")
def test_species_name_collected_api_create(self):
"""
Same as above: testing that the species name is collected when using the API create
:return:
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id_and_species_name()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
record_data = {
'Name Id': 25454, # "Canis lupus"
'Species Name': 'Chubby Bat',
'When': '12/12/2017',
'Latitude': -32.0,
'Longitude': 115.756
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
self.assertEqual(record.name_id, 25454)
self.assertEqual(record.species_name, "Canis lupus")
def test_species_name_collected_api_update(self):
"""
Updating the Name Id should update the species name
:return:
"""
# create record
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id_and_species_name()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
record_data = {
'Name Id': 25454, # "Canis lupus"
'Species Name': 'Chubby Bat',
'When': '12/12/2017',
'Latitude': -32.0,
'Longitude': 115.756
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
self.assertEqual(record.name_id, 25454)
self.assertEqual(record.species_name, "Canis lupus")
# TODO: the species name in the data is not updated. Should we?
self.assertEqual(record.data.get('Species Name'), 'Chubby Bat')
# patch Name Id
new_name_id = 24204
record_data['Name Id'] = new_name_id
expected_species_name = 'Vespadelus douglasorum'
url = reverse('api:record-detail', kwargs={'pk': record.pk})
payload = {
'data': record_data
}
resp = client.patch(url, payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
record.refresh_from_db()
self.assertEqual(record.name_id, new_name_id)
self.assertEqual(record.species_name, expected_species_name)
class TestCompositeSpeciesName(helpers.BaseUserTestCase):
"""
Test for species name composed from Genus, Species, infra_rank, infra_name columns
"""
species_facade_class = helpers.LightSpeciesFacade
@staticmethod
def schema_with_4_columns_genus():
schema_fields = [
{
"name": "Genus",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Species",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "InfraSpecific Rank",
"type": "string",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS
},
{
"name": "InfraSpecific Name",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
@staticmethod
def schema_with_2_columns_genus():
schema_fields = [
{
"name": "Genus",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Species",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
@staticmethod
def schema_with_genus_and_species_name_no_required():
schema_fields = [
{
"name": "SpeciesName",
"type": "string",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS,
"biosys": {
"type": "speciesName"
}
},
{
"name": "Genus",
"type": "string",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS,
"biosys": {
"type": "genus"
}
},
{
"name": "Species",
"type": "string",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS,
"biosys": {
"type": "species"
}
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
def _more_setup(self):
# set the HerbieFacade class
from main.api.views import SpeciesMixin
SpeciesMixin.species_facade_class = self.species_facade_class
self.client = self.custodian_1_client
def assert_create_dataset(self, schema):
try:
return self._create_dataset_with_schema(
self.project_1,
self.data_engineer_1_client,
schema,
dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
except Exception as e:
            self.fail('Species Observation dataset creation failed for schema {schema}: {error}'.format(
                schema=schema, error=e
            ))
def test_genus_species_only_happy_path(self):
schema = self.schema_with_2_columns_genus()
dataset = self.assert_create_dataset(schema)
records = [
['Genus', 'Species', 'When', 'Latitude', 'Longitude'],
['Canis', 'lupus', '2018-01-25', -32.0, 115.75],
]
resp = self._upload_records_from_rows(records, dataset_pk=dataset.pk)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
received = resp.json()
rec_id = received[0]['recordId']
record = Record.objects.filter(pk=rec_id).first()
self.assertEqual(record.species_name, 'Canis lupus')
self.assertEqual(record.name_id, 25454)
def test_genus_species_and_infra_specifics_happy_path(self):
schema = self.schema_with_4_columns_genus()
dataset = self.assert_create_dataset(schema)
records = [
['Genus', 'Species', 'InfraSpecific Rank', 'InfraSpecific Name', 'When', 'Latitude', 'Longitude'],
['Canis', 'lupus', 'subsp. familiaris ', ' rank naughty dog ', '2018-01-25', -32.0, 115.75],
]
resp = self._upload_records_from_rows(records, dataset_pk=dataset.pk)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
received = resp.json()
rec_id = received[0]['recordId']
record = Record.objects.filter(pk=rec_id).first()
expected_species_name = 'Canis lupus subsp. familiaris rank naughty dog'
self.assertEqual(record.species_name, expected_species_name)
self.assertEqual(record.name_id, -1)
def test_validation_missing_species(self):
schema = self.schema_with_2_columns_genus()
dataset = self.assert_create_dataset(schema)
data = {
'Genus': "Canis",
'When': '2018-01-25',
'Latitude': -32.0,
'Longitude': 115.75
}
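        # post in strict mode: the missing Species value should come back as a field error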
url = helpers.url_post_record_strict()
payload = {
'dataset': dataset.pk,
'data': data
}
resp = self.client.post(url, payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
received_json = resp.json()
# should contain one error on the 'data' for the species field
self.assertIn('data', received_json)
errors = received_json.get('data')
self.assertIsInstance(errors, list)
self.assertEqual(len(errors), 1)
error = errors[0]
# should be "Species::msg"
pattern = re.compile(r"^Species::(.+)$")
self.assertTrue(pattern.match(error))
def test_genus_required_error(self):
"""
If genus is set to be required and not provided it should not throw an exception
but return a 400 with a field error message
see https://decbugs.com/view.php?id=6907 for details
"""
schema = self.schema_with_2_columns_genus()
dataset = self.assert_create_dataset(schema)
# Genus is required
self.assertTrue(dataset.schema.get_field_by_name('Genus').required)
# provides 3 records with no Genus (row=2,3,4)
records = [
['Genus', 'Species', 'When', 'Latitude', 'Longitude'],
[None, 'lupus', '2018-01-25', -32.0, 115.75],
['', 'lupus', '2018-01-25', -32.0, 115.75],
[' ', 'lupus', '2018-01-25', -32.0, 115.75]
]
resp = self._upload_records_from_rows(records, dataset_pk=dataset.pk, strict=False)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
received = resp.json()
# expected: array of report by row
self.assertIsInstance(received, list)
self.assertEqual(len(received), 3)
        # this is what a row report should look like
expected_row_report = {
'row': 3,
'errors': {'Genus': 'Field "Genus" has constraint "required" which is not satisfied for value "None"'},
'warnings': {}}
for row_report in received:
self.assertIn('errors', row_report)
errors = row_report.get('errors')
self.assertIn('Genus', errors)
msg = errors.get('Genus')
self.assertEqual(msg, expected_row_report['errors']['Genus'])
def test_species_required_error(self):
"""
If species (with genus) is set to be required and not provided it should not throw an exception
but return a 400 with a field error message
see https://decbugs.com/view.php?id=6907 for details
"""
schema = self.schema_with_2_columns_genus()
dataset = self.assert_create_dataset(schema)
        # Species is required
        self.assertTrue(dataset.schema.get_field_by_name('Species').required)
# provides 3 records with no Species (row=2,3,4)
records = [
['Genus', 'Species', 'When', 'Latitude', 'Longitude'],
['Canis', '', '2018-01-25', -32.0, 115.75],
['Canis', None, '2018-01-25', -32.0, 115.75],
['Canis', ' ', '2018-01-25', -32.0, 115.75]
]
resp = self._upload_records_from_rows(records, dataset_pk=dataset.pk, strict=False)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
received = resp.json()
# expected: array of report by row
self.assertIsInstance(received, list)
self.assertEqual(len(received), 3)
        # this is what a row report should look like
expected_row_report = {
'row': 3,
'errors': {'Species': 'Field "Species" has constraint "required" which is not satisfied for value "None"'},
'warnings': {}}
for row_report in received:
self.assertIn('errors', row_report)
errors = row_report.get('errors')
self.assertIn('Species', errors)
msg = errors.get('Species')
self.assertEqual(msg, expected_row_report['errors']['Species'])
def test_species_name_and_genus_requirement(self):
"""
If the schema has speciesName and genus/species we should not impose any requirement
User should be able to choose one or the other way to enter a species.
"""
schema = self.schema_with_genus_and_species_name_no_required()
self.assert_create_dataset(schema)
def test_species_name_tag_precedence(self):
"""
        if the schema has Species Name and genus/species and the Species Name column is biosys tagged as type
speciesName it then has precedence over genus/species.
@see https://youtrack.gaiaresources.com.au/youtrack/issue/BIOSYS-305
Given I have a species observation dataset with fields |Genus|Species|Species Name|
And the Species Name field is tagged with the Biosys type 'SpeciesName'
And Genus and Species fields have no Biosys type
When I enter |Pteropyus|vampyrus|Canis lupus|
Then the species extracted should be Canis lupus and not Pteropyus vampyrus
"""
schema = self.schema_with_genus_and_species_name_no_required()
# remove biosys tag for Genus and Species
for field in schema['fields']:
if field['name'] in ['Genus', 'Species']:
del field['biosys']
dataset = self.assert_create_dataset(schema)
records = [
['Genus', 'Species', 'SpeciesName', 'When', 'Latitude', 'Longitude'],
['Pteropyus', 'vampyrus', 'Canis lupus', '2018-01-25', -32.0, 115.75],
]
expected_species_name = 'Canis lupus'
resp = self._upload_records_from_rows(records, dataset_pk=dataset.pk, strict=False)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
received = resp.json()
rec_id = received[0]['recordId']
record = Record.objects.filter(pk=rec_id).first()
self.assertEqual(record.species_name, expected_species_name)
class TestPatch(helpers.BaseUserTestCase):
def test_patch_validated(self):
"""
Test that we can patch just the 'validated' flag
:return:
"""
rows = [
['Species Name', 'When', 'Latitude', 'Longitude', 'Comments'],
['Chubby bat', '2018-06-01', -32, 115.75, 'It is huge!']
]
dataset = self._create_dataset_and_records_from_rows(rows)
self.assertEqual(dataset.type, Dataset.TYPE_SPECIES_OBSERVATION)
records = dataset.record_set.all()
record = records.last()
self.assertIsNotNone(record)
self.assertFalse(record.validated)
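        # snapshot the record data to verify the patch leaves it untouched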
previous_data = json.dumps(record.data)
# patch
url = reverse('api:record-detail', kwargs={"pk": record.pk})
client = self.custodian_1_client
payload = {
'validated': True
}
resp = client.patch(url, payload)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
record.refresh_from_db()
self.assertTrue(record.validated)
        self.assertEqual(json.dumps(record.data), previous_data)
def test_patch_locked(self):
"""
Test that we can patch just the 'locked' flag
:return:
"""
rows = [
['Species Name', 'When', 'Latitude', 'Longitude', 'Comments'],
['Chubby bat', '2018-06-01', -32, 115.75, 'It is huge!']
]
dataset = self._create_dataset_and_records_from_rows(rows)
self.assertEqual(dataset.type, Dataset.TYPE_SPECIES_OBSERVATION)
records = dataset.record_set.all()
record = records.last()
self.assertIsNotNone(record)
self.assertFalse(record.locked)
previous_data = json.dumps(record.data)
# patch
url = reverse('api:record-detail', kwargs={"pk": record.pk})
client = self.custodian_1_client
payload = {
'locked': True
}
resp = client.patch(url, payload)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
record.refresh_from_db()
self.assertTrue(record.locked)
        self.assertEqual(json.dumps(record.data), previous_data)
| apache-2.0 | -7,408,917,784,714,279,000 | 35.635476 | 119 | 0.53705 | false |
perchrn/TaktPlayer | src/configuration/ConfigurationHolder.py | 1 | 24848 | '''
Created on 11. jan. 2012
@author: pcn
'''
from xml.etree.ElementTree import Element, SubElement
from xml.etree import ElementTree
from BeautifulSoup import BeautifulStoneSoup
import os
import random
import sys
def getDefaultDirectories():
taktPackageConfigDir = os.path.join(os.getcwd(), "config")
if(sys.platform == "win32"):
appDataDir = os.getenv('APPDATA')
taktConfigDefaultDir = os.path.join(appDataDir, "TaktPlayer")
elif(sys.platform == "darwin"):
appDataDir = os.path.join(os.getenv('USERPROFILE') or os.getenv('HOME'), "Library")
taktConfigDefaultDir = os.path.join(appDataDir, "TaktPlayer")
else:
appDataDir = os.getenv('USERPROFILE') or os.getenv('HOME')
taktConfigDefaultDir = os.path.join(appDataDir, ".TaktPlayer")
if(os.path.isdir(appDataDir) == True):
if(os.path.isdir(taktConfigDefaultDir) == False):
os.makedirs(taktConfigDefaultDir)
if(os.path.isdir(taktConfigDefaultDir) == False):
taktConfigDefaultDir = taktPackageConfigDir
taktVideoDefaultDir = os.path.join(os.getcwd(), "testVideo")
else:
taktVideoDefaultDir = os.path.join(taktConfigDefaultDir, "Video")
if(os.path.isdir(taktVideoDefaultDir) == False):
os.makedirs(taktVideoDefaultDir)
if(os.path.isdir(taktVideoDefaultDir) == False):
taktVideoDefaultDir = os.path.join(os.getcwd(), "testVideo")
else:
taktConfigDefaultDir = taktPackageConfigDir
taktVideoDefaultDir = os.path.join(os.getcwd(), "testVideo")
print "*" * 100
print "DEBUG pcn: appDataDir: " + str(appDataDir)
print "DEBUG pcn: taktConfigDefaultDir: " + str(taktConfigDefaultDir)
print "DEBUG pcn: taktVideoDefaultDir: " + str(taktVideoDefaultDir)
print "*" * 100
return taktConfigDefaultDir, taktVideoDefaultDir
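# Sketch of what this returns (illustrative only; actual paths depend on the environment):
#   Windows: (%APPDATA%\TaktPlayer, %APPDATA%\TaktPlayer\Video)
#   macOS:   (~/Library/TaktPlayer, ~/Library/TaktPlayer/Video)
#   Linux:   (~/.TaktPlayer, ~/.TaktPlayer/Video)
#   Fallback when no user directory is available: (./config, ./testVideo)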
def xmlToPrettyString(xml):
xmlString = ElementTree.tostring(xml, encoding="utf-8", method="xml")
soup = BeautifulStoneSoup(xmlString)#, selfClosingTags=['global'])
return soup.prettify()
class ParameterTypes():
(Bool, Float, Int, Text) = range(4)
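    # Tuple unpacking over range(): Bool=0, Float=1, Int=2, Text=3 -- a lightweight enum substitute.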
class Parameter(object):
def __init__(self, name, default, value, paramType):
self._name = name
self._default = default
self._value = value
self._type = paramType
def getName(self):
return self._name
def getValue(self):
return self._value
def getType(self):
return self._type
def setValue(self, value):
self._value = value
def resetToDefault(self):
if(self._default == None):
print "ERROR! Cannot reset this value! No default configured. " + self._name
else:
self._value = self._default
def setString(self, string):
if(self._type == ParameterTypes.Bool):
if((string.lower() == "true") or (string.lower() == "yes")):
self._value = True
else:
self._value = False
elif(self._type == ParameterTypes.Float):
try:
self._value = float(string)
except:
if(self._default != None):
self._value = self._default
else:
self._value = -1.0
elif(self._type == ParameterTypes.Int):
try:
self._value = int(string)
except:
if(self._default != None):
self._value = self._default
else:
self._value = -1
elif(self._type == ParameterTypes.Text):
self._value = string
else:
self._value = string
def setDefaultValue(self, defaultValue):
self._default = defaultValue
class ConfigurationHolder(object):
def __init__(self, name, parent = None, uniqueName = None, uniqueId = None):
self._name = name
self._parent = parent
self._parameters = []
self._children = []
self._uniqueName = uniqueName
self._uniqueId = uniqueId
self._loadedXML = None
self._loadedFileName = ""
self._unsavedConfig = False
self._configIsUpdated = True
self._oldConfigId = -1
self._configId = -1
self._updateId()
self._selfClosingList = None
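    # Illustrative usage sketch (assumed names, not part of this module):
    #   root = ConfigurationHolder("Configuration")
    #   globalCfg = root.addChildUnique("Global")
    #   globalCfg.addIntParameter("ResolutionX", 800)
    #   xmlString = root.getConfigurationXMLString()
    #   root.saveConfigFile("MyShow")  # writes <config dir>/MyShow.cfg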
def setSelfclosingTags(self, tagList):
self._selfClosingList = tagList
def _updateId(self):
newId = self._configId
while(newId == self._configId):
newId = random.randint(1, 999999)
self._configId = newId
def getConfigId(self):
if(self._parent != None):
return self._parent.getConfigId()
else:
if(self._oldConfigId != self._configId):
self._oldConfigId = self._configId
if(self._loadedXML != None):
#DEBUG pcn:
import difflib
loadedXml = self._xmlToString(self._loadedXML)
generatedXML = self.getConfigurationXMLString()
if(generatedXML != loadedXml):
print "L"*120
print loadedXml
print "G"*120
print generatedXML
print "="*120
for line in difflib.context_diff(a=loadedXml, b=generatedXML):
print line
print "="*120
print "DEBUG pcn: generatedXML != loadedXML"
return self._configId
def addXml(self, xmlPart):
self._loadedXML = xmlPart
def loadConfig(self, configName):
print "Loading config: " + configName
if(self._unsavedConfig == True):
self.saveConfigFile(self._loadedFileName + ".bak")
if(os.path.isabs(configName) == True):
filePath = os.path.normpath(configName)
else:
filePath = os.path.normpath(os.path.join(os.getcwd(), "config", configName))
self._loadedFileName = filePath
if(os.path.isfile(filePath) == False):
print "********** Error loading configuration: \"%s\" **********" %(filePath)
filePath = os.path.normpath(os.path.join(os.getcwd(), "config", os.path.basename(configName)))
print "********** Trying package configuration: \"%s\" **********" %(filePath)
if(os.path.isfile(filePath) == False):
print "********** Error loading configuration: \"%s\" **********" %(filePath)
print "********** Keeping last configuration. **********"
return
try:
print "DEBUG pcn: loadConfig: " + filePath
loadFile = open(filePath, 'r')
xmlString = loadFile.read()
if(self._selfClosingList != None):
soup = BeautifulStoneSoup(xmlString, selfClosingTags=self._selfClosingList)
else:
soup = BeautifulStoneSoup(xmlString)
self._loadedXML = ElementTree.XML(soup.prettify())
self._updateFromXml(self._loadedXML)
self._unsavedConfig = False
except:
print "********** Error loading configuration: \"%s\" **********" %(filePath)
raise
def saveConfigFile(self, configName):
if(configName.endswith(".cfg.autosave") == False):
if(configName.endswith(".cfg.bak") == False):
if(configName.endswith(".cfg") == False):
configName = configName + ".cfg"
if(os.path.isabs(configName) == True):
filePath = os.path.normpath(configName)
else:
filePath = os.path.normpath(os.path.join(os.getcwd(), "config", configName))
try:
saveFile = open(filePath, 'w')
xmlString = self.getConfigurationXMLString()
saveFile.write(xmlString)
if((configName.endswith(".cfg.autosave") == False) and (configName.endswith(".cfg.bak") == False)):
self._loadedFileName = filePath
self._unsavedConfig = False
saveFile.close()
except:
print "********** Error saving configuration: \"%s\" **********" %(filePath)
raise
def newConfigFileName(self, configName):
if(configName.endswith(".cfg.bak") == False):
if(configName.endswith(".cfg") == False):
configName = configName + ".cfg"
if(os.path.isabs(configName) == True):
filePath = os.path.normpath(configName)
else:
filePath = os.path.normpath(os.path.join(os.getcwd(), "config", configName))
self._loadedFileName = filePath
self._unsavedConfig = True
def getConfigFileList(self, configDir):
packageConfigDir = os.path.normpath(os.path.join(os.getcwd(), "config"))
if((configDir != "") and (os.path.isabs(configDir) == True)):
dirPath = os.path.normpath(configDir)
else:
dirPath = packageConfigDir
fileListList = []
fileListString = ""
fileList = os.listdir(dirPath)
for aFile in fileList:
if(aFile.endswith(".cfg")):
if((aFile != "PlayerConfig.cfg") and(aFile != "GuiConfig.cfg")):
if(fileListString != ""):
fileListString += ";"
fileListString += aFile
fileListList.append(aFile)
firstExtraFile = True
if(dirPath != packageConfigDir):
fileList = os.listdir(packageConfigDir)
for aFile in fileList:
if(aFile.endswith(".cfg")):
if((aFile != "PlayerConfig.cfg") and(aFile != "GuiConfig.cfg")):
if(fileListList.count(aFile) == 0):
if(firstExtraFile == True):
fileListString += ";-------------------------"
firstExtraFile = False
if(fileListString != ""):
fileListString += ";"
fileListString += aFile
return fileListString
def getCurrentFileName(self):
return os.path.basename(self._loadedFileName)
def isConfigNotSaved(self):
return self._unsavedConfig
def setFromXmlString(self, xmlString):
# print "l"*120
# print xmlString
# print "l"*120
if(self._selfClosingList != None):
soup = BeautifulStoneSoup(xmlString, selfClosingTags=self._selfClosingList)
else:
soup = BeautifulStoneSoup(xmlString)
self._loadedXML = ElementTree.XML(soup.prettify())
if(self._unsavedConfig == True):
if(self._loadedFileName != ""):
self.saveConfigFile(self._loadedFileName + ".autosave")
self._updateFromXml(self._loadedXML)
self._unsavedConfig = True
def setFromXml(self, xmlConfig):
self._loadedXML = xmlConfig
if(self._unsavedConfig == True):
if(self._loadedFileName != ""):
self.saveConfigFile(self._loadedFileName + ".autosave")
self._updateFromXml(self._loadedXML)
self._unsavedConfig = True
def _updateParamsFromXml(self):
for param in self._parameters:
oldVal = param.getValue()
if(self._loadedXML != None):
xmlValue = self._loadedXML.get(param.getName().lower())
else:
xmlValue = None
if(xmlValue == None):
# print "defaulting " + param.getName().lower()
param.resetToDefault()
else:
# print "update: " + param.getName() + " val: " + xmlValue
param.setString(xmlValue)
if(oldVal != param.getValue()):
self._configIsUpdated = True
def _updateFromXml(self, xmlPart):
if(self._parent != None):
self._loadedXML = xmlPart
self._updateParamsFromXml()
for child in self._children:
childName = child.getName()
childUniqueId = child.getUniqueParameterName()
if(childUniqueId != None):
childUniqueValue = str(child._findParameter(childUniqueId).getValue())
if(xmlPart != None):
childXmlPart = self._findXmlChild(xmlPart, childName, childUniqueId, childUniqueValue)
else:
childXmlPart = None
else:
if(xmlPart != None):
childXmlPart = self._findXmlChild(xmlPart, childName)
else:
childXmlPart = None
child._updateFromXml(childXmlPart)
self._configIsUpdated = True
def getName(self):
return self._name
def getUniqueParameterName(self):
return self._uniqueName
def _findParameter(self, name):
for param in self._parameters:
if(param.getName() == name):
return param
return None
def _addParameter(self, name, defaultValue, value, paramType):
# print "DEBUG pcn: addParameter inId: " + str(self._uniqueId) + " name: " + name + " value: " + str(value)
foundParam = self._findParameter(name)
if(foundParam != None):
# xmlValue = None
# if(self._loadedXML != None):
# xmlValue = self._getValueFromXml(self._loadedXML, name)
if(foundParam.getType() == paramType):
self._configIsUpdated = True
foundParam.setDefaultValue(defaultValue)
# if(xmlValue == None):
# print "Warning! Same parameter with same type added! Updating default value."
else:
print "Error! Same parameter with different type cannot be added!"
else:
self._configIsUpdated = True
newParam = Parameter(name, defaultValue, value, paramType)
self._parameters.append(newParam)
if(self._loadedXML != None):
self._getValueFromXml(self._loadedXML, name)
def addBoolParameter(self, name, defaultValue):
self._addParameter(name, defaultValue, defaultValue, ParameterTypes.Bool)
def addFloatParameter(self, name, defaultValue):
self._addParameter(name, defaultValue, defaultValue, ParameterTypes.Float)
def addIntParameter(self, name, defaultValue):
self._addParameter(name, defaultValue, defaultValue, ParameterTypes.Int)
def addTextParameter(self, name, defaultValue):
self._addParameter(name, defaultValue, defaultValue, ParameterTypes.Text)
def addTextParameterStatic(self, name, value):
self._addParameter(name, None, value, ParameterTypes.Text)
def getValue(self, name):
foundParameter = self._findParameter(name)
if(foundParameter != None):
return foundParameter.getValue()
else:
print "Error! Could not find parameter \"" + name + "\""
return None
def removeParameter(self, name):
# print "DEBUG pcn: removeParameter inId: " + str(self._uniqueId) + " name: " + name
foundParameter = self._findParameter(name)
if(foundParameter != None):
self._parameters.remove(foundParameter)
def _getValueFromXml(self, xml, name):
param = self._findParameter(name)
name = name.lower()
if(param != None):
value = xml.get(name)
if(value != None):
param.setString(value)
return param.getValue()
else:
print "No xml value found for name: %s" % name
return None
else:
print "No param for name: %s..." % name
return None
def setValue(self, name, value):
foundParameter = self._findParameter(name)
if(foundParameter != None):
self._configIsUpdated = True
foundParameter.setValue(value)
else:
print "Error! Trying to set unknown parameter!"
def getConfigurationXML(self):
root = Element("Configuration")
if(self._parent != None):
parentPath = self.getParentPath()
root.attrib["path"] = parentPath
self._addSelfToXML(root)
return root
def getParentPath(self):
if(self._parent == None):
return self._name
else:
return self._parent.getParentPath() + "." + self._name
def getConfigurationXMLString(self):
root = self.getConfigurationXML()
return self._xmlToString(root)
def getLoadedXMLString(self):
return self._xmlToString(self._loadedXML)
def _xmlToString(self, xml):
xmlString = ElementTree.tostring(xml, encoding="utf-8", method="xml")
if(self._selfClosingList != None):
soup = BeautifulStoneSoup(xmlString, selfClosingTags=self._selfClosingList)
else:
soup = BeautifulStoneSoup(xmlString)
return soup.prettify()
def _printXml(self, xml):
print self._xmlToString(xml)
def _addSelfToXML(self, parentNode):
ourNode = SubElement(parentNode, self._name)
self._addXMLAttributes(ourNode)
for child in self._children:
child._addSelfToXML(ourNode)
# print "DEBUG pcn: _addSelfToXML: ourNode"
# self._printXml(ourNode)
# print "DEBUG pcn: _addSelfToXML: loaded"
# if(self._loadedXML != None):
# self._printXml(self._loadedXML)
# print "DEBUG pcn: _addSelfToXML:"
def _addXMLAttributes(self, node):
for param in self._parameters:
value = param.getValue()
            if isinstance(value, basestring):
node.attrib[param.getName()] = value.encode("utf-8").decode("utf-8")
else:
node.attrib[param.getName()] = str(value)
def _findChild(self, name, idName = None, idValue = None):
lowername = name.lower()
for child in self._children:
if(child.getName().lower() == lowername):
if(idName == None):
return child
else:
foundValue = child.getValue(idName)
if(foundValue == idValue):
return child
return None
def _findChildPath(self, path, getValue):
pathSplit = path.split('.', 32)#Max 32 levels
if(len(pathSplit) == 1):
if(getValue == True):
return self.getValue(path)
else:
if(path.lower() == self._name.lower()):
return self
else:
return self._findChild(path)
else:
if(pathSplit[0].lower() == self._name.lower()):
child = self
else:
child = self._findChild(pathSplit[0])
if(child != None):
first = True
subPath = ""
for name in pathSplit:
if(first == True):
first = False
else:
if(subPath != ""):
subPath += "."
subPath = subPath + name
return child._findChildPath(subPath, getValue)
        print "Did not find: " + pathSplit[0] + " in " + self._name
return None
def getPath(self, path):
if(self._parent != None):
return self._parent.getPath(path)
else:
return self._findChildPath(path, False)
def getValueFromPath(self, path):
if(self._parent != None):
return self._parent.getValueFromPath(path)
else:
return self._findChildPath(path, True)
def _findXmlChild(self, loadedXml, name, idName = None, idValue = None):
if(self._parent == None):
loadedXml = self._findXmlChildInternal(loadedXml, self._name, idName, idValue)
return self._findXmlChildInternal(loadedXml, name, idName, idValue)
def _findXmlChildInternal(self, loadedXml, name, idName = None, idValue = None):
name = name.lower()
if(idName != None):
idName = idName.lower()
if(loadedXml != None):
# print "loadedXml len = " + str(len(loadedXml))
for xmlChild in loadedXml:#self._loadedXML.findall(name):
# print "tag: " + str(xmlChild.tag)
if(name == xmlChild.tag):
if(idName != None):
if(xmlChild.get(idName) == idValue):
# print "Found: " + name + " with id " + idName + " = " + str(idValue)
return xmlChild
else:
# print "Found: " + name
return xmlChild
print "Could not find child with name: " + name + " in " + self._name
return None
def findXmlChildrenList(self, name):
if(self._loadedXML == None):
# print "findXmlChildrenList self._loadedXML == None"
return None
name = name.lower()
return self._loadedXML.findall(name)
def removeChildUniqueId(self, name, idName, idValue):
for i in range(len(self._children)):
child = self._children[i]
if(child.getName() == name):
if(idName == None):
self._children.pop(i)
return True
else:
foundValue = child.getValue(idName)
if(foundValue == idValue):
self._children.pop(i)
return True
return False
def findChildUniqueId(self, name, idName, idValue):
foundChild = self._findChild(name, idName, idValue)
if(foundChild != None):
# print "findChildUniqueId: Child found. " + name
return foundChild
else:
return None
def addChildUniqueId(self, name, idName, idValue, idRaw = None):
foundChild = self._findChild(name, idName, idValue)
if(foundChild != None):
print "Warning! addChildUniqueId: Child exist already. Duplicate name? " + name + " idName: " + str(idName) + " idValue: " + str(idValue)
return foundChild
else:
#print "Add Child Unique: " + name + " idName: " + str(idName) + " idValue: " + str(idValue) + " idRaw: " + str(idRaw)
self._configIsUpdated = True
if(idRaw == None):
idRaw = idValue
newChild = ConfigurationHolder(name, self, idName, idRaw)
newChild.addTextParameterStatic(idName, idValue)
newChild.addXml(self._findXmlChild(self._loadedXML, name, idName, idValue))
self._children.append(newChild)
self._children.sort(key=lambda x: x._uniqueId)
return newChild
def addChildUnique(self, name):
foundChild = self._findChild(name)
if(foundChild != None):
# print "Warning! addChildUnique: Child exist already. Duplicate name? " + name
return foundChild
else:
# print "Add Child: " + name
self._configIsUpdated = True
newChild = ConfigurationHolder(name, self)
newChild.addXml(self._findXmlChild(self._loadedXML, name))
self._children.append(newChild)
return newChild
def resetConfigurationUpdated(self):
self._configIsUpdated = False
for child in self._children:
child.resetConfigurationUpdated()
def isConfigurationUpdated(self):
if(self._configIsUpdated == True):
self._updateId()
#print "DEBUG pcn: self._configIsUpdated == True for: " + self._name
return True
else:
for child in self._children:
if(child.isConfigurationUpdated() == True):
self._configIsUpdated = True
self._updateId()
return True
return False
| gpl-2.0 | 4,916,540,883,233,829,000 | 38.012882 | 149 | 0.538031 | false |
timm-tem/RPi_mediaserver | piface/outputallon.py | 1 | 1251 | # THIS IS THE PYTHON CODE FOR PiFACE OUTPUT ON
#
# Copyright (C) 2014 Tim Massey
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Also add information on how to contact you by electronic and paper mail.
#!/usr/bin/python
import pifacedigitalio
pifacedigital = pifacedigitalio.PiFaceDigital()
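# The eight calls below could equally be written as a loop (illustrative alternative):
#   for pin in pifacedigital.output_pins:
#       pin.turn_on()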
pifacedigital.output_pins[0].turn_on()
pifacedigital.output_pins[1].turn_on()
pifacedigital.output_pins[2].turn_on()
pifacedigital.output_pins[3].turn_on()
pifacedigital.output_pins[4].turn_on()
pifacedigital.output_pins[5].turn_on()
pifacedigital.output_pins[6].turn_on()
pifacedigital.output_pins[7].turn_on()
| gpl-3.0 | 8,116,197,362,531,765,000 | 40.7 | 77 | 0.733813 | false |
infojasyrc/client_dataws | client_dataws/lib/util/file_verificator.py | 1 | 2208 | '''
Created on Feb 02, 2013
@author: Jose Sal y Rosas
@contact: [email protected]
'''
import zlib
import hashlib
class Verificator(object):
def __init__(self):
pass
def set_parameters(self, path='', algorithm='crc32', blocksize=8192):
self.path = path
self.algorithm = algorithm
self.blocksize = blocksize
def set_algorithm(self, algorithm):
self.algorithm = algorithm
def set_file(self, path):
self.path = path
def set_block_size(self, blocksize):
self.blocksize = blocksize
def get_algorithm(self):
return self.algorithm
def get_file(self):
return self.path
def get_block_size(self):
return self.blocksize
def generatechecksum(self, path='', blocksize=8192):
resultado = 0
if path == '':
path = self.path
if blocksize == 8192:
blocksize = self.blocksize
if 'crc32' in self.algorithm:
resultado = self.executecrc(path, blocksize)
elif 'md5' in self.algorithm:
resultado = self.executemd5(path, blocksize)
return resultado
def executecrc(self, path, blocksize):
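        # Running CRC32: zlib.crc32(data, previous_crc) folds each block into the
        # checksum, so large files never need to be read into memory in one go.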
crctemp = 0
with open(path, 'rb') as f:
while True:
data = f.read(blocksize)
if not data:
break
crctemp = zlib.crc32(data, crctemp)
return crctemp
def executemd5(self, path, blocksize):
with open(path, 'rb') as f:
m = hashlib.md5()
while True:
data = f.read(blocksize)
if not data:
break
m.update(data)
return m.hexdigest()
def verify(self, path, checksum):
if checksum == self.generatechecksum(path):
            return 'succeed'
else:
return 'failed'
if __name__ == "__main__":
path = '/home/developer/Documents/database/datos/iniciales/EW_Drift+Faraday/EW_Drift/d2012219/D2012213003.r'
obj = Verificator()
obj.set_parameters(path, 'md5')
checksum = obj.generatechecksum()
print checksum
print obj.verify(path, checksum) | mit | 8,283,340,951,751,417,000 | 23.544444 | 112 | 0.567029 | false |
itdxer/django-project-template | {{cookiecutter.project_name}}/apps/users/migrations/0001_initial.py | 1 | 2579 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
},
bases=(models.Model,),
),
]
| mit | 6,700,627,795,742,435,000 | 68.702703 | 289 | 0.651028 | false |
puttarajubr/commcare-hq | corehq/apps/api/tests.py | 1 | 47922 | import json
from datetime import datetime
import dateutil.parser
from django.utils.http import urlencode
from django.test import TestCase
from django.core.urlresolvers import reverse
from tastypie.models import ApiKey
from tastypie.resources import Resource
from tastypie import fields
from corehq.apps.groups.models import Group
from corehq.pillows.reportxform import ReportXFormPillow
from couchforms.models import XFormInstance
from casexml.apps.case.models import CommCareCase
from corehq.pillows.xform import XFormPillow
from corehq.pillows.case import CasePillow
from corehq.apps.users.models import CommCareUser, WebUser
from corehq.apps.domain.models import Domain
from corehq.apps.receiverwrapper.models import FormRepeater, CaseRepeater, ShortFormRepeater
from corehq.apps.api.resources import v0_1, v0_4, v0_5
from corehq.apps.api.fields import ToManyDocumentsField, ToOneDocumentField, UseIfRequested, ToManyDictField
from corehq.apps.api import es
from corehq.apps.api.es import ESQuerySet, ESUserError
from django.conf import settings
from custom.hope.models import CC_BIHAR_PREGNANCY
class FakeXFormES(object):
"""
A mock of XFormES that will return the docs that have been
added regardless of the query.
"""
def __init__(self):
self.docs = []
self.queries = []
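    # Illustrative use (assumed values): es.add_doc('some_id', {'foo': 'bar'}) stores the doc;
    # a subsequent es.run_query({'size': 10}) returns it in hits regardless of the query body,
    # honouring only 'from'/'size' for slicing.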
def add_doc(self, id, doc):
self.docs.append(doc)
def run_query(self, query):
self.queries.append(query)
start = query.get('from', 0)
end = (query['size'] + start) if 'size' in query else None
return {
'hits': {
'total': len(self.docs),
'hits': [{'_source': doc} for doc in self.docs[start:end]]
}
}
class APIResourceTest(TestCase):
"""
Base class for shared API tests. Sets up a domain and user and provides
some helper methods and properties for accessing the API
"""
resource = None # must be set by subclasses
api_name = 'v0.4' # can be overridden by subclasses
maxDiff = None
@classmethod
def setUpClass(cls):
cls.domain = Domain.get_or_create_with_name('qwerty', is_active=True)
cls.list_endpoint = reverse('api_dispatch_list',
kwargs=dict(domain=cls.domain.name,
api_name=cls.api_name,
resource_name=cls.resource.Meta.resource_name))
cls.username = '[email protected]'
cls.password = '***'
cls.user = WebUser.create(cls.domain.name, cls.username, cls.password)
cls.user.set_role(cls.domain.name, 'admin')
cls.user.save()
@classmethod
def tearDownClass(cls):
cls.user.delete()
cls.domain.delete()
def single_endpoint(self, id):
return reverse('api_dispatch_detail', kwargs=dict(domain=self.domain.name,
api_name=self.api_name,
resource_name=self.resource.Meta.resource_name,
pk=id))
class TestXFormInstanceResource(APIResourceTest):
"""
Tests the XFormInstanceResource, currently only v0_4
TODO: Provide tests for each version, especially for those aspects
which differ between versions. They should call into reusable tests
for the functionality that is not different.
"""
resource = v0_4.XFormInstanceResource
def _test_es_query(self, url_params, expected_query):
fake_xform_es = FakeXFormES()
prior_run_query = fake_xform_es.run_query
# A bit of a hack since none of Python's mocking libraries seem to do basic spies easily...
def mock_run_query(es_query):
self.assertEqual(sorted(es_query['filter']['and']), expected_query)
return prior_run_query(es_query)
fake_xform_es.run_query = mock_run_query
v0_4.MOCK_XFORM_ES = fake_xform_es
self.client.login(username=self.username, password=self.password)
response = self.client.get('%s?%s' % (self.list_endpoint, urlencode(url_params)))
self.assertEqual(response.status_code, 200)
def test_get_list(self):
"""
Any form in the appropriate domain should be in the list from the API.
"""
# The actual infrastructure involves saving to CouchDB, having PillowTop
# read the changes and write it to ElasticSearch.
# In order to test just the API code, we set up a fake XFormES (this should
# really be a parameter to the XFormInstanceResource constructor)
# and write the translated form directly; we are not trying to test
# the ptop infrastructure.
#the pillow is set to offline mode - elasticsearch not needed to validate
pillow = XFormPillow(online=False)
fake_xform_es = FakeXFormES()
v0_4.MOCK_XFORM_ES = fake_xform_es
backend_form = XFormInstance(xmlns = 'fake-xmlns',
domain = self.domain.name,
received_on = datetime.utcnow(),
form = {
'#type': 'fake-type',
'@xmlns': 'fake-xmlns'
})
backend_form.save()
translated_doc = pillow.change_transform(backend_form.to_json())
fake_xform_es.add_doc(translated_doc['_id'], translated_doc)
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_forms = json.loads(response.content)['objects']
self.assertEqual(len(api_forms), 1)
api_form = api_forms[0]
self.assertEqual(api_form['form']['@xmlns'], backend_form.xmlns)
self.assertEqual(api_form['received_on'], backend_form.received_on.isoformat())
backend_form.delete()
def test_get_list_xmlns(self):
"""
Forms can be filtered by passing ?xmlns=<xmlns>
        Since we are not testing ElasticSearch, we only test that the proper query is generated.
"""
expected = [
{'term': {'doc_type': 'xforminstance'}},
{'term': {'domain.exact': 'qwerty'}},
{'term': {'xmlns.exact': 'foo'}}
]
self._test_es_query({'xmlns': 'foo'}, expected)
def test_get_list_received_on(self):
"""
        Forms can be filtered by passing ?received_on_start=<date>&received_on_end=<date>
        Since we are not testing ElasticSearch, we only test that the proper query is generated.
"""
start_date = datetime(1969, 6, 14)
end_date = datetime(2011, 1, 2)
expected = [
{'range': {'received_on': {'from': start_date.isoformat()}}},
{'range': {'received_on': {'to': end_date.isoformat()}}},
{'term': {'doc_type': 'xforminstance'}},
{'term': {'domain.exact': 'qwerty'}},
]
params = {
'received_on_end': end_date.isoformat(),
'received_on_start': start_date.isoformat(),
}
self._test_es_query(params, expected)
def test_get_list_ordering(self):
'''
        Forms can be ordered ascending or descending on received_on; by default
ascending.
'''
fake_xform_es = FakeXFormES()
# A bit of a hack since none of Python's mocking libraries seem to do basic spies easily...
prior_run_query = fake_xform_es.run_query
queries = []
def mock_run_query(es_query):
queries.append(es_query)
return prior_run_query(es_query)
fake_xform_es.run_query = mock_run_query
v0_4.MOCK_XFORM_ES = fake_xform_es
self.client.login(username=self.username, password=self.password)
response = self.client.get('%s?order_by=received_on' % self.list_endpoint) # Runs *2* queries
self.assertEqual(response.status_code, 200)
self.assertEqual(queries[0]['sort'], [{'received_on': 'asc'}])
response = self.client.get('%s?order_by=-received_on' % self.list_endpoint) # Runs *2* queries
self.assertEqual(response.status_code, 200)
self.assertEqual(queries[2]['sort'], [{'received_on': 'desc'}])
def test_get_list_archived(self):
expected = [
{'or': [
{'term': {'doc_type': 'xforminstance'}},
{'term': {'doc_type': 'xformarchived'}}
]},
{'term': {'domain.exact': 'qwerty'}},
]
self._test_es_query({'include_archived': 'true'}, expected)
class TestCommCareCaseResource(APIResourceTest):
"""
    Tests the CommCareCaseResource, currently only v0_4
"""
resource = v0_4.CommCareCaseResource
def test_get_list(self):
"""
Any case in the appropriate domain should be in the list from the API.
"""
# The actual infrastructure involves saving to CouchDB, having PillowTop
# read the changes and write it to ElasticSearch.
#the pillow is set to offline mode - elasticsearch not needed to validate
pillow = CasePillow(online=False)
fake_case_es = FakeXFormES()
v0_4.MOCK_CASE_ES = fake_case_es
modify_date = datetime.utcnow()
backend_case = CommCareCase(server_modified_on=modify_date, domain=self.domain.name)
backend_case.save()
translated_doc = pillow.change_transform(backend_case.to_json())
fake_case_es.add_doc(translated_doc['_id'], translated_doc)
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_cases = json.loads(response.content)['objects']
self.assertEqual(len(api_cases), 1)
api_case = api_cases[0]
self.assertEqual(dateutil.parser.parse(api_case['server_date_modified']), backend_case.server_modified_on)
backend_case.delete()
class TestHOPECaseResource(APIResourceTest):
"""
    Tests the HOPECaseResource, currently only v0_4, just to make sure
it does not crash right away
"""
resource = v0_4.HOPECaseResource
def test_get_list(self):
"""
Any case in the appropriate domain should be in the list from the API.
"""
# The actual infrastructure involves saving to CouchDB, having PillowTop
# read the changes and write it to ElasticSearch.
#the pillow is set to offline mode - elasticsearch not needed to validate
pillow = CasePillow(online=False)
fake_case_es = FakeXFormES()
v0_4.MOCK_CASE_ES = fake_case_es
modify_date = datetime.utcnow()
backend_case = CommCareCase(server_modified_on=modify_date, domain=self.domain.name)
backend_case.type = CC_BIHAR_PREGNANCY
backend_case.save()
translated_doc = pillow.change_transform(backend_case.to_json())
fake_case_es.add_doc(translated_doc['_id'], translated_doc)
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_cases = json.loads(response.content)['objects']
self.assertEqual(len(api_cases), 2)
api_case = api_cases['mother_lists'][0]
self.assertEqual(dateutil.parser.parse(api_case['server_date_modified']), backend_case.server_modified_on)
backend_case.delete()
class TestCommCareUserResource(APIResourceTest):
"""
    Basic sanity checking of v0_5.CommCareUserResource
"""
resource = v0_5.CommCareUserResource
api_name = 'v0.5'
def test_get_list(self):
self.client.login(username=self.username, password=self.password)
commcare_user = CommCareUser.create(domain=self.domain.name, username='fake_user', password='*****')
backend_id = commcare_user.get_id
response = self.client.get(self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_users = json.loads(response.content)['objects']
self.assertEqual(len(api_users), 1)
self.assertEqual(api_users[0]['id'], backend_id)
commcare_user.delete()
def test_get_single(self):
self.client.login(username=self.username, password=self.password)
commcare_user = CommCareUser.create(domain=self.domain.name, username='fake_user', password='*****')
backend_id = commcare_user._id
response = self.client.get(self.single_endpoint(backend_id))
self.assertEqual(response.status_code, 200)
api_user = json.loads(response.content)
self.assertEqual(api_user['id'], backend_id)
commcare_user.delete()
def test_create(self):
self.client.login(username=self.username, password=self.password)
group = Group({"name": "test"})
group.save()
self.assertEqual(0, len(CommCareUser.by_domain(self.domain.name)))
user_json = {
"username": "jdoe",
"password": "qwer1234",
"first_name": "John",
"last_name": "Doe",
"email": "[email protected]",
"language": "en",
"phone_numbers": [
"+50253311399",
"50253314588"
],
"groups": [
group._id
],
"user_data": {
"chw_id": "13/43/DFA"
}
}
response = self.client.post(self.list_endpoint,
json.dumps(user_json),
content_type='application/json')
self.assertEqual(response.status_code, 201)
[user_back] = CommCareUser.by_domain(self.domain.name)
self.assertEqual(user_back.username, "jdoe")
self.assertEqual(user_back.first_name, "John")
self.assertEqual(user_back.last_name, "Doe")
self.assertEqual(user_back.email, "[email protected]")
self.assertEqual(user_back.language, "en")
self.assertEqual(user_back.get_group_ids()[0], group._id)
self.assertEqual(user_back.user_data["chw_id"], "13/43/DFA")
self.assertEqual(user_back.default_phone_number, "+50253311399")
user_back.delete()
group.delete()
def test_update(self):
self.client.login(username=self.username, password=self.password)
user = CommCareUser.create(domain=self.domain.name, username="test", password="qwer1234")
group = Group({"name": "test"})
group.save()
user_json = {
"first_name": "test",
"last_name": "last",
"email": "[email protected]",
"language": "pol",
"phone_numbers": [
"+50253311399",
"50253314588"
],
"groups": [
group._id
],
"user_data": {
"chw_id": "13/43/DFA"
}
}
backend_id = user._id
response = self.client.put(self.single_endpoint(backend_id),
json.dumps(user_json),
content_type='application/json')
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(1, len(CommCareUser.by_domain(self.domain.name)))
modified = CommCareUser.get(backend_id)
self.assertEqual(modified.username, "test")
self.assertEqual(modified.first_name, "test")
self.assertEqual(modified.last_name, "last")
self.assertEqual(modified.email, "[email protected]")
self.assertEqual(modified.language, "pol")
self.assertEqual(modified.get_group_ids()[0], group._id)
self.assertEqual(modified.user_data["chw_id"], "13/43/DFA")
self.assertEqual(modified.default_phone_number, "+50253311399")
modified.delete()
group.delete()
class TestWebUserResource(APIResourceTest):
"""
    Basic sanity checking of v0_5.WebUserResource
"""
resource = v0_5.WebUserResource
api_name = 'v0.5'
def _check_user_data(self, user, json_user):
self.assertEqual(user._id, json_user['id'])
role = user.get_role(self.domain.name)
self.assertEqual(role.name, json_user['role'])
self.assertEqual(user.is_domain_admin(self.domain.name), json_user['is_admin'])
for perm in ['edit_web_users', 'edit_commcare_users', 'edit_data',
'edit_apps', 'view_reports']:
self.assertEqual(getattr(role.permissions, perm), json_user['permissions'][perm])
def test_get_list(self):
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_users = json.loads(response.content)['objects']
self.assertEqual(len(api_users), 1)
self._check_user_data(self.user, api_users[0])
another_user = WebUser.create(self.domain.name, 'anotherguy', '***')
another_user.set_role(self.domain.name, 'field-implementer')
another_user.save()
response = self.client.get(self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_users = json.loads(response.content)['objects']
self.assertEqual(len(api_users), 2)
# username filter
response = self.client.get('%s?username=%s' % (self.list_endpoint, 'anotherguy'))
self.assertEqual(response.status_code, 200)
api_users = json.loads(response.content)['objects']
self.assertEqual(len(api_users), 1)
self._check_user_data(another_user, api_users[0])
response = self.client.get('%s?username=%s' % (self.list_endpoint, 'nomatch'))
self.assertEqual(response.status_code, 200)
api_users = json.loads(response.content)['objects']
self.assertEqual(len(api_users), 0)
def test_get_single(self):
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.single_endpoint(self.user._id))
self.assertEqual(response.status_code, 200)
api_user = json.loads(response.content)
self._check_user_data(self.user, api_user)
def test_create(self):
self.client.login(username=self.username, password=self.password)
user_json = {
"username":"test_1234",
"password":"qwer1234",
"email":"[email protected]",
"first_name":"Joe",
"is_admin": True,
"last_name":"Admin",
"permissions":{
"edit_apps":True,
"edit_commcare_users":True,
"edit_data":True,
"edit_web_users":True,
"view_reports":True
},
"phone_numbers":[
],
"role":"admin"
}
response = self.client.post(self.list_endpoint,
json.dumps(user_json),
content_type='application/json')
self.assertEqual(response.status_code, 201)
user_back = WebUser.get_by_username("test_1234")
self.assertEqual(user_back.username, "test_1234")
self.assertEqual(user_back.first_name, "Joe")
self.assertEqual(user_back.last_name, "Admin")
self.assertEqual(user_back.email, "[email protected]")
user_back.delete()
def test_update(self):
self.client.login(username=self.username, password=self.password)
user = WebUser.create(domain=self.domain.name, username="test", password="qwer1234")
user_json = {
"email":"[email protected]",
"first_name":"Joe",
"is_admin": True,
"last_name":"Admin",
"permissions":{
"edit_apps":True,
"edit_commcare_users":True,
"edit_data":True,
"edit_web_users":True,
"view_reports":True
},
"phone_numbers":[
],
"role":"admin"
}
backend_id = user._id
response = self.client.put(self.single_endpoint(backend_id),
json.dumps(user_json),
content_type='application/json')
self.assertEqual(response.status_code, 200, response.content)
modified = WebUser.get(backend_id)
self.assertEqual(modified.username, "test")
self.assertEqual(modified.first_name, "Joe")
self.assertEqual(modified.last_name, "Admin")
self.assertEqual(modified.email, "[email protected]")
modified.delete()
class TestRepeaterResource(APIResourceTest):
"""
Basic sanity checking of v0_4.RepeaterResource
"""
resource = v0_4.RepeaterResource
repeater_types = [FormRepeater, CaseRepeater, ShortFormRepeater]
def test_get(self):
self.client.login(username=self.username, password=self.password)
# Add a repeater of various types and check that it comes back
for cls in self.repeater_types:
repeater = cls(domain=self.domain.name,
url='http://example.com/forwarding/{cls}'.format(cls=cls.__name__))
repeater.save()
backend_id = repeater._id
response = self.client.get(self.single_endpoint(backend_id))
self.assertEqual(response.status_code, 200)
result = json.loads(response.content)
self.assertEqual(result['id'], backend_id)
self.assertEqual(result['url'], repeater.url)
self.assertEqual(result['domain'], repeater.domain)
self.assertEqual(result['type'], cls.__name__)
repeater.delete()
def test_get_list(self):
self.client.login(username=self.username, password=self.password)
# Add a form repeater and check that it comes back
form_repeater = FormRepeater(domain=self.domain.name, url='http://example.com/forwarding/form')
form_repeater.save()
backend_id = form_repeater._id
response = self.client.get(self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_repeaters = json.loads(response.content)['objects']
self.assertEqual(len(api_repeaters), 1)
self.assertEqual(api_repeaters[0]['id'], backend_id)
self.assertEqual(api_repeaters[0]['url'], form_repeater.url)
self.assertEqual(api_repeaters[0]['domain'], form_repeater.domain)
self.assertEqual(api_repeaters[0]['type'], 'FormRepeater')
# Add a case repeater and check that both come back
case_repeater = CaseRepeater(domain=self.domain.name, url='http://example.com/forwarding/case')
case_repeater.save()
backend_id = case_repeater._id
response = self.client.get(self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_repeaters = json.loads(response.content)['objects']
self.assertEqual(len(api_repeaters), 2)
api_case_repeater = filter(lambda r: r['type'] == 'CaseRepeater', api_repeaters)[0]
self.assertEqual(api_case_repeater['id'], case_repeater._id)
self.assertEqual(api_case_repeater['url'], case_repeater.url)
self.assertEqual(api_case_repeater['domain'], case_repeater.domain)
form_repeater.delete()
case_repeater.delete()
def test_create(self):
self.client.login(username=self.username, password=self.password)
for cls in self.repeater_types:
self.assertEqual(0, len(cls.by_domain(self.domain.name)))
repeater_json = {
"domain": self.domain.name,
"type": cls.__name__,
"url": "http://example.com/forwarding/{cls}".format(cls=cls.__name__),
}
response = self.client.post(self.list_endpoint,
json.dumps(repeater_json),
content_type='application/json')
self.assertEqual(response.status_code, 201, response.content)
[repeater_back] = cls.by_domain(self.domain.name)
self.assertEqual(repeater_json['domain'], repeater_back.domain)
self.assertEqual(repeater_json['type'], repeater_back.doc_type)
self.assertEqual(repeater_json['url'], repeater_back.url)
repeater_back.delete()
def test_update(self):
self.client.login(username=self.username, password=self.password)
for cls in self.repeater_types:
repeater = cls(domain=self.domain.name,
url='http://example.com/forwarding/{cls}'.format(cls=cls.__name__))
repeater.save()
backend_id = repeater._id
repeater_json = {
"domain": self.domain.name,
"type": cls.__name__,
"url": "http://example.com/forwarding/modified/{cls}".format(cls=cls.__name__),
}
response = self.client.put(self.single_endpoint(backend_id),
json.dumps(repeater_json),
content_type='application/json')
self.assertEqual(response.status_code, 204, response.content)
self.assertEqual(1, len(cls.by_domain(self.domain.name)))
modified = cls.get(backend_id)
self.assertTrue('modified' in modified.url)
repeater.delete()
class TestReportPillow(TestCase):
def test_xformPillowTransform(self):
"""
Test to make sure report xform and reportxform pillows strip the appVersion dict to match the
mappings
"""
        pillows = [ReportXFormPillow(online=False), XFormPillow(online=False)]
bad_appVersion = {
"_id": "foo",
"domain": settings.ES_XFORM_FULL_INDEX_DOMAINS[0],
"form": {
"meta": {
"@xmlns": "http://openrosa.org/jr/xforms",
"username": "someuser",
"instanceID": "foo",
"userID": "some_user_id",
"timeEnd": "2013-09-20T01:33:12Z",
"appVersion": {
"@xmlns": "http://commcarehq.org/xforms",
"#text": "CCODK:\"2.5.1\"(11126). v236 CC2.5b[11126] on April-15-2013"
},
"timeStart": "2013-09-19T01:13:20Z",
"deviceID": "somedevice"
}
}
}
for pillow in pillows:
cleaned = pillow.change_transform(bad_appVersion)
self.assertFalse(isinstance(cleaned['form']['meta']['appVersion'], dict))
self.assertTrue(isinstance(cleaned['form']['meta']['appVersion'], str))
            self.assertEqual(cleaned['form']['meta']['appVersion'], "CCODK:\"2.5.1\"(11126). v236 CC2.5b[11126] on April-15-2013")
class TestESQuerySet(TestCase):
'''
Tests the ESQuerySet for appropriate slicing, etc
'''
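    # The slicing contract these tests exercise: queryset[3:7] is expected to translate
    # into an ES query with {'from': 3, 'size': 4}, i.e. Python slices map onto ES paging.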
def test_slice(self):
es = FakeXFormES()
for i in xrange(0, 1300):
es.add_doc(i, {'i': i})
queryset = ESQuerySet(es_client=es, payload={})
qs_slice = list(queryset[3:7])
self.assertEqual(es.queries[0]['from'], 3)
self.assertEqual(es.queries[0]['size'], 4)
self.assertEqual(len(qs_slice), 4)
queryset = ESQuerySet(es_client=es, payload={})
qs_slice = list(queryset[10:20])
self.assertEqual(es.queries[1]['from'], 10)
self.assertEqual(es.queries[1]['size'], 10)
self.assertEqual(len(qs_slice), 10)
queryset = ESQuerySet(es_client=es, payload={})
qs_slice = list(queryset[500:1000])
self.assertEqual(es.queries[2]['from'], 500)
self.assertEqual(es.queries[2]['size'], 500)
self.assertEqual(len(qs_slice), 500)
def test_order_by(self):
es = FakeXFormES()
for i in xrange(0, 1300):
es.add_doc(i, {'i': i})
queryset = ESQuerySet(es_client=es, payload={})
qs_asc = list(queryset.order_by('foo'))
self.assertEqual(es.queries[0]['sort'], [{'foo': 'asc'}])
qs_desc = list(queryset.order_by('-foo'))
self.assertEqual(es.queries[1]['sort'], [{'foo': 'desc'}])
qs_overwrite = list(queryset.order_by('bizzle').order_by('-baz'))
self.assertEqual(es.queries[2]['sort'], [{'baz': 'desc'}])
qs_multi = list(queryset.order_by('one', '-two', 'three'))
self.assertEqual(es.queries[3]['sort'], [{'one': 'asc'}, {'two': 'desc'}, {'three': 'asc'}])
class ToManySourceModel(object):
def __init__(self, other_model_ids, other_model_dict):
self.other_model_dict = other_model_dict
self.other_model_ids = other_model_ids
@property
def other_models(self):
return [self.other_model_dict.get(id) for id in self.other_model_ids]
class ToManyDestModel(object):
def __init__(self, id):
self.id = id
class ToManySourceResource(Resource):
other_model_ids = fields.ListField(attribute='other_model_ids')
other_models = ToManyDocumentsField('corehq.apps.api.tests.ToManyDestResource', attribute='other_models')
def __init__(self, objs):
super(ToManySourceResource, self).__init__()
self.objs = objs
def obj_get_list(self):
return self.objs
class Meta:
model_class = ToManySourceModel
class ToManyDestResource(Resource):
id = fields.CharField(attribute='id')
class Meta:
model_class = ToManyDestModel
class TestToManyDocumentsField(TestCase):
'''
    Basic test that ToManyDocumentsField dehydrates correctly
'''
def test_requested_use_in(self):
dest_objs = {
'foo': ToManyDestModel('foo'),
'bar': ToManyDestModel('bar'),
'baz': ToManyDestModel('baz'),
}
source_objs = [
ToManySourceModel(other_model_ids=['foo', 'bar'], other_model_dict=dest_objs),
ToManySourceModel(other_model_ids=['bar', 'baz'], other_model_dict=dest_objs)
]
source_resource = ToManySourceResource(source_objs)
bundle = source_resource.build_bundle(obj=source_objs[0])
dehydrated_bundle = source_resource.full_dehydrate(bundle)
self.assertTrue('other_models' in dehydrated_bundle.data)
self.assertEqual([other['id'] for other in dehydrated_bundle.data['other_models']], ['foo', 'bar'])
bundle = source_resource.build_bundle(obj=source_objs[1])
dehydrated_bundle = source_resource.full_dehydrate(bundle)
self.assertEqual([other['id'] for other in dehydrated_bundle.data['other_models']], ['bar', 'baz'])
class ToManyDictSourceModel(object):
def __init__(self, other_model_ids, other_model_dict):
self.other_model_dict = other_model_dict
self.other_model_ids = other_model_ids
@property
def other_models(self):
return dict([(key, self.other_model_dict.get(id)) for key, id in self.other_model_ids.items()])
class ToManyDictDestModel(object):
def __init__(self, id):
self.id = id
class ToManyDictSourceResource(Resource):
other_model_ids = fields.ListField(attribute='other_model_ids')
other_models = ToManyDictField('corehq.apps.api.tests.ToManyDictDestResource', attribute='other_models')
def __init__(self, objs):
super(ToManyDictSourceResource, self).__init__()
self.objs = objs
def obj_get_list(self):
return self.objs
class Meta:
model_class = ToManyDictSourceModel
class ToManyDictDestResource(Resource):
id = fields.CharField(attribute='id')
class Meta:
model_class = ToManyDictDestModel
class TestToManyDictField(TestCase):
'''
    Basic test that ToManyDictField dehydrates correctly
'''
def test_dehydrate(self):
dest_objs = {
'foo': ToManyDictDestModel('foo'),
'bar': ToManyDictDestModel('bar'),
'baz': ToManyDictDestModel('baz'),
}
source_objs = [
ToManyDictSourceModel(other_model_ids={ 'first_other': 'foo', 'second_other': 'bar'}, other_model_dict=dest_objs),
ToManyDictSourceModel(other_model_ids={ 'first_other': 'bar', 'second_other': 'baz'}, other_model_dict=dest_objs)
]
source_resource = ToManyDictSourceResource(source_objs)
bundle = source_resource.build_bundle(obj=source_objs[0])
dehydrated_bundle = source_resource.full_dehydrate(bundle)
self.assertTrue('other_models' in dehydrated_bundle.data)
self.assertEqual(dehydrated_bundle.data['other_models']['first_other']['id'] , 'foo')
self.assertEqual(dehydrated_bundle.data['other_models']['second_other']['id'], 'bar')
bundle = source_resource.build_bundle(obj=source_objs[1])
dehydrated_bundle = source_resource.full_dehydrate(bundle)
self.assertEqual(dehydrated_bundle.data['other_models']['first_other']['id'] , 'bar')
self.assertEqual(dehydrated_bundle.data['other_models']['second_other']['id'], 'baz')
class ToOneSourceModel(object):
def __init__(self, other_model_id, other_model_dict):
self.other_model_dict = other_model_dict
self.other_model_id = other_model_id
@property
def other_model(self):
return self.other_model_dict.get(self.other_model_id)
class ToOneDestModel(object):
def __init__(self, id):
self.id = id
class ToOneSourceResource(Resource):
other_model_id = fields.ListField(attribute='other_model_id')
other_model = ToOneDocumentField('corehq.apps.api.tests.ToOneDestResource', attribute='other_model')
def __init__(self, objs):
super(ToOneSourceResource, self).__init__()
self.objs = objs
def obj_get_list(self):
return self.objs
class Meta:
model_class = ToOneSourceModel
class ToOneDestResource(Resource):
id = fields.CharField(attribute='id')
class Meta:
model_class = ToOneDestModel
class TestToOneDocumentField(TestCase):
'''
    Basic test that ToOneDocumentField dehydrates correctly
'''
def test_requested_use_in(self):
dest_objs = {
'foo': ToOneDestModel('foo'),
'bar': ToOneDestModel('bar'),
'baz': ToOneDestModel('baz'),
}
source_objs = [
ToOneSourceModel(other_model_id='foo', other_model_dict=dest_objs),
ToOneSourceModel(other_model_id='bar', other_model_dict=dest_objs)
]
source_resource = ToOneSourceResource(source_objs)
bundle = source_resource.build_bundle(obj=source_objs[0])
dehydrated_bundle = source_resource.full_dehydrate(bundle)
self.assertEqual(dehydrated_bundle.data['other_model']['id'], 'foo')
bundle = source_resource.build_bundle(obj=source_objs[1])
dehydrated_bundle = source_resource.full_dehydrate(bundle)
self.assertEqual(dehydrated_bundle.data['other_model']['id'], 'bar')
class UseIfRequestedModel(object):
def __init__(self, id):
self.id = id
class UseIfRequestedTestResource(Resource):
something = UseIfRequested(fields.CharField(attribute='id'))
def __init__(self, objs):
super(UseIfRequestedTestResource, self).__init__()
self.objs = objs
def obj_get_list(self):
return self.objs
class Meta:
model_class = UseIfRequestedModel
class TestUseIfRequested(TestCase):
def test_requested_use_in(self):
objs = [
UseIfRequestedModel(id='foo'),
UseIfRequestedModel(id='bar')
]
test_resource = UseIfRequestedTestResource(objs)
bundle = test_resource.build_bundle(obj=objs[0])
dehydrated_bundle = test_resource.full_dehydrate(bundle)
self.assertFalse('id' in dehydrated_bundle.data)
bundle = test_resource.build_bundle(obj=objs[0])
bundle.request.GET['something__full'] = 'true'
dehydrated_bundle = test_resource.full_dehydrate(bundle)
self.assertTrue('something' in dehydrated_bundle.data)
self.assertEqual(dehydrated_bundle.data['something'], 'foo')
class TestSingleSignOnResource(APIResourceTest):
resource = v0_4.SingleSignOnResource
def setUp(self):
super(TestSingleSignOnResource, self).setUp()
self.commcare_username = '[email protected]'
self.commcare_password = '*****'
self.commcare_user = CommCareUser.create(self.domain.name, self.commcare_username, self.commcare_password)
def tearDown(self):
self.commcare_user.delete()
super(TestSingleSignOnResource, self).tearDown()
def test_web_user_success(self):
'''
If correct credentials for a web user are submitted, the response is the profile of that web user
'''
response = self.client.post(self.list_endpoint, {'username': self.username, 'password': self.password})
self.assertEqual(response.status_code, 200)
def test_commcare_user_success(self):
'''
If correct credentials for a commcare user are submitted, the response is the record for that commcare user
'''
response = self.client.post(self.list_endpoint, {'username': self.commcare_username, 'password': self.commcare_password})
self.assertEqual(response.status_code, 200)
def test_wrong_domain(self):
'''
If correct credentials for a user in a different domain are submitted, the response is forbidden
'''
wrong_domain = Domain.get_or_create_with_name('dvorak', is_active=True)
wrong_list_endpoint = reverse('api_dispatch_list', kwargs=dict(domain=wrong_domain.name,
api_name=self.api_name,
resource_name=self.resource.Meta.resource_name))
response = self.client.post(wrong_list_endpoint, {'username': self.username, 'password': self.password})
self.assertEqual(response.status_code, 403)
wrong_domain.delete()
def test_wrong_credentials(self):
'''
If incorrect password for the correct username and domain pair are submitted, the response is forbidden
'''
response = self.client.post(self.list_endpoint, {'username': self.username, 'password': 'bimbizzleboozle'})
self.assertEqual(response.status_code, 403)
def test_no_username(self):
'''
If no username supplied, 400
'''
response = self.client.post(self.list_endpoint, {'password': 'bimbizzleboozle'})
self.assertEqual(response.status_code, 400)
def test_no_password(self):
'''
If no password supplied, 400
'''
response = self.client.post(self.list_endpoint, {'username': self.username})
self.assertEqual(response.status_code, 400)
class TestGroupResource(APIResourceTest):
resource = v0_5.GroupResource
api_name = 'v0.5'
def test_get_list(self):
self.client.login(username=self.username, password=self.password)
group = Group({"name": "test", "domain": self.domain.name})
group.save()
backend_id = group.get_id
response = self.client.get(self.list_endpoint)
self.assertEqual(response.status_code, 200)
api_groups = json.loads(response.content)['objects']
self.assertEqual(len(api_groups), 1)
self.assertEqual(api_groups[0]['id'], backend_id)
group.delete()
def test_get_single(self):
self.client.login(username=self.username, password=self.password)
group = Group({"name": "test", "domain": self.domain.name})
group.save()
backend_id = group.get_id
response = self.client.get(self.single_endpoint(backend_id))
self.assertEqual(response.status_code, 200)
api_groups = json.loads(response.content)
self.assertEqual(api_groups['id'], backend_id)
group.delete()
def test_create(self):
self.client.login(username=self.username, password=self.password)
self.assertEqual(0, len(Group.by_domain(self.domain.name)))
group_json = {
"case_sharing": True,
"metadata": {
"localization": "Ghana"
},
"name": "test group",
"reporting": True,
}
response = self.client.post(self.list_endpoint,
json.dumps(group_json),
content_type='application/json')
self.assertEqual(response.status_code, 201)
[group_back] = Group.by_domain(self.domain.name)
self.assertEqual(group_back.name, "test group")
self.assertTrue(group_back.reporting)
self.assertTrue(group_back.case_sharing)
self.assertEqual(group_back.metadata["localization"], "Ghana")
group_back.delete()
def test_update(self):
self.client.login(username=self.username, password=self.password)
group = Group({"name": "test", "domain": self.domain.name})
group.save()
group_json = {
"case_sharing": True,
"metadata": {
"localization": "Ghana"
},
"name": "test group",
"reporting": True,
}
backend_id = group._id
response = self.client.put(self.single_endpoint(backend_id),
json.dumps(group_json),
content_type='application/json')
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(1, len(Group.by_domain(self.domain.name)))
modified = Group.get(backend_id)
self.assertEqual(modified.name, "test group")
self.assertTrue(modified.reporting)
self.assertTrue(modified.case_sharing)
self.assertEqual(modified.metadata["localization"], "Ghana")
modified.delete()
class FakeUserES(object):
def __init__(self):
self.docs = []
self.queries = []
def add_doc(self, doc):
self.docs.append(doc)
def make_query(self, q=None, fields=None, domain=None, start_at=None, size=None):
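        # Record the query and emulate Elasticsearch pagination by slicing the stored docs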
self.queries.append(q)
start = int(start_at) if start_at else 0
end = min(len(self.docs), start + int(size)) if size else None
return self.docs[start:end]
class TestBulkUserAPI(APIResourceTest):
resource = v0_5.BulkUserResource
api_name = 'v0.5'
@classmethod
def setUpClass(cls):
cls.domain = Domain.get_or_create_with_name('qwerty', is_active=True)
cls.username = '[email protected]'
cls.password = '***'
cls.admin_user = WebUser.create(cls.domain.name, cls.username, cls.password)
cls.admin_user.set_role(cls.domain.name, 'admin')
cls.admin_user.save()
cls.fake_user_es = FakeUserES()
v0_5.MOCK_BULK_USER_ES = cls.mock_es_wrapper
cls.make_users()
@classmethod
def tearDownClass(cls):
cls.admin_user.delete()
cls.domain.delete()
v0_5.MOCK_BULK_USER_ES = None
@classmethod
def make_users(cls):
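        # Populate the fake user index with a fixed set of test users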
users = [
('Robb', 'Stark'),
('Jon', 'Snow'),
('Brandon', 'Stark'),
('Eddard', 'Stark'),
('Catelyn', 'Stark'),
('Tyrion', 'Lannister'),
('Tywin', 'Lannister'),
('Jamie', 'Lannister'),
('Cersei', 'Lannister'),
]
for first, last in users:
username = '_'.join([first.lower(), last.lower()])
email = username + '@qwerty.commcarehq.org'
cls.fake_user_es.add_doc({
'id': 'lskdjflskjflaj',
'email': email,
'username': username,
'first_name': first,
'last_name': last,
'phone_numbers': ['9042411080'],
})
@classmethod
def mock_es_wrapper(cls, *args, **kwargs):
return cls.fake_user_es.make_query(**kwargs)
@property
def list_endpoint(self):
return reverse(
'api_dispatch_list',
kwargs={
'domain': self.domain.name,
'api_name': self.api_name,
'resource_name': self.resource.Meta.resource_name,
}
)
def test_excluded_field(self):
result = self.query(fields=['email', 'first_name', 'password'])
self.assertEqual(result.status_code, 400)
def query(self, **params):
self.client.login(username=self.username, password=self.password)
url = '%s?%s' % (self.list_endpoint, urlencode(params, doseq=True))
return self.client.get(url)
def test_paginate(self):
limit = 3
result = self.query(limit=limit)
self.assertEqual(result.status_code, 200)
users = json.loads(result.content)['objects']
        self.assertEqual(len(users), limit)
result = self.query(start_at=limit, limit=limit)
self.assertEqual(result.status_code, 200)
users = json.loads(result.content)['objects']
        self.assertEqual(len(users), limit)
def test_basic(self):
response = self.query()
self.assertEqual(response.status_code, 200)
class TestApiKey(APIResourceTest):
"""
Only tests access (200 vs 401). Correctness should be tested elsewhere
"""
resource = v0_5.WebUserResource
api_name = 'v0.5'
@classmethod
def setUpClass(cls):
super(TestApiKey, cls).setUpClass()
django_user = WebUser.get_django_user(cls.user)
cls.api_key, _ = ApiKey.objects.get_or_create(user=django_user)
@classmethod
def tearDownClass(cls):
cls.api_key.delete()
super(TestApiKey, cls).tearDownClass()
def test_get_user(self):
endpoint = "%s?%s" % (self.single_endpoint(self.user._id),
urlencode({
"username": self.user.username,
"api_key": self.api_key.key
}))
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
def test_wrong_api_key(self):
endpoint = "%s?%s" % (self.single_endpoint(self.user._id),
urlencode({
"username": self.user.username,
"api_key": 'blah'
}))
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 401)
def test_wrong_user_api_key(self):
username = '[email protected]'
password = '***'
other_user = WebUser.create(self.domain.name, username, password)
other_user.set_role(self.domain.name, 'admin')
other_user.save()
django_user = WebUser.get_django_user(other_user)
other_api_key, _ = ApiKey.objects.get_or_create(user=django_user)
endpoint = "%s?%s" % (self.single_endpoint(self.user._id),
urlencode({
"username": self.user.username,
"api_key": other_api_key.key
}))
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 401)
other_api_key.delete()
other_user.delete()
| bsd-3-clause | 2,962,651,482,476,640,000 | 36.062645 | 129 | 0.596073 | false |
andrewchenshx/vnpy | vnpy/app/algo_trading/algos/dma_algo.py | 1 | 2651 | from vnpy.trader.constant import Offset, Direction, OrderType
from vnpy.trader.object import TradeData, OrderData, TickData
from vnpy.trader.engine import BaseEngine
from vnpy.app.algo_trading import AlgoTemplate
class DmaAlgo(AlgoTemplate):
""""""
display_name = "DMA 直接委托"
default_setting = {
"vt_symbol": "",
"direction": [Direction.LONG.value, Direction.SHORT.value],
"order_type": [
OrderType.MARKET.value,
OrderType.LIMIT.value,
OrderType.STOP.value,
OrderType.FAK.value,
OrderType.FOK.value
],
"price": 0.0,
"volume": 0.0,
"offset": [
Offset.NONE.value,
Offset.OPEN.value,
Offset.CLOSE.value,
Offset.CLOSETODAY.value,
Offset.CLOSEYESTERDAY.value
]
}
variables = [
"traded",
"vt_orderid",
"order_status",
]
def __init__(
self,
algo_engine: BaseEngine,
algo_name: str,
setting: dict
):
""""""
super().__init__(algo_engine, algo_name, setting)
# Parameters
self.vt_symbol = setting["vt_symbol"]
self.direction = Direction(setting["direction"])
self.order_type = OrderType(setting["order_type"])
self.price = setting["price"]
self.volume = setting["volume"]
self.offset = Offset(setting["offset"])
# Variables
self.vt_orderid = ""
self.traded = 0
self.order_status = ""
self.subscribe(self.vt_symbol)
self.put_parameters_event()
self.put_variables_event()
def on_tick(self, tick: TickData):
""""""
if not self.vt_orderid:
if self.direction == Direction.LONG:
self.vt_orderid = self.buy(
self.vt_symbol,
self.price,
self.volume,
self.order_type,
self.offset
)
else:
self.vt_orderid = self.sell(
self.vt_symbol,
self.price,
self.volume,
self.order_type,
self.offset
)
self.put_variables_event()
def on_order(self, order: OrderData):
""""""
self.traded = order.traded
self.order_status = order.status
if not order.is_active():
self.stop()
self.put_variables_event()
def on_trade(self, trade: TradeData):
""""""
pass
| mit | 3,550,596,912,636,014,000 | 25.69697 | 67 | 0.501703 | false |
smartczm/python-learn | Old-day01-10/s13-day5/get/day5/Atm/src/crontab.py | 1 | 2023 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import json
import time
from config import settings
from src.backend import logger
def main():
card_list = os.listdir(settings.USER_DIR_FOLDER)
for card in card_list:
basic_info = json.load(open(os.path.join(settings.USER_DIR_FOLDER, card, 'basic_info.json')))
struct_time = time.localtime()
        # Loop over the debt list, accrue interest on each month's outstanding debt, and record it in the current month's statement
for item in basic_info['debt']:
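            # Interest is 0.05% of the month's total debt, deducted from savings first, then from the balance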
interest = item['total_debt'] * 0.0005
if basic_info['saving'] >= interest:
basic_info['saving'] -= interest
else:
temp = interest - basic_info['saving']
basic_info['balance'] -= temp
logger_obj = logger.get_logger(card, struct_time)
            logger_obj.info("Debt interest - %f - note: statement date %s; total debt %f, unpaid %f" % (interest, item['date'], item['total_debt'], item['balance_debt'],))
json.dump(
basic_info,
open(os.path.join(settings.USER_DIR_FOLDER, basic_info['card'], "basic_info.json"), 'w')
)
        # If today is the 10th (billing spending made before the 9th) and the current balance is in
        # deficit, add the amount owed to the debt list to start accruing interest, and restore this
        # month's available credit limit.
if struct_time.tm_mday == 11 and basic_info['credit'] > basic_info['balance']:
date = time.strftime("%Y-%m-%d")
dic = {'date': date,
"total_debt": basic_info['credit'] - basic_info['balance'],
"balance_debt": basic_info['credit'] - basic_info['balance'],
}
basic_info['debt'].append(dic)
            # Restore the available credit limit
basic_info['balance'] = basic_info['credit']
json.dump(
basic_info,
open(os.path.join(settings.USER_DIR_FOLDER, basic_info['card'], "basic_info.json"), 'w')
)
def run():
main()
| gpl-2.0 | -718,927,813,982,822,900 | 34.134615 | 135 | 0.536946 | false |
openvswitch/ovn-scale-test | rally_ovs/plugins/ovs/ovsclients_impl.py | 1 | 19591 | # Copyright 2016 Ebay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import itertools
import pipes
from io import StringIO
from rally_ovs.plugins.ovs.ovsclients import *
from rally_ovs.plugins.ovs.utils import get_ssh_from_credential
@configure("ssh")
class SshClient(OvsClient):
def create_client(self):
print("********* call OvnNbctl.create_client")
return get_ssh_from_credential(self.credential)
@configure("ovn-nbctl")
class OvnNbctl(OvsClient):
class _OvnNbctl(DdCtlMixin):
def __init__(self, credential):
self.ssh = get_ssh_from_credential(credential)
self.context = {}
self.sandbox = None
self.batch_mode = False
self.cmds = None
self.socket = None
def enable_batch_mode(self, value=True):
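            # In batch mode, commands accumulate in self.cmds and are sent together by flush()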
self.batch_mode = bool(value)
def set_sandbox(self, sandbox, install_method="sandbox",
host_container=None):
self.sandbox = sandbox
self.install_method = install_method
self.host_container = host_container
def set_daemon_socket(self, socket=None):
self.socket = socket
def run(self, cmd, opts=[], args=[], stdout=sys.stdout,
stderr=sys.stderr, raise_on_error=True):
self.cmds = self.cmds or []
if self.batch_mode:
cmd = itertools.chain([" -- "], opts, [cmd], args)
self.cmds.append(" ".join(cmd))
return
if self.sandbox:
cmd_prefix = []
if self.install_method == "sandbox":
self.cmds.append(". %s/sandbox.rc" % self.sandbox)
elif self.install_method == "docker":
cmd_prefix = ["sudo docker exec ovn-north-database"]
elif self.install_method == "physical":
if self.host_container:
cmd_prefix = ["sudo docker exec " + self.host_container]
else:
cmd_prefix = ["sudo"]
if cmd == "exit":
cmd_prefix.append(" ovs-appctl -t ")
if self.socket:
ovn_cmd = "ovn-nbctl -u " + self.socket
else:
ovn_cmd = "ovn-nbctl"
cmd = itertools.chain(cmd_prefix, [ovn_cmd], opts, [cmd], args)
self.cmds.append(" ".join(cmd))
self.ssh.run("\n".join(self.cmds),
stdout=stdout, stderr=stderr, raise_on_error=raise_on_error)
self.cmds = None
def flush(self):
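            # Join all queued commands into a single ovn-nbctl invocation and execute it over SSH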
if self.cmds == None or len(self.cmds) == 0:
return
run_cmds = []
if self.sandbox:
if self.install_method == "sandbox":
run_cmds.append(". %s/sandbox.rc" % self.sandbox)
run_cmds.append("ovn-nbctl" + " ".join(self.cmds))
elif self.install_method == "docker":
run_cmds.append("sudo docker exec ovn-north-database ovn-nbctl " + " ".join(self.cmds))
elif self.install_method == "physical":
if self.host_container:
cmd_prefix = "sudo docker exec " + self.host_container + " ovn-nbctl"
else:
cmd_prefix = "sudo ovn-nbctl"
run_cmds.append(cmd_prefix + " ".join(self.cmds))
self.ssh.run("\n".join(run_cmds),
stdout=sys.stdout, stderr=sys.stderr)
self.cmds = None
def db_set(self, table, record, *col_values):
args = [table, record]
args += set_colval_args(*col_values)
self.run("set", args=args)
def lrouter_port_add(self, lrouter, name, mac=None, ip_addr=None):
params =[lrouter, name, mac, ip_addr]
self.run("lrp-add", args=params)
return {"name":name}
def lrouter_add(self, name):
params = [name]
self.run("lr-add", args=params)
return {"name":name}
def lswitch_add(self, name, other_cfg={}):
params = [name]
self.run("ls-add", args=params)
for cfg, val in other_cfg.items():
param_cfg = 'other_config:{c}="{v}"'.format(c=cfg, v=val)
params = ['Logical_Switch', name, param_cfg]
self.run("set", args=params)
return {"name":name}
def lswitch_del(self, name):
params = [name]
self.run("ls-del", args=params)
def lswitch_list(self):
stdout = StringIO()
self.run("ls-list", stdout=stdout)
output = stdout.getvalue()
return parse_lswitch_list(output)
def lrouter_list(self):
stdout = StringIO()
self.run("lr-list", stdout=stdout)
output = stdout.getvalue()
return parse_lswitch_list(output)
def lrouter_del(self, name):
params = [name]
self.run("lr-del", args=params)
def lswitch_port_add(self, lswitch, name, mac='', ip=''):
params =[lswitch, name]
self.run("lsp-add", args=params)
return {"name":name, "mac":mac, "ip":ip}
def lport_list(self, lswitch):
params =[lswitch]
self.run("lsp-list", args=params)
def lport_del(self, name):
params = [name]
self.run("lsp-del", args=params)
'''
param address: [mac], [mac,ip], [mac,ip1,ip2] ...
'''
def lport_set_addresses(self, name, *addresses):
params = [name]
for i in addresses:
i = filter(lambda x: x, i)
i = "\ ".join(i)
if i:
params += [i]
self.run("lsp-set-addresses", args=params)
def lport_set_port_security(self, name, *addresses):
params = [name]
params += addresses
self.run("lsp-set-port-security", args=params)
def lport_set_type(self, name, type):
params = [name, type]
self.run("lsp-set-type", args=params)
def lport_set_options(self, name, *options):
params = [name]
params += options
self.run("lsp-set-options", args=params)
def acl_add(self, lswitch, direction, priority, match, action,
log=False):
opts = ["--log"] if log else []
match = pipes.quote(match)
params = [lswitch, direction, str(priority), match, action]
self.run("acl-add", opts, params)
def acl_list(self, lswitch):
params = [lswitch]
self.run("acl-list", args=params)
def acl_del(self, lswitch, direction=None,
priority=None, match=None):
params = [lswitch]
if direction:
params.append(direction)
if priority:
params.append(priority)
if match:
params.append(match)
self.run("acl-del", args=params)
def show(self, lswitch=None):
params = [lswitch] if lswitch else []
stdout = StringIO()
self.run("show", args=params, stdout=stdout)
output = stdout.getvalue()
return get_lswitch_info(output)
def sync(self, wait='hv'):
# sync command should always be flushed
opts = ["--wait=%s" % wait]
batch_mode = self.batch_mode
if batch_mode:
self.flush()
self.batch_mode = False
self.run("sync", opts)
self.batch_mode = batch_mode
def start_daemon(self):
stdout = StringIO()
opts = ["--detach", "--pidfile", "--log-file"]
self.run("", opts=opts, stdout=stdout, raise_on_error=False)
return stdout.getvalue().rstrip()
def stop_daemon(self):
self.run("exit", raise_on_error=False)
self.socket = None
def create_client(self):
print("********* call OvnNbctl.create_client")
client = self._OvnNbctl(self.credential)
return client
@configure("ovn-sbctl")
class OvnSbctl(OvsClient):
class _OvnSbctl(DdCtlMixin):
def __init__(self, credential):
self.ssh = get_ssh_from_credential(credential)
self.context = {}
self.sandbox = None
self.batch_mode = False
self.cmds = None
def enable_batch_mode(self, value=True):
self.batch_mode = bool(value)
def set_sandbox(self, sandbox, install_method="sandbox",
host_container=None):
self.sandbox = sandbox
self.install_method = install_method
self.host_container = host_container
def run(self, cmd, opts=[], args=[], stdout=sys.stdout, stderr=sys.stderr):
self.cmds = self.cmds or []
if self.batch_mode:
cmd = itertools.chain([" -- "], opts, [cmd], args)
self.cmds.append(" ".join(cmd))
return
if self.sandbox:
cmd_prefix = []
if self.install_method == "sandbox":
self.cmds.append(". %s/sandbox.rc" % self.sandbox)
elif self.install_method == "docker":
cmd_prefix = ["sudo docker exec ovn-north-database"]
elif self.install_method == "physical":
if self.host_container:
cmd_prefix = ["sudo docker exec " + self.host_container]
else:
cmd_prefix = ["sudo"]
cmd = itertools.chain(cmd_prefix, ["ovn-sbctl"], opts, [cmd], args)
self.cmds.append(" ".join(cmd))
self.ssh.run("\n".join(self.cmds),
stdout=stdout, stderr=stderr)
self.cmds = None
def flush(self):
if self.cmds == None or len(self.cmds) == 0:
return
run_cmds = []
if self.sandbox:
if self.install_method == "sandbox":
run_cmds.append(". %s/sandbox.rc" % self.sandbox)
run_cmds.append("ovn-sbctl" + " ".join(self.cmds))
elif self.install_method == "docker":
run_cmds.append("sudo docker exec ovn-north-database ovn-sbctl " + " ".join(self.cmds))
elif self.install_method == "physical":
if self.host_container:
run_cmds.append("sudo docker exec " + self.host_container + " ovn-sbctl" + " ".join(self.cmds))
else:
run_cmds.append("sudo ovn-sbctl" + " ".join(self.cmds))
self.ssh.run("\n".join(run_cmds),
stdout=sys.stdout, stderr=sys.stderr)
self.cmds = None
def db_set(self, table, record, *col_values):
args = [table, record]
args += set_colval_args(*col_values)
self.run("set", args=args)
def count_igmp_flows(self, lswitch, network_prefix='239'):
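            # Find the datapath UUID for the switch, then count logical flows whose destination matches the multicast prefix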
stdout = StringIO()
self.ssh.run(
"ovn-sbctl list datapath_binding | grep {sw} -B 1 | "
"grep uuid | cut -f 2 -d ':'".format(sw=lswitch),
stdout=stdout)
uuid = stdout.getvalue().rstrip()
stdout = StringIO()
self.ssh.run(
"ovn-sbctl list logical_flow | grep 'dst == {nw}' -B 1 | "
"grep {uuid} -B 1 | wc -l".format(
uuid=uuid, nw=network_prefix),
stdout=stdout
)
return int(stdout.getvalue())
def sync(self, wait='hv'):
# sync command should always be flushed
opts = ["--wait=%s" % wait]
batch_mode = self.batch_mode
if batch_mode:
self.flush()
self.batch_mode = False
self.run("sync", opts)
self.batch_mode = batch_mode
def chassis_bound(self, chassis_name):
batch_mode = self.batch_mode
if batch_mode:
self.flush()
self.batch_mode = False
stdout = StringIO()
self.run("find chassis", ["--bare", "--columns _uuid"],
["name={}".format(chassis_name)],
stdout=stdout)
self.batch_mode = batch_mode
return len(stdout.getvalue().splitlines()) == 1
def create_client(self):
print("********* call OvnSbctl.create_client")
client = self._OvnSbctl(self.credential)
return client
@configure("ovs-ssh")
class OvsSsh(OvsClient):
class _OvsSsh(object):
def __init__(self, credential):
self.ssh = get_ssh_from_credential(credential)
self.batch_mode = False
self.cmds = None
def enable_batch_mode(self, value=True):
self.batch_mode = bool(value)
def set_sandbox(self, sandbox, install_method="sandbox",
host_container=None):
self.sandbox = sandbox
self.install_method = install_method
self.host_container = host_container
def run(self, cmd):
self.cmds = self.cmds or []
if self.host_container:
self.cmds.append('sudo docker exec ' + self.host_container + ' ' + cmd)
else:
self.cmds.append(cmd)
if self.batch_mode:
return
self.flush()
def run_immediate(self, cmd, stdout=sys.stdout, stderr=sys.stderr):
self.ssh.run(cmd, stdout)
def flush(self):
if self.cmds == None:
return
self.ssh.run("\n".join(self.cmds),
stdout=sys.stdout, stderr=sys.stderr)
self.cmds = None
def create_client(self):
print("********* call OvsSsh.create_client")
client = self._OvsSsh(self.credential)
return client
@configure("ovs-vsctl")
class OvsVsctl(OvsClient):
class _OvsVsctl(object):
def __init__(self, credential):
self.ssh = get_ssh_from_credential(credential)
self.context = {}
self.batch_mode = False
self.sandbox = None
self.cmds = None
def enable_batch_mode(self, value=True):
self.batch_mode = bool(value)
def set_sandbox(self, sandbox, install_method="sandbox",
host_container=None):
self.sandbox = sandbox
self.install_method = install_method
self.host_container = host_container
def run(self, cmd, opts=[], args=[], extras=[], stdout=sys.stdout, stderr=sys.stderr):
self.cmds = self.cmds or []
# TODO: tested with non batch_mode only for docker
if self.install_method == "docker":
self.batch_mode = False
if self.sandbox and self.batch_mode == False:
if self.install_method == "sandbox":
self.cmds.append(". %s/sandbox.rc" % self.sandbox)
elif self.install_method == "docker":
self.cmds.append("sudo docker exec %s ovs-vsctl " % \
self.sandbox + cmd + \
" " + " ".join(args) + \
" " + " ".join(extras))
if self.install_method != "docker":
if self.host_container:
cmd_prefix = ["sudo docker exec " + self.host_container + " ovs-vsctl"]
else:
cmd_prefix = ["ovs-vsctl"]
cmd = itertools.chain(cmd_prefix, opts, [cmd], args, extras)
self.cmds.append(" ".join(cmd))
if self.batch_mode:
return
self.ssh.run("\n".join(self.cmds), stdout=stdout, stderr=stderr)
self.cmds = None
def flush(self):
if self.cmds == None:
return
if self.sandbox:
if self.install_method == "sandbox":
self.cmds.insert(0, ". %s/sandbox.rc" % self.sandbox)
self.ssh.run("\n".join(self.cmds),
stdout=sys.stdout, stderr=sys.stderr)
self.cmds = None
def add_port(self, bridge, port, may_exist=True, internal=False):
opts = ['--may-exist'] if may_exist else []
extras = ['--', 'set interface {} type=internal'.format(port)] if internal else []
self.run('add-port', opts, [bridge, port], extras)
def del_port(self, port):
self.run('del-port', args=[port])
def db_set(self, table, record, *col_values):
args = [table, record]
args += set_colval_args(*col_values)
self.run("set", args=args)
def create_client(self):
print("********* call OvsVsctl.create_client")
client = self._OvsVsctl(self.credential)
return client
@configure("ovs-ofctl")
class OvsOfctl(OvsClient):
class _OvsOfctl(object):
def __init__(self, credential):
self.ssh = get_ssh_from_credential(credential)
self.context = {}
self.sandbox = None
def set_sandbox(self, sandbox, install_method="sandbox",
host_container=None):
self.sandbox = sandbox
self.install_method = install_method
self.host_container = host_container
def run(self, cmd, opts=[], args=[], stdout=sys.stdout, stderr=sys.stderr):
# TODO: add support for docker
cmds = []
if self.sandbox:
if self.install_method == "sandbox":
cmds.append(". %s/sandbox.rc" % self.sandbox)
if self.install_method == "physical" and self.host_container:
cmd_prefix = ["sudo docker exec " + self.host_container + " ovs-ofctl"]
else:
cmd_prefix = ["ovs-ofctl"]
cmd = itertools.chain(cmd_prefix, opts, [cmd], args)
cmds.append(" ".join(cmd))
self.ssh.run("\n".join(cmds),
stdout=stdout, stderr=stderr)
def dump_flows(self, bridge):
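            # Count the lines of "dump-flows" output as a rough measure of the number of flows on the bridge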
stdout = StringIO()
opts = []
self.run("dump-flows", opts, [bridge], stdout=stdout)
oflow_data = stdout.getvalue().strip()
oflow_data = oflow_data.split('\n')
return len(oflow_data)
def create_client(self):
print("********* call OvsOfctl.create_client")
client = self._OvsOfctl(self.credential)
return client
| apache-2.0 | -7,918,983,159,370,553,000 | 32.835924 | 119 | 0.509418 | false |
IsmoilovMuhriddin/allgo | rasp/diagnosis.py | 1 | 3928 | import signal
import time
import wiringpi as wp
from rasp.allgo_utils import PCA9685
from rasp.allgo_utils import ultrasonic as uls
LOW = 0
HIGH = 1
OUTPUT = wp.OUTPUT
INPUT = wp.INPUT
CAR_DIR_FW = 0
CAR_DIR_BK = 1
CAR_DIR_LF = 2
CAR_DIR_RF = 3
CAR_DIR_ST = 4
DIR_DISTANCE_ALERT = 20
preMillis = 0
keepRunning = 1
OUT = [5, 0, 1, 2, 3] # 5:front_left_led, 0:front_right_led, 1:rear_right_led, 2:rear_left_led, 3:ultra_trig
IN = [21, 22, 26, 23] # 21:left_IR, 22:center_IR, 26:right_IR, 23:ultra_echo
ULTRASONIC_TRIG = 3 # TRIG port is to use as output signal
ULTRASONIC_ECHO = 23 # ECHO port is to use as input signal
# An instance of the motor & buzzer
pca9685 = PCA9685()
#Ultrasonic ultra; # An instance of the ultrasonic sensor
ultra = uls(ULTRASONIC_TRIG,ULTRASONIC_ECHO)
# distance range: 2cm ~ 5m
# angular range: 15deg
# resolution: 3mm
"""
void setup();
void loop();
void checkUltra();
void intHandler(int dummy);
"""
def setup():
wp.wiringPiSetup() # Initialize wiringPi to load Raspbarry Pi PIN numbering scheme
"""
for(i=0; i<sizeof(OUT); i++){
pinMode(OUT[i], OUTPUT); // Set the pin as output mode
wp.digitalWrite(OUT[i], LOW); // Transmit HIGH or LOW value to the pin(5V ~ 0V)
}"""
for i in range(len(OUT)):
wp.pinMode(OUT[i],OUTPUT)
wp.digitalWrite(OUT[i], LOW)
for i in range(len(IN)):
wp.pinMode(IN[i],INPUT)
def check_ultra():
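    # Take one ultrasonic reading and print the measured distance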
disValue=ultra.distance()
print("Distance:%.2f\t"%disValue)
def action(menu):
    global keepRunning
if menu==0:
pca9685.go_forward();
time.sleep(20);
pca9685.stop();
elif menu== 1:
pca9685.go_back();
time.sleep(20);
pca9685.stop();
elif menu== 2:
# frount left
wp.digitalWrite(OUT[0], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[0], LOW);
time.sleep(20);
wp.digitalWrite(OUT[0], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[0], LOW);
elif menu== 3:
#// frount right
wp.digitalWrite(OUT[1], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[1], LOW);
time.sleep(20);
wp.digitalWrite(OUT[1], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[1], LOW);
elif menu== 4:
#// rear left
wp.digitalWrite(OUT[3], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[3], LOW);
time.sleep(20);
wp.digitalWrite(OUT[3], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[3], LOW);
elif menu== 5:
# rear right
wp.digitalWrite(OUT[2], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[2], LOW);
time.sleep(20);
wp.digitalWrite(OUT[2], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[2], LOW);
elif menu ==6:
#ultrasonic
check_ultra();
elif menu== 9:
pca9685.go_right();
time.sleep(5);
pca9685.stop();
elif menu== 10:
pca9685.go_left();
time.sleep(5);
pca9685.stop();
elif menu== 8:
print("Beeping for 2 seconds\n");
pca9685.on_buzz();
time.sleep(2);
pca9685.off_buzz();
elif menu== 11:
print("EXIT\n");
keepRunning = 0;
else:
print("Check the list again\n")
print("\n")
menu = -1
def loop():
"""// return the cu
time(el
time since your arduino started) in milliseconds(1/1000 second)"""
llinevalue = 0
clinevalue = 0
rlinevalue = 0
print 'This is a diagnostic program for your mobile robot.\n'
print '0: go foward\n1: go backward\n2: front left led\n3: frount right led\n4: rear left led\n5: rear right led\n6: ultrasonic\n7: IR\n8: buzzer\n9:go right\n10: go left\n11: Exit the program\n'
print('Please select one of them: ')
menu = int(input())
action(menu)
menu = -1
"""// obstacle detection and move to another derection.
void checkUltra(){
float disValue = ultra.ReadDista
timeter();
printf("ultrasonic: %f\n",disValue);
"""
def signal_handler(signum, frame):
    # Handle Ctrl-C: stop the main loop cleanly
    global keepRunning
    print("SIGNAL INTERRUPT", signum)
    keepRunning = 0
def main (**kwargs):
setup()
signal.signal(signal.SIGINT, signal_handler)
while keepRunning:
loop()
return 0
main()
| mit | -4,003,689,042,530,230,300 | 20.944134 | 196 | 0.647912 | false |
ppppn/twitter-bot | ReplyAndFav.py | 1 | 6678 | #!/usr/bin/python
# coding: UTF-8
from tweepy.error import TweepError
import random
import re
from const import *
from words import *
from replies import replies
import datetime
import logging
from API import GetAPI
logging.basicConfig(level=LOGLEVEL)
api = None
# Description
# Function list
# FUNCTION_NAME(args) > Returns(SUCCESS, FAILED)
# FetchHomeTL() > (TIMELINE, False)
# FormattingAndTweetForReply(status, content) > (True, False)
# CheckAndReplyToSpecialWord(account_screen_name, status) > (True, False)
# CheckAndReplyToNormalTweet(status) > (True, False)
# CheckAndCreateFav(status) > (True, False)
def UpdateAndNotifyAccountInfo():
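  # If the account's display name differs from BOT_NAME, tweet UPDATE_MSG and update the profile name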
__name__ = "UpdateAndNotifyAccountInfo()"
global api
account = api.me()
try:
if not account.name == BOT_NAME:
api.update_status(UPDATE_MSG)
api.update_profile(name=BOT_NAME)
logging.info("%s: Successfully finished.", __name__)
except TweepError, e:
logging.error("%s: %s", __name__, e.reason)
def FetchHomeTL():
__name__ = "FetchHomeTL()"
global api
since_id = api.user_timeline()[0].id
logging.debug("%s: Last post id: %d", __name__, since_id)
try:
return api.home_timeline(since_id=since_id)
except TweepError, e:
logging.error("%s: %s", __name__, e.reason)
return False
def FormattingAndTweetForReply(status, content):
__name__ = "FormattingAndTweetForReply()"
global api
  # Format the tweet into its final posted form and post it
error_counter = 0
  # Replace {name} with the recipient's display name
content = content.format(name = status.author.name)
  # Prepend the recipient's @screen_name
formatted_tweet = "@" + status.author.screen_name + " " + content
  # Post the tweet
while error_counter < ERROR_LIMIT:
try:
api.update_status(formatted_tweet, in_reply_to_status_id = int(status.id))
logging.debug("%s: The following tweet was successfully posted> '%s'",
__name__, formatted_tweet)
return True
except TweepError, e:
logging.error(e.reason)
error_counter += 1
logging.error("%s: Failed to post %d times. Aborted.", __name__, ERROR_LIMIT)
return False
def CheckAndReplyToSpecialWord(account_screen_name, status):
__name__ = "CheckAndReplyToSpecialWord()"
global api
error_counter = 0
  # Only handle mentions addressed to the bot
if status.in_reply_to_screen_name == account_screen_name:
for special_word in special_words:
if re.search(special_word, status.text):
logging.debug("%s: The special word '%s' was detected in %s's post '%s'",
__name__, special_word, status.author.screen_name, status.text)
num_max_patterns = len(special_words[special_word]) - 1
while error_counter < ERROR_LIMIT:
random.seed()
selected_num = random.randint(0, num_max_patterns)
content = special_words[special_word][selected_num]
          # Append the current time to avoid duplicate-status errors
content += " (%s)"%str(datetime.datetime.today())
logging.debug("%s: Special word reply was generated> '%s'", __name__, content)
if FormattingAndTweetForReply(status, content):
return True
else:
logging.error("%s: Reselect", __name__)
error_counter += 1
logging.error("%s: Failed to post %d times. Aborted.", __name__, ERROR_LIMIT)
return False
logging.debug("%s: No special word was founded in %s's post '%s'",
__name__, status.author.screen_name, status.text)
return False
else:
return False
def CheckAndReplyToNormalTweet(status):
__name__ = "CheckAndReplyToNormalTweet()"
global api
error_counter = 0
num_max_tw = len(replies) - 1
for word in reply_words:
if re.search(word, status.text):
logging.debug("%s: The reply word '%s' was detected in %s's post '%s'",
__name__, word, status.author.screen_name, status.text)
while error_counter < ERROR_LIMIT:
random.seed()
tw_num = random.randint(0, num_max_tw)
content = replies[tw_num].format(name=status.author.name)
logging.debug("%s: Normal word reply selected> '%s'", __name__, content)
if FormattingAndTweetForReply(status, content):
return True
else:
logging.error("%s: Reselect", __name__)
error_counter += 1
logging.error("%s: Failed to post %d times. Aborted.", __name__, ERROR_LIMIT)
return False
def CheckAndCreateFav(status):
__name__ = "CheckAndCreateFav()"
global api
if status.favorited == False:
error_counter = 0
for fav_word in fav_words:
if re.search(fav_word, status.text):
logging.debug("%s: Favorite word '%s' was detected in %s's post '%s'",
__name__, fav_word, status.author.screen_name, status.text)
while error_counter < ERROR_LIMIT:
try:
api.create_favorite(status.id)
logging.debug("%s: Successfully favorited %s's post> '%s'",
__name__, status.author.screen_name, status.text)
return True
except TweepError, e:
logging.error(e.reason)
error_counter += 1
logging.error("%s: Failed to create fav %d times. Aborted.",
__name__, ERROR_LIMIT)
return False
def main():
global api
api = GetAPI()
UpdateAndNotifyAccountInfo()
account_screen_name = api.me().screen_name
tw_counter = 0
fav_counter = 0
result = False
Timeline = FetchHomeTL()
contains_excluded_word = False
if Timeline == False:
logging.critical("Failed to fetch home timeline. All processes are aborted.")
else:
for status in Timeline:
contains_excluded_word = False
if status.author.screen_name == account_screen_name:
pass
        # Skip tweets posted by the bot itself
else:
        # Skip the tweet if it contains any word registered in excluded_words
for excluded_word in excluded_words:
if re.search(excluded_word, status.text):
contains_excluded_word = True
if contains_excluded_word == False:
result = CheckAndReplyToSpecialWord(account_screen_name, status)
if result == False:
result = CheckAndReplyToNormalTweet(status)
if result == True:
tw_counter += 1
result = CheckAndCreateFav(status)
if result == True:
fav_counter += 1
logging.info("Reply: %d, Fav: %d", tw_counter, fav_counter)
if __name__ == "__main__":
main()
| mit | -1,916,938,151,522,885,000 | 32.989418 | 89 | 0.622821 | false |
futureshocked/RaspberryPi-FullStack | Complete_Python2_app/lab_app.py | 1 | 6719 | from flask import Flask, request, render_template
import time
import datetime
import arrow
app = Flask(__name__)
app.debug = True # Make this False if you are no longer debugging
@app.route("/")
def hello():
return "Hello World!"
@app.route("/lab_temp")
def lab_temp():
import sys
import Adafruit_DHT
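    # Read humidity and temperature from the AM2302 (DHT22) sensor wired to GPIO4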
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, 4)
if humidity is not None and temperature is not None:
return render_template("lab_temp.html",temp=temperature,hum=humidity)
else:
return render_template("no_sensor.html")
@app.route("/lab_env_db", methods=['GET']) #Add date limits in the URL #Arguments: from=2015-03-04&to=2015-03-05
def lab_env_db():
temperatures, humidities, timezone, from_date_str, to_date_str = get_records()
# Create new record tables so that datetimes are adjusted back to the user browser's time zone.
time_adjusted_temperatures = []
time_adjusted_humidities = []
for record in temperatures:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_adjusted_temperatures.append([local_timedate.format('YYYY-MM-DD HH:mm'), round(record[2],2)])
for record in humidities:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_adjusted_humidities.append([local_timedate.format('YYYY-MM-DD HH:mm'), round(record[2],2)])
print "rendering lab_env_db.html with: %s, %s, %s" % (timezone, from_date_str, to_date_str)
return render_template("lab_env_db.html", timezone = timezone,
temp = time_adjusted_temperatures,
hum = time_adjusted_humidities,
from_date = from_date_str,
to_date = to_date_str,
temp_items = len(temperatures),
hum_items = len(humidities),
query_string = request.query_string, #This query string is used by the Plotly link
)
def get_records():
import sqlite3
from_date_str = request.args.get('from',time.strftime("%Y-%m-%d 00:00")) #Get the from date value from the URL
to_date_str = request.args.get('to',time.strftime("%Y-%m-%d %H:%M")) #Get the to date value from the URL
timezone = request.args.get('timezone','Etc/UTC');
range_h_form = request.args.get('range_h',''); #This will return a string, if field range_h exists in the request
range_h_int = "nan" #initialise this variable with not a number
print "REQUEST:"
print request.args
try:
range_h_int = int(range_h_form)
except:
print "range_h_form not a number"
print "Received from browser: %s, %s, %s, %s" % (from_date_str, to_date_str, timezone, range_h_int)
if not validate_date(from_date_str): # Validate date before sending it to the DB
from_date_str = time.strftime("%Y-%m-%d 00:00")
if not validate_date(to_date_str):
to_date_str = time.strftime("%Y-%m-%d %H:%M") # Validate date before sending it to the DB
print '2. From: %s, to: %s, timezone: %s' % (from_date_str,to_date_str,timezone)
# Create datetime object so that we can convert to UTC from the browser's local time
from_date_obj = datetime.datetime.strptime(from_date_str,'%Y-%m-%d %H:%M')
to_date_obj = datetime.datetime.strptime(to_date_str,'%Y-%m-%d %H:%M')
# If range_h is defined, we don't need the from and to times
if isinstance(range_h_int,int):
arrow_time_from = arrow.utcnow().replace(hours=-range_h_int)
arrow_time_to = arrow.utcnow()
from_date_utc = arrow_time_from.strftime("%Y-%m-%d %H:%M")
to_date_utc = arrow_time_to.strftime("%Y-%m-%d %H:%M")
from_date_str = arrow_time_from.to(timezone).strftime("%Y-%m-%d %H:%M")
to_date_str = arrow_time_to.to(timezone).strftime("%Y-%m-%d %H:%M")
else:
#Convert datetimes to UTC so we can retrieve the appropriate records from the database
from_date_utc = arrow.get(from_date_obj, timezone).to('Etc/UTC').strftime("%Y-%m-%d %H:%M")
to_date_utc = arrow.get(to_date_obj, timezone).to('Etc/UTC').strftime("%Y-%m-%d %H:%M")
conn = sqlite3.connect('/var/www/lab_app/lab_app.db')
curs = conn.cursor()
curs.execute("SELECT * FROM temperatures WHERE rDateTime BETWEEN ? AND ?", (from_date_utc.format('YYYY-MM-DD HH:mm'), to_date_utc.format('YYYY-MM-DD HH:mm')))
temperatures = curs.fetchall()
curs.execute("SELECT * FROM humidities WHERE rDateTime BETWEEN ? AND ?", (from_date_utc.format('YYYY-MM-DD HH:mm'), to_date_utc.format('YYYY-MM-DD HH:mm')))
humidities = curs.fetchall()
conn.close()
return [temperatures, humidities, timezone, from_date_str, to_date_str]
@app.route("/to_plotly", methods=['GET']) #This method will send the data to ploty.
def to_plotly():
import plotly.plotly as py
from plotly.graph_objs import *
temperatures, humidities, timezone, from_date_str, to_date_str = get_records()
# Create new record tables so that datetimes are adjusted back to the user browser's time zone.
time_series_adjusted_tempreratures = []
time_series_adjusted_humidities = []
time_series_temprerature_values = []
time_series_humidity_values = []
for record in temperatures:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_series_adjusted_tempreratures.append(local_timedate.format('YYYY-MM-DD HH:mm'))
time_series_temprerature_values.append(round(record[2],2))
for record in humidities:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_series_adjusted_humidities.append(local_timedate.format('YYYY-MM-DD HH:mm')) #Best to pass datetime in text
#so that Plotly respects it
time_series_humidity_values.append(round(record[2],2))
temp = Scatter(
x=time_series_adjusted_tempreratures,
y=time_series_temprerature_values,
name='Temperature'
)
hum = Scatter(
x=time_series_adjusted_humidities,
y=time_series_humidity_values,
name='Humidity',
yaxis='y2'
)
data = Data([temp, hum])
layout = Layout(
title="Temperature and humidity in Peter's lab",
xaxis=XAxis(
type='date',
autorange=True
),
yaxis=YAxis(
        title='Celsius',
type='linear',
autorange=True
),
yaxis2=YAxis(
title='Percent',
type='linear',
autorange=True,
overlaying='y',
side='right'
)
)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='lab_temp_hum')
return plot_url
def validate_date(d):
try:
datetime.datetime.strptime(d, '%Y-%m-%d %H:%M')
return True
except ValueError:
return False
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080) | mit | 111,837,944,110,198,480 | 37.843931 | 159 | 0.651883 | false |
michaelhkw/incubator-impala | tests/comparison/query_profile.py | 1 | 30490 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from logging import getLogger
from random import choice, randint, random, shuffle
from tests.comparison.db_types import (
Boolean,
Char,
Decimal,
Float,
Int,
TYPES,
Timestamp)
from tests.comparison.query import (
InsertClause,
InsertStatement,
Query,
StatementExecutionMode,
ValuesClause)
from tests.comparison.funcs import (
AnalyticAvg,
AnalyticCount,
AnalyticFirstValue,
AnalyticLag,
AnalyticLastValue,
AnalyticLead,
AnalyticMax,
AnalyticMin,
AnalyticSum,
And,
Coalesce,
Equals,
GreaterThan,
GreaterThanOrEquals,
If,
In,
IsDistinctFrom,
IsNotDistinctFrom,
IsNotDistinctFromOp,
LessThan,
LessThanOrEquals,
NotEquals,
NotIn,
Or,
WindowBoundary)
from tests.comparison.random_val_generator import RandomValGenerator
UNBOUNDED_PRECEDING = WindowBoundary.UNBOUNDED_PRECEDING
PRECEDING = WindowBoundary.PRECEDING
CURRENT_ROW = WindowBoundary.CURRENT_ROW
FOLLOWING = WindowBoundary.FOLLOWING
UNBOUNDED_FOLLOWING = WindowBoundary.UNBOUNDED_FOLLOWING
LOG = getLogger()
class DefaultProfile(object):
def __init__(self):
# Bounds are (min, max) values, the actual value used will be selected from the
# bounds and each value within the range has an equal probability of being selected.
self._bounds = {
'MAX_NESTED_QUERY_COUNT': (0, 2),
'MAX_NESTED_EXPR_COUNT': (0, 2),
'SELECT_ITEM_COUNT': (1, 5),
'WITH_TABLE_COUNT': (1, 3),
'TABLE_COUNT': (1, 2),
'ANALYTIC_LEAD_LAG_OFFSET': (1, 100),
'ANALYTIC_WINDOW_OFFSET': (1, 100),
'INSERT_VALUES_ROWS': (1, 10)}
# Below are interdependent weights used to determine probabilities. The probability
# of any item being selected should be (item weight) / sum(weights). A weight of
# zero means the item will never be selected.
self._weights = {
'SELECT_ITEM_CATEGORY': {
'AGG': 3,
'ANALYTIC': 1,
'BASIC': 10},
'TYPES': {
Boolean: 1,
Char: 1,
Decimal: 1,
Float: 1,
Int: 10,
Timestamp: 1},
'RELATIONAL_FUNCS': {
# The weights below are "best effort" suggestions. Because QueryGenerator
# prefers to set column types first, and some functions are "supported" only
# by some types, it means functions can be pruned off from this dictionary,
# and that will shift the probabilities. A quick example if that if a Char
# column is chosen: LessThan may not have a pre-defined signature for Char
# comparison, so LessThan shouldn't be chosen with Char columns. The
# tendency to prune will shift as the "funcs" module is adjusted to
# add/remove signatures.
And: 2,
Coalesce: 2,
Equals: 40,
GreaterThan: 2,
GreaterThanOrEquals: 2,
In: 2,
If: 2,
IsDistinctFrom: 2,
IsNotDistinctFrom: 1,
IsNotDistinctFromOp: 1,
LessThan: 2,
LessThanOrEquals: 2,
NotEquals: 2,
NotIn: 2,
Or: 2},
'CONJUNCT_DISJUNCTS': {
# And and Or appear both under RELATIONAL_FUNCS and CONJUNCT_DISJUNCTS for the
# following reasons:
# 1. And and Or are considered "relational" by virtue of taking two arguments
# and returning a Boolean. The crude signature selection means they could be
# selected, so we describe weights there.
# 2. They are set here explicitly as well so that
# QueryGenerator._create_bool_func_tree() can create a "more realistic"
# expression that has a Boolean operator at the top of the tree by explicitly
# asking for an And or Or.
# IMPALA-3896 tracks a better way to do this.
And: 5,
Or: 1},
'ANALYTIC_WINDOW': {
('ROWS', UNBOUNDED_PRECEDING, None): 1,
('ROWS', UNBOUNDED_PRECEDING, PRECEDING): 2,
('ROWS', UNBOUNDED_PRECEDING, CURRENT_ROW): 1,
('ROWS', UNBOUNDED_PRECEDING, FOLLOWING): 2,
('ROWS', UNBOUNDED_PRECEDING, UNBOUNDED_FOLLOWING): 2,
('ROWS', PRECEDING, None): 1,
('ROWS', PRECEDING, PRECEDING): 2,
('ROWS', PRECEDING, CURRENT_ROW): 1,
('ROWS', PRECEDING, FOLLOWING): 2,
('ROWS', PRECEDING, UNBOUNDED_FOLLOWING): 2,
('ROWS', CURRENT_ROW, None): 1,
('ROWS', CURRENT_ROW, CURRENT_ROW): 1,
('ROWS', CURRENT_ROW, FOLLOWING): 2,
('ROWS', CURRENT_ROW, UNBOUNDED_FOLLOWING): 2,
('ROWS', FOLLOWING, FOLLOWING): 2,
('ROWS', FOLLOWING, UNBOUNDED_FOLLOWING): 2,
# Ranges not yet supported
('RANGE', UNBOUNDED_PRECEDING, None): 0,
('RANGE', UNBOUNDED_PRECEDING, PRECEDING): 0,
('RANGE', UNBOUNDED_PRECEDING, CURRENT_ROW): 0,
('RANGE', UNBOUNDED_PRECEDING, FOLLOWING): 0,
('RANGE', UNBOUNDED_PRECEDING, UNBOUNDED_FOLLOWING): 0,
('RANGE', PRECEDING, None): 0,
('RANGE', PRECEDING, PRECEDING): 0,
('RANGE', PRECEDING, CURRENT_ROW): 0,
('RANGE', PRECEDING, FOLLOWING): 0,
('RANGE', PRECEDING, UNBOUNDED_FOLLOWING): 0,
('RANGE', CURRENT_ROW, None): 0,
('RANGE', CURRENT_ROW, CURRENT_ROW): 0,
('RANGE', CURRENT_ROW, FOLLOWING): 0,
('RANGE', CURRENT_ROW, UNBOUNDED_FOLLOWING): 0,
('RANGE', FOLLOWING, FOLLOWING): 0,
('RANGE', FOLLOWING, UNBOUNDED_FOLLOWING): 0},
'JOIN': {
'INNER': 90,
'LEFT': 30,
'RIGHT': 10,
'FULL_OUTER': 3,
'CROSS': 1},
'SUBQUERY_PREDICATE': {
('Exists', 'AGG', 'CORRELATED'): 0, # Not supported
('Exists', 'AGG', 'UNCORRELATED'): 1,
('Exists', 'NON_AGG', 'CORRELATED'): 1,
('Exists', 'NON_AGG', 'UNCORRELATED'): 1,
('NotExists', 'AGG', 'CORRELATED'): 0, # Not supported
('NotExists', 'AGG', 'UNCORRELATED'): 0, # Not supported
('NotExists', 'NON_AGG', 'CORRELATED'): 1,
('NotExists', 'NON_AGG', 'UNCORRELATED'): 0, # Not supported
('In', 'AGG', 'CORRELATED'): 0, # Not supported
('In', 'AGG', 'UNCORRELATED'): 0, # Not supported
('In', 'NON_AGG', 'CORRELATED'): 1,
('In', 'NON_AGG', 'UNCORRELATED'): 1,
('NotIn', 'AGG', 'CORRELATED'): 0, # Not supported
('NotIn', 'AGG', 'UNCORRELATED'): 1,
('NotIn', 'NON_AGG', 'CORRELATED'): 1,
('NotIn', 'NON_AGG', 'UNCORRELATED'): 1,
('Scalar', 'AGG', 'CORRELATED'): 0, # Not supported
('Scalar', 'AGG', 'UNCORRELATED'): 1,
('Scalar', 'NON_AGG', 'CORRELATED'): 0, # Not supported
('Scalar', 'NON_AGG', 'UNCORRELATED'): 1},
'QUERY_EXECUTION': { # Used by the discrepancy searcher
StatementExecutionMode.CREATE_TABLE_AS: 1,
StatementExecutionMode.CREATE_VIEW_AS: 1,
StatementExecutionMode.SELECT_STATEMENT: 10},
'STATEMENT': {
# TODO: Eventually make this a mix of DML and SELECT (IMPALA-4601)
Query: 1},
'INSERT_SOURCE_CLAUSE': {
Query: 3,
ValuesClause: 1},
'INSERT_COLUMN_LIST': {
'partial': 3,
'none': 1},
'VALUES_ITEM_EXPR': {
'constant': 1,
'function': 2},
'INSERT_UPSERT': {
InsertClause.CONFLICT_ACTION_IGNORE: 1,
InsertClause.CONFLICT_ACTION_UPDATE: 3}}
# On/off switches
self._flags = {
'ANALYTIC_DESIGNS': {
'TOP_LEVEL_QUERY_WITHOUT_LIMIT': True,
'DETERMINISTIC_ORDER_BY': True,
'NO_ORDER_BY': True,
'ONLY_SELECT_ITEM': True,
'UNBOUNDED_WINDOW': True,
'RANK_FUNC': True}}
# Independent probabilities where 1 means 100%. These values may be ignored depending
# on the context. For example, GROUP_BY is almost always ignored and instead
# determined by the SELECT item weights above, since mixing aggregate and
# non-aggregate items requires the use of a GROUP BY. The GROUP_BY option below is
# only applied if all of the SELECT items are non-aggregate.
self._probabilities = {
'OPTIONAL_QUERY_CLAUSES': {
'WITH': 0.1, # MAX_NESTED_QUERY_COUNT bounds take precedence
'FROM': 1,
'WHERE': 0.5,
'GROUP_BY': 0.1, # special case, doesn't really do much, see comment above
'HAVING': 0.25,
'UNION': 0.1,
'ORDER_BY': 0.1},
'OPTIONAL_ANALYTIC_CLAUSES': {
'PARTITION_BY': 0.5,
'ORDER_BY': 0.5,
'WINDOW': 0.5}, # will only be used if ORDER BY is chosen
'MISC': {
'INLINE_VIEW': 0.1, # MAX_NESTED_QUERY_COUNT bounds take precedence
'SELECT_DISTINCT': 0.1,
'SCALAR_SUBQUERY': 0.1,
'ONLY_USE_EQUALITY_JOIN_PREDICATES': 0.8,
'ONLY_USE_AGGREGATES_IN_HAVING_CLAUSE': 0.7,
'UNION_ALL': 0.5}} # Determines use of "ALL" but not "UNION"
self.__type_weights = {}
self.constant_generator = RandomValGenerator()
def _get_config_value(self, start_config, *keys):
value = start_config
for key in keys:
value = value[key]
return value
def weights(self, *keys):
'''Convenience method for getting the values of named weights'''
return self._get_config_value(self._weights, *keys)
def bounds(self, *keys):
'''Convenience method for getting the values of named bounds'''
return self._get_config_value(self._bounds, *keys)
def probability(self, *keys):
'''Convenience method for getting the value of named probabilities'''
return self._get_config_value(self._probabilities, *keys)
def _choose_from_bounds(self, *bounds):
'''Returns a value that is within the given bounds. Each value has an equal chance
of being chosen.
'''
if isinstance(bounds[0], str):
lower, upper = self.bounds(*bounds)
else:
lower, upper = bounds
return randint(lower, upper)
def _choose_from_weights(self, *weight_args):
'''Returns a value that is selected from the keys of weights with the probability
determined by the values of weights.
'''
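    # Example: weights {'A': 3, 'B': 1} return 'A' with probability 3/4 and 'B' with 1/4.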
if isinstance(weight_args[0], str):
weights = self.weights(*weight_args)
else:
weights = weight_args[0]
total_weight = sum(weights.itervalues())
numeric_choice = randint(1, total_weight)
for choice_, weight in weights.iteritems():
if weight <= 0:
continue
if numeric_choice <= weight:
return choice_
numeric_choice -= weight
def _choose_from_filtered_weights(self, filter, *weights):
'''Convenience method, apply the given filter before choosing a value.'''
if isinstance(weights[0], str):
weights = self.weights(*weights)
else:
weights = weights[0]
return self._choose_from_weights(dict((choice_, weight) for choice_, weight
in weights.iteritems() if filter(choice_)))
def _decide_from_probability(self, *keys):
return random() < self.probability(*keys)
def get_max_nested_query_count(self):
'''Return the maximum number of queries the top level query may contain.'''
return self._choose_from_bounds('MAX_NESTED_QUERY_COUNT')
def use_with_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'WITH')
def only_use_equality_join_predicates(self):
return self._decide_from_probability('MISC', 'ONLY_USE_EQUALITY_JOIN_PREDICATES')
def only_use_aggregates_in_having_clause(self):
return self._decide_from_probability('MISC', 'ONLY_USE_AGGREGATES_IN_HAVING_CLAUSE')
def get_with_clause_table_ref_count(self):
'''Return the number of table ref entries a WITH clause should contain.'''
return self._choose_from_bounds('WITH_TABLE_COUNT')
def get_select_item_count(self):
return self._choose_from_bounds('SELECT_ITEM_COUNT')
def choose_nested_expr_count(self):
return self._choose_from_bounds('MAX_NESTED_EXPR_COUNT')
def allowed_analytic_designs(self):
return [design for design, is_enabled in self._flags['ANALYTIC_DESIGNS'].iteritems()
if is_enabled]
def use_partition_by_clause_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'PARTITION_BY')
def use_order_by_clause_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'ORDER_BY')
def use_window_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'WINDOW')
def choose_window_type(self):
return self._choose_from_weights('ANALYTIC_WINDOW')
def get_window_offset(self):
return self._choose_from_bounds('ANALYTIC_WINDOW_OFFSET')
def get_offset_for_analytic_lead_or_lag(self):
return self._choose_from_bounds('ANALYTIC_LEAD_LAG_OFFSET')
def get_table_count(self):
return self._choose_from_bounds('TABLE_COUNT')
def use_inline_view(self):
return self._decide_from_probability('MISC', 'INLINE_VIEW')
def choose_table(self, table_exprs):
return choice(table_exprs)
def choose_join_type(self, join_types):
return self._choose_from_filtered_weights(
lambda join_type: join_type in join_types, 'JOIN')
def choose_join_condition_count(self):
return max(1, self._choose_from_bounds('MAX_NESTED_EXPR_COUNT'))
def use_where_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'WHERE')
def use_scalar_subquery(self):
return self._decide_from_probability('MISC', 'SCALAR_SUBQUERY')
def choose_subquery_predicate_category(self, func_name, allow_correlated):
weights = self.weights('SUBQUERY_PREDICATE')
func_names = set(name for name, _, _ in weights.iterkeys())
if func_name not in func_names:
func_name = 'Scalar'
allow_agg = self.weights('SELECT_ITEM_CATEGORY').get('AGG', 0)
if allow_correlated and self.bounds('TABLE_COUNT')[1] == 0:
allow_correlated = False
weights = dict(((name, use_agg, use_correlated), weight)
for (name, use_agg, use_correlated), weight in weights.iteritems()
if name == func_name and
(allow_agg or use_agg == 'NON_AGG') and
weight)
if weights:
return self._choose_from_weights(weights)
def use_distinct(self):
return self._decide_from_probability('MISC', 'SELECT_DISTINCT')
def use_distinct_in_func(self):
return self._decide_from_probability('MISC', 'SELECT_DISTINCT')
def use_group_by_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'GROUP_BY')
def use_having_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'HAVING')
def use_union_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'UNION')
def use_union_all(self):
return self._decide_from_probability('MISC', 'UNION_ALL')
def get_query_execution(self):
return self._choose_from_weights('QUERY_EXECUTION')
def use_having_without_groupby(self):
return True
def use_nested_with(self):
return True
def use_lateral_join(self):
return False
def use_boolean_expr_for_lateral_join(self):
return False
def get_num_boolean_exprs_for_lateral_join(self):
return False
# Workaround for Hive null ordering differences, and lack of 'NULL FIRST', 'NULL LAST'
# specifications. The ref db will order nulls as specified for ASC sorting to make it
  # identical to Hive. Valid return values are: 'BEFORE', 'AFTER', or 'DEFAULT',
# the latter means no specification needed.
def nulls_order_asc(self):
return 'DEFAULT'
def choose_val_expr(self, val_exprs, types=TYPES):
if not val_exprs:
      raise Exception('At least one value is required')
if not types:
raise Exception('At least one type is required')
available_types = set(types) & set(val_exprs.by_type)
if not available_types:
raise Exception('None of the provided values return any of the required types')
val_type = self.choose_type(available_types)
return choice(val_exprs.by_type[val_type])
def choose_constant(self, return_type=None, allow_null=True):
if not return_type:
return_type = self.choose_type()
while True:
val = self.constant_generator.generate_val(return_type)
if val is None and not allow_null:
continue
return return_type(val)
def choose_type(self, types=TYPES):
type_weights = self.weights('TYPES')
weights = dict((type_, type_weights[type_]) for type_ in types)
if not weights:
raise Exception('None of the requested types are enabled')
return self._choose_from_weights(weights)
def choose_conjunct_disjunct_fill_ratio(self):
'''Return the ratio of ANDs and ORs to use in a boolean function tree. For example,
when creating a WHERE condition that consists of 10 nested functions, a ratio of
0.1 means 1 out of the 10 functions in the WHERE clause will be an AND or OR.
'''
return random() * random()
def choose_relational_func_fill_ratio(self):
'''Return the ratio of relational functions to use in a boolean function tree. This
ratio is applied after 'choose_conjunct_disjunct_fill_ratio()'.
'''
return random() * random()
def choose_conjunct_disjunct(self):
return self._choose_from_weights('CONJUNCT_DISJUNCTS')
def choose_relational_func_signature(self, signatures):
'''Return a relational signature chosen from "signatures". A signature is considered
to be relational if it returns a boolean and accepts more than one argument.
'''
if not signatures:
raise Exception('At least one signature is required')
filtered_signatures = filter(
lambda s: s.return_type == Boolean \
and len(s.args) > 1 \
and not any(a.is_subquery for a in s.args),
signatures)
if not filtered_signatures:
raise Exception(
'None of the provided signatures corresponded to a relational function')
func_weights = self.weights('RELATIONAL_FUNCS')
missing_funcs = set(s.func for s in filtered_signatures) - set(func_weights)
if missing_funcs:
raise Exception("Weights are missing for functions: {0}".format(missing_funcs))
return self.choose_func_signature(filtered_signatures,
self.weights('RELATIONAL_FUNCS'))
def choose_func_signature(self, signatures, _func_weights=None):
'''Return a signature chosen from "signatures".'''
if not signatures:
raise Exception('At least one signature is required')
type_weights = self.weights('TYPES')
func_weights = _func_weights
if func_weights:
distinct_funcs_in_signatures = set([s.func for s in signatures])
pruned_func_weights = {f: func_weights[f] for f in distinct_funcs_in_signatures}
func_weights = pruned_func_weights
else:
# First a function will be chosen then a signature. This is done so that the number
# of signatures a function has doesn't influence its likelihood of being chosen.
# Functions will be weighted based on the weight of the types in their arguments.
# The weights will be normalized by the number of arguments in the signature. The
# weight of a function will be the maximum weight out of all of it's signatures.
# If any signature has a type with a weight of zero, the signature will not be used.
#
# Example: type_weights = {Int: 10, Float: 1},
# funcs = [foo(Int), foo(Float), bar(Int, Float)]
#
# max signature length = 2 # from bar(Int, Float)
# weight of foo(Int) = (10 * 2)
# weight of foo(Float) = (1 * 2)
# weight of bar(Int, Float) = ((10 + 1) * 1)
# func_weights = {foo: 20, bar: 11}
#
# Note that this only selects a function, the function signature will be selected
# later. This is done to prevent function with a greater number of signatures from
# being selected more frequently.
func_weights = dict()
# The length of the signature in func_weights
signature_length_by_func = dict()
for signature in signatures:
signature_weight = type_weights[signature.return_type]
signature_length = 1
for arg in signature.args:
if arg.is_subquery:
for subtype in arg.type:
signature_weight *= type_weights[subtype]
signature_length += 1
else:
signature_weight *= type_weights[arg.type]
signature_length += 1
if not signature_weight:
continue
if (signature.func not in func_weights or
signature_weight > func_weights[signature.func]):
func_weights[signature.func] = signature_weight
signature_length_by_func[signature.func] = signature_length
if not func_weights:
raise Exception('All functions disallowed based on signature types')
distinct_signature_lengths = set(signature_length_by_func.values())
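# Multiplying each weight by every other distinct signature length puts weights
# derived from signatures of different lengths on a common scale; the result is
# proportional to dividing each weight by its own signature length.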
for func, weight in func_weights.iteritems():
signature_length = signature_length_by_func[func]
func_weights[func] = reduce(
lambda x, y: x * y,
distinct_signature_lengths - set([signature_length]),
func_weights[func])
func = self._choose_from_weights(func_weights)
# Same idea as above but for the signatures of the selected function.
signature_weights = dict()
signature_lengths = dict()
for idx, signature in enumerate(func.signatures()):
if signature not in signatures:
continue
signature_weight = type_weights[signature.return_type]
signature_length = 1
for arg in signature.args:
if arg.is_subquery:
for subtype in arg.type:
signature_weight *= type_weights[subtype]
signature_length += 1
else:
signature_weight *= type_weights[arg.type]
signature_length += 1
if signature_weight:
signature_weights[idx] = signature_weight
signature_lengths[idx] = signature_length
distinct_signature_lengths = set(signature_lengths.values())
for idx, weight in signature_weights.iteritems():
signature_length = signature_lengths[idx]
signature_weights[idx] = reduce(
lambda x, y: x * y,
distinct_signature_lengths - set([signature_length]),
signature_weights[idx])
idx = self._choose_from_weights(signature_weights)
return func.signatures()[idx]
def allow_func_signature(self, signature):
weights = self.weights('TYPES')
if not weights[signature.return_type]:
return False
for arg in signature.args:
if arg.is_subquery:
if not all(weights[subtype] for subtype in arg.type):
return False
elif not weights[arg.type]:
return False
return True
def get_allowed_join_signatures(self, signatures):
"""
Returns all the function signatures that are allowed inside a JOIN clause. This
method is mutually exclusive with only_use_equality_join_predicates. The results of
this method are ignored if only_use_equality_join_predicates returns True.
"""
return signatures
def is_non_equality_join_predicate(self, func):
"""
Returns True if the given func is considered a non-equality join condition.
"""
return func in (GreaterThan, GreaterThanOrEquals, In,
IsNotDistinctFrom, IsNotDistinctFromOp, LessThan,
LessThanOrEquals, NotEquals, NotIn)
def get_analytic_funcs_that_cannot_contain_aggs(self):
"""
Returns a list of analytic functions that should not contain aggregate functions
"""
return None
def choose_statement(self):
return self._choose_from_weights('STATEMENT')
def choose_insert_source_clause(self):
"""
Returns whether we generate an INSERT/UPSERT SELECT or an INSERT/UPSERT VALUES
"""
return self._choose_from_weights('INSERT_SOURCE_CLAUSE')
def choose_insert_column_list(self, table):
"""
Decide whether or not an INSERT/UPSERT will be in the form of:
INSERT/UPSERT INTO table SELECT|VALUES ...
or
INSERT/UPSERT INTO table (col1, col2, ...) SELECT|VALUES ...
If the second form is chosen, the column list is shuffled. The column list will always contain
the primary key columns and between 0 and all additional columns.
"""
if 'partial' == self._choose_from_weights('INSERT_COLUMN_LIST'):
columns_to_insert = list(table.primary_keys)
min_additional_insert_cols = 0 if columns_to_insert else 1
remaining_columns = [col for col in table.cols if not col.is_primary_key]
shuffle(remaining_columns)
additional_column_count = randint(min_additional_insert_cols,
len(remaining_columns))
columns_to_insert.extend(remaining_columns[:additional_column_count])
shuffle(columns_to_insert)
return columns_to_insert
else:
return None
def choose_insert_values_row_count(self):
"""
Choose the number of rows to insert in an INSERT/UPSERT VALUES
"""
return self._choose_from_bounds('INSERT_VALUES_ROWS')
def choose_values_item_expr(self):
"""
For a VALUES clause, choose whether a particular item in a particular row will be a
constant or a function.
"""
return self._choose_from_weights('VALUES_ITEM_EXPR')
def choose_insert_vs_upsert(self):
"""
Choose whether a particular insertion-type statement will be INSERT or UPSERT.
"""
return self._choose_from_weights('INSERT_UPSERT')
class ImpalaNestedTypesProfile(DefaultProfile):
def __init__(self):
super(ImpalaNestedTypesProfile, self).__init__()
self._probabilities['OPTIONAL_QUERY_CLAUSES']['WITH'] = 0.3
self._probabilities['MISC']['INLINE_VIEW'] = 0.3
def use_lateral_join(self):
return random() < 0.5
def use_boolean_expr_for_lateral_join(self):
return random() < 0.2
def get_num_boolean_exprs_for_lateral_join(self):
if random() < 0.8:
return 0
result = 1
while random() < 0.6:
result += 1
return result
def get_table_count(self):
num = 1
while random() < (0.85 ** num):
num += 1
return num
# This profile was added for ad-hoc testing.
class TestFunctionProfile(DefaultProfile):
def choose_func_signature(self, signatures):
if not signatures:
raise Exception('At least one signature is required')
preferred_signatures = filter(lambda s: "DistinctFrom" in s.func._NAME, signatures)
if preferred_signatures:
signatures = preferred_signatures
return super(TestFunctionProfile, self).choose_func_signature(signatures)
class HiveProfile(DefaultProfile):
def __init__(self):
super(HiveProfile, self).__init__()
self._probabilities['MISC']['ONLY_USE_EQUALITY_JOIN_PREDICATES'] = 0
def use_having_without_groupby(self):
return False
def use_nested_with(self):
return False
def nulls_order_asc(self):
return 'BEFORE'
def allow_func_signature(self, signature):
if signature.func._NAME.startswith('DateAdd'):
return False
if signature.func._NAME in ('Greatest', 'Least'):
type = signature.return_type
argtypes = [arg.type for arg in signature.args]
for argtype in argtypes:
if type is None:
type = argtype
continue
else:
if type != argtype:
return False
return DefaultProfile.allow_func_signature(self, signature)
def get_allowed_join_signatures(self, signatures):
"""
Restricts the function signatures inside a JOIN clause to either be an Equals
operator, an And operator, or any operator that only takes in one argument. The reason
is that Hive only supports equi-joins, does not allow OR operators inside a JOIN, and
does not allow any other operator that operates over multiple columns.
The reason ONLY_USE_EQUALITY_JOIN_PREDICATES is not sufficient to guarantee this is
that Hive needs to restrict the functions used based on the argument size of a
function.
"""
return [signature for signature in signatures if
signature.func in (Equals, And) or len(signature.args) == 1]
def get_analytic_funcs_that_cannot_contain_aggs(self):
"""
Hive does not support aggregate functions inside AVG, COUNT, FIRSTVALUE, LAG,
LASTVALUE, LEAD, MAX, MIN, or SUM functions
"""
return (AnalyticAvg, AnalyticCount, AnalyticFirstValue, AnalyticLag,
AnalyticLastValue, AnalyticLead, AnalyticMax, AnalyticMin, AnalyticSum)
class DMLOnlyProfile(DefaultProfile):
"""
Profile that only executes DML statements
TODO: This will be useful for testing DML; eventually this should be folded into the
default profile. (IMPALA-4601)
"""
def __init__(self):
super(DMLOnlyProfile, self).__init__()
self._weights.update({
'STATEMENT': {
InsertStatement: 1}})
PROFILES = [var for var in locals().values()
if isinstance(var, type) and var.__name__.endswith('Profile')]
| apache-2.0 | 5,076,192,376,453,511,000 | 37.546144 | 90 | 0.638373 | false |
itdxer/neupy | tests/layers/test_activations.py | 1 | 13302 | import math
import six
import numpy as np
import tensorflow as tf
from neupy.utils import asfloat
from neupy.exceptions import (
LayerConnectionError,
WeightInitializationError,
)
from neupy import layers, algorithms, init
from base import BaseTestCase
from helpers import simple_classification
class ActivationLayerMainTestCase(BaseTestCase):
def test_linear_layer_without_bias(self):
input_layer = layers.Input(10)
output_layer = layers.Linear(2, weight=0.1, bias=None)
network = layers.join(input_layer, output_layer)
input_value = asfloat(np.ones((1, 10)))
actual_output = self.eval(network.output(input_value))
expected_output = np.ones((1, 2))
np.testing.assert_array_almost_equal(expected_output, actual_output)
def test_exception(self):
with self.assertRaises(TypeError):
layers.Linear(2, weight=None)
def test_repr_without_size(self):
layer = layers.Sigmoid()
self.assertEqual("Sigmoid(name='sigmoid-1')", str(layer))
def test_repr_with_size(self):
layer1 = layers.Sigmoid(13)
self.assertEqual(
str(layer1),
(
"Sigmoid(13, weight=HeNormal(gain=1.0), "
"bias=Constant(0), name='sigmoid-1')"
)
)
def test_variables(self):
network = layers.join(
layers.Input(2),
layers.Sigmoid(3, name='sigmoid'),
)
self.assertDictEqual(network.layer('sigmoid').variables, {})
network.outputs
variables = network.layer('sigmoid').variables
self.assertSequenceEqual(
sorted(variables.keys()),
['bias', 'weight'])
self.assertShapesEqual(variables['bias'].shape, (3,))
self.assertShapesEqual(variables['weight'].shape, (2, 3))
def test_failed_propagation_for_multiple_inputs(self):
inputs = layers.parallel(
layers.Input(1),
layers.Input(2),
)
if six.PY3:
expected_message = "2 positional arguments but 3 were given."
else:
expected_message = (
"get_output_shape\(\) takes exactly 2 arguments \(3 given\)"
)
with self.assertRaisesRegexp(TypeError, expected_message):
layers.join(inputs, layers.Relu(3, name='relu'))
def test_fail_rejoining_to_new_input(self):
network = layers.join(
layers.Input(10),
layers.Relu(5, name='relu'),
)
network.create_variables()
error_message = "Cannot connect layer `in` to layer `relu`"
with self.assertRaisesRegexp(LayerConnectionError, error_message):
layers.join(layers.Input(7, name='in'), network.layer('relu'))
def test_invalid_input_shape(self):
error_message = (
"Input shape expected to have 2 "
"dimensions, got 3 instead. Shape: \(\?, 10, 3\)"
)
with self.assertRaisesRegexp(LayerConnectionError, error_message):
layers.join(
layers.Input((10, 3)),
layers.Linear(10),
)
def test_unknown_feature_during_weight_init(self):
network = layers.join(
layers.Input(None),
layers.Linear(10, name='linear'),
)
message = (
"Cannot create variables for the layer `linear`, "
"because number of input features is unknown. "
"Input shape: \(\?, \?\)"
)
with self.assertRaisesRegexp(WeightInitializationError, message):
network.create_variables()
with self.assertRaisesRegexp(WeightInitializationError, message):
network.outputs
def test_invalid_weight_shape(self):
network = layers.join(
layers.Input(5),
layers.Linear(4, weight=np.ones((3, 3))),
)
with self.assertRaisesRegexp(ValueError, "Cannot create variable"):
network.create_variables()
variable = tf.Variable(np.ones((3, 3)), dtype=tf.float32)
network = layers.join(
layers.Input(5),
layers.Linear(4, weight=variable),
)
with self.assertRaisesRegexp(ValueError, "Cannot create variable"):
network.create_variables()
class ActivationLayersTestCase(BaseTestCase):
def test_activation_layers_without_size(self):
X = np.array([1, 2, -1, 10])
expected_output = np.array([1, 2, 0, 10])
layer = layers.Relu()
actual_output = self.eval(layer.output(X))
np.testing.assert_array_equal(actual_output, expected_output)
def test_hard_sigmoid_layer(self):
layer = layers.HardSigmoid(6)
input_value = asfloat(np.array([[-3, -2, -1, 0, 1, 2]]))
expected = np.array([[0, 0.1, 0.3, 0.5, 0.7, 0.9]])
output = self.eval(layer.activation_function(input_value))
np.testing.assert_array_almost_equal(output, expected)
def test_linear_layer(self):
layer = layers.Linear(1)
self.assertEqual(layer.activation_function(1), 1)
def test_tanh_layer(self):
layer1 = layers.Tanh(1)
self.assertGreater(1, self.eval(layer1.activation_function(1.)))
def test_leaky_relu(self):
X = asfloat(np.array([[10, 1, 0.1, 0, -0.1, -1]]).T)
expected_output = asfloat(np.array([[10, 1, 0.1, 0, -0.001, -0.01]]).T)
layer = layers.LeakyRelu(1)
actual_output = self.eval(layer.activation_function(X))
np.testing.assert_array_almost_equal(
expected_output, actual_output)
def test_softplus_layer(self):
layer = layers.Softplus(1)
self.assertAlmostEqual(
math.log(2),
self.eval(layer.activation_function(0.)))
def test_elu_layer(self):
test_input = asfloat(np.array([[10, 1, 0.1, 0, -1]]).T)
expected_output = np.array([
[10, 1, 0.1, 0, -0.6321205588285577]]).T
layer = layers.Elu()
actual_output = self.eval(layer.activation_function(test_input))
np.testing.assert_array_almost_equal(
expected_output, actual_output)
class SigmoidTestCase(BaseTestCase):
def test_sigmoid_layer(self):
layer1 = layers.Sigmoid(1)
self.assertGreater(1, self.eval(layer1.activation_function(1.)))
def test_sigmoid_semantic_segmentation(self):
network = layers.join(
layers.Input((10, 10, 1)),
layers.Sigmoid(),
)
input = 10 * np.random.random((2, 10, 10, 1)) - 5
actual_output = self.eval(network.output(input))
self.assertTrue(np.all(actual_output >= 0))
self.assertTrue(np.all(actual_output <= 1))
class SoftmaxTestCase(BaseTestCase):
def test_softmax_layer(self):
test_input = asfloat(np.array([[0.5, 0.5, 0.1]]))
softmax_layer = layers.Softmax(3)
correct_result = np.array([[0.37448695, 0.37448695, 0.25102611]])
np.testing.assert_array_almost_equal(
correct_result,
self.eval(softmax_layer.activation_function(test_input)))
def test_softmax_semantic_segmentation(self):
network = layers.join(
layers.Input((10, 10, 6)),
layers.Softmax(),
)
input = np.random.random((2, 10, 10, 6))
actual_output = self.eval(network.output(input))
np.testing.assert_array_almost_equal(
actual_output.sum(axis=-1),
np.ones((2, 10, 10)))
class ReluTestCase(BaseTestCase):
def test_relu_activation(self):
layer = layers.Relu()
self.assertEqual(0, self.eval(layer.activation_function(-10)))
self.assertEqual(0, self.eval(layer.activation_function(0)))
self.assertEqual(10, self.eval(layer.activation_function(10)))
layer = layers.Relu(alpha=0.1)
self.assertAlmostEqual(-1, self.eval(layer.activation_function(-10)))
self.assertAlmostEqual(-0.2, self.eval(layer.activation_function(-2)))
def test_relu(self):
# Test alpha parameter
X = asfloat(np.array([[10, 1, 0.1, 0, -0.1, -1]]).T)
expected_output = asfloat(np.array([[10, 1, 0.1, 0, -0.01, -0.1]]).T)
layer = layers.Relu(1, alpha=0.1)
actual_output = self.eval(layer.activation_function(X))
np.testing.assert_array_almost_equal(
expected_output, actual_output)
def test_repr_without_size(self):
self.assertEqual("Relu(alpha=0, name='relu-1')", str(layers.Relu()))
def test_repr_with_size(self):
self.assertEqual(
str(layers.Relu(10)),
(
"Relu(10, alpha=0, weight=HeNormal(gain=2), "
"bias=Constant(0), name='relu-1')"
)
)
class PReluTestCase(BaseTestCase):
def test_invalid_alpha_axes_parameter(self):
network = layers.join(
layers.PRelu(10, alpha_axes=2),
layers.Relu(),
)
with self.assertRaises(LayerConnectionError):
# cannot specify 2-axis, because we only
# have 0 and 1 axes (2D input)
layers.join(layers.Input(10), network)
with self.assertRaises(ValueError):
# 0-axis is not allowed
layers.PRelu(10, alpha_axes=0)
def test_prelu_alpha_init_random_params(self):
prelu_layer = layers.PRelu(10, alpha=init.XavierNormal())
prelu_layer.create_variables((None, 5))
alpha = self.eval(prelu_layer.alpha)
self.assertEqual(10, np.unique(alpha).size)
def test_prelu_alpha_init_constant_value(self):
prelu_layer = layers.PRelu(10, alpha=0.25)
prelu_layer.create_variables((None, 5))
alpha = self.eval(prelu_layer.alpha)
self.assertEqual(alpha.shape, (10,))
np.testing.assert_array_almost_equal(alpha, np.ones(10) * 0.25)
def test_prelu_layer_param_conv(self):
network = layers.join(
layers.Input((10, 10, 3)),
layers.Convolution((3, 3, 5)),
layers.PRelu(alpha=0.25, alpha_axes=(1, 3), name='prelu'),
)
network.create_variables()
alpha = self.eval(network.layer('prelu').alpha)
expected_alpha = np.ones((8, 5)) * 0.25
self.assertEqual(alpha.shape, (8, 5))
np.testing.assert_array_almost_equal(alpha, expected_alpha)
def test_prelu_output_by_dense_input(self):
prelu_layer = layers.PRelu(alpha=0.25)
prelu_layer.create_variables((None, 1))
X = np.array([[10, 1, 0.1, 0, -0.1, -1]]).T
expected_output = np.array([[10, 1, 0.1, 0, -0.025, -0.25]]).T
actual_output = self.eval(prelu_layer.activation_function(X))
np.testing.assert_array_almost_equal(expected_output, actual_output)
def test_prelu_output_by_spatial_input(self):
network = layers.join(
layers.Input((10, 10, 3)),
layers.Convolution((3, 3, 5)),
layers.PRelu(alpha=0.25, alpha_axes=(1, 3)),
)
X = asfloat(np.random.random((1, 10, 10, 3)))
actual_output = self.eval(network.output(X))
self.assertEqual(actual_output.shape, (1, 8, 8, 5))
def test_prelu_param_updates(self):
x_train, _, y_train, _ = simple_classification()
prelu_layer1 = layers.PRelu(20, alpha=0.25)
prelu_layer2 = layers.PRelu(1, alpha=0.25)
gdnet = algorithms.GradientDescent(
[
layers.Input(10),
prelu_layer1,
prelu_layer2,
],
batch_size=None,
)
prelu1_alpha_before_training = self.eval(prelu_layer1.alpha)
prelu2_alpha_before_training = self.eval(prelu_layer2.alpha)
gdnet.train(x_train, y_train, epochs=10)
prelu1_alpha_after_training = self.eval(prelu_layer1.alpha)
prelu2_alpha_after_training = self.eval(prelu_layer2.alpha)
self.assertTrue(all(np.not_equal(
prelu1_alpha_before_training,
prelu1_alpha_after_training,
)))
self.assertTrue(all(np.not_equal(
prelu2_alpha_before_training,
prelu2_alpha_after_training,
)))
def test_repr_without_size(self):
self.assertEqual(
"PRelu(alpha_axes=(-1,), alpha=Constant(0.25), name='p-relu-1')",
str(layers.PRelu()))
def test_repr_with_size(self):
self.assertEqual(
str(layers.PRelu(10)),
(
"PRelu(10, alpha_axes=(-1,), alpha=Constant(0.25), "
"weight=HeNormal(gain=2), bias=Constant(0), "
"name='p-relu-1')"
)
)
def test_prelu_variables(self):
network = layers.join(
layers.Input(2),
layers.PRelu(3, name='prelu'),
)
self.assertDictEqual(network.layer('prelu').variables, {})
network.create_variables()
variables = network.layer('prelu').variables
self.assertSequenceEqual(
sorted(variables.keys()),
['alpha', 'bias', 'weight'])
self.assertShapesEqual(variables['bias'].shape, (3,))
self.assertShapesEqual(variables['weight'].shape, (2, 3))
self.assertShapesEqual(variables['alpha'].shape, (3,))
| mit | -8,575,744,215,459,305,000 | 33.195373 | 79 | 0.587957 | false |
karlch/vimiv | vimiv/thumbnail_manager.py | 1 | 10586 | # vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Provides classes to store and load thumbnails from a shared cache.
The ThumbnailStore transparently creates and loads thumbnails according to the
freedesktop.org thumbnail management standard.
The ThumbnailManager provides an asynchronous mechanism to load thumbnails from
the store.
If possible, you should avoid using the store directly but use the manager
instead.
"""
import collections
import hashlib
import os
import tempfile
from multiprocessing.pool import ThreadPool as Pool
from gi._error import GError
from gi.repository import GdkPixbuf, GLib, Gtk
from gi.repository.GdkPixbuf import Pixbuf
from vimiv.helpers import get_user_cache_dir
ThumbTuple = collections.namedtuple('ThumbTuple', ['original', 'thumbnail'])
class ThumbnailManager:
"""Provides an asynchronous mechanism to load thumbnails.
Attributes:
thumbnail_store: ThumbnailStore class with the loading mechanism.
large: The thumbnail managing standard specifies two thumbnail sizes.
256x256 (large) and 128x128 (normal)
default_icon: Default icon if thumbnails are not yet loaded.
error_icon: The path to the icon which is used when thumbnail creation
fails.
"""
_cpu_count = os.cpu_count()
if _cpu_count is None:
_cpu_count = 1
elif _cpu_count > 1:
_cpu_count -= 1
_thread_pool = Pool(_cpu_count)
_cache = {}
def __init__(self, large=True):
"""Construct a new ThumbnailManager.
Args:
large: Size of thumbnails that are created. If true 256x256 else
128x128.
"""
super(ThumbnailManager, self).__init__()
self.thumbnail_store = ThumbnailStore(large=large)
# Default icon if thumbnail creation fails
icon_theme = Gtk.IconTheme.get_default()
self.error_icon = icon_theme.lookup_icon("dialog-error", 256,
0).get_filename()
self.default_icon = icon_theme.lookup_icon("image-x-generic", 256,
0).get_filename()
def _do_get_thumbnail_at_scale(self, source_file, size, callback, index,
ignore_cache=False):
if not ignore_cache and source_file in self._cache:
pixbuf = self._cache[source_file]
else:
thumbnail_path = self.thumbnail_store.get_thumbnail(source_file,
ignore_cache)
if thumbnail_path is None:
thumbnail_path = self.error_icon
pixbuf = Pixbuf.new_from_file(thumbnail_path)
self._cache[source_file] = pixbuf
if pixbuf.get_height() != size and pixbuf.get_width() != size:
pixbuf = self.scale_pixbuf(pixbuf, size)
return callback, pixbuf, index
@staticmethod
def scale_pixbuf(pixbuf, size):
"""Scale the pixbuf to the given size keeping the aspect ratio.
Either the width or the height of the returned pixbuf is `size` large,
depending on the aspect ratio.
Args:
pixbuf: The pixbuf to scale
size: The size of the new width or height
Return:
The scaled pixbuf.
"""
width = size
height = size
ratio = pixbuf.get_width() / pixbuf.get_height()
if ratio > 1:
height /= ratio
else:
width *= ratio
pixbuf = pixbuf.scale_simple(width, height,
GdkPixbuf.InterpType.BILINEAR)
return pixbuf
@staticmethod
def _do_callback(result):
GLib.idle_add(*result)
def get_thumbnail_at_scale_async(self, filename, size, callback, index,
ignore_cache=False):
"""Create the thumbnail for 'filename' and return it via 'callback'.
Creates the thumbnail for the given filename at the given size and
then calls the given callback function with the resulting pixbuf.
Args:
filename: The filename to get the thumbnail for
size: The size the returned pixbuf is scaled to
callback: A callable of the form callback(pixbuf, index)
index: An index that is passed through unchanged to callback
ignore_cache: If true, the builtin in-memory cache is bypassed and
the thumbnail file is loaded from disk
"""
self._thread_pool.apply_async(self._do_get_thumbnail_at_scale,
(filename, size, callback, index,
ignore_cache),
callback=self._do_callback)
class ThumbnailStore(object):
"""Implements freedesktop.org's Thumbnail Managing Standard."""
KEY_URI = "Thumb::URI"
KEY_MTIME = "Thumb::MTime"
KEY_SIZE = "Thumb::Size"
KEY_WIDTH = "Thumb::Image::Width"
KEY_HEIGHT = "Thumb::Image::Height"
def __init__(self, large=True):
"""Construct a new ThumbnailStore.
Args:
large: Size of thumbnails that are created. If true 256x256 else
128x128.
"""
super(ThumbnailStore, self).__init__()
import vimiv
self.base_dir = os.path.join(get_user_cache_dir(), "thumbnails")
self.fail_dir = os.path.join(
self.base_dir, "fail", "vimiv-" + vimiv.__version__)
self.thumbnail_dir = ""
self.thumb_size = 0
self.use_large_thumbnails(large)
self._ensure_dirs_exist()
def use_large_thumbnails(self, enabled=True):
"""Specify whether this thumbnail store uses large thumbnails.
Large thumbnails have 256x256 pixels and non-large thumbnails 128x128.
Args:
enabled: If true large thumbnails will be used.
"""
if enabled:
self.thumbnail_dir = os.path.join(self.base_dir, "large")
self.thumb_size = 256
else:
self.thumbnail_dir = os.path.join(self.base_dir, "normal")
self.thumb_size = 128
def get_thumbnail(self, filename, ignore_current=False):
"""Get the path of the thumbnail of the given filename.
If the requested thumbnail does not yet exist, it will first be created
before returning its path.
Args:
filename: The filename to get the thumbnail for.
ignore_current: If True, ignore saved thumbnails and force a
recreation. Needed because transforming images from within thumbnail
mode may happen in under a second, which the mtime check cannot detect.
Return:
The path of the thumbnail file or None if thumbnail creation failed.
"""
# Don't create thumbnails for thumbnail cache
if filename.startswith(self.base_dir):
return filename
thumbnail_filename = self._get_thumbnail_filename(filename)
thumbnail_path = self._get_thumbnail_path(thumbnail_filename)
if os.access(thumbnail_path, os.R_OK) \
and self._is_current(filename, thumbnail_path) \
and not ignore_current:
return thumbnail_path
fail_path = self._get_fail_path(thumbnail_filename)
if os.path.exists(fail_path):
# We already tried to create a thumbnail for the given file but
# failed; don't try again.
return None
if self._create_thumbnail(filename, thumbnail_filename):
return thumbnail_path
return None
def _ensure_dirs_exist(self):
os.makedirs(self.thumbnail_dir, 0o700, exist_ok=True)
os.makedirs(self.fail_dir, 0o700, exist_ok=True)
def _is_current(self, source_file, thumbnail_path):
source_mtime = str(self._get_source_mtime(source_file))
thumbnail_mtime = self._get_thumbnail_mtime(thumbnail_path)
return source_mtime == thumbnail_mtime
def _get_thumbnail_filename(self, filename):
uri = self._get_source_uri(filename)
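# Per the freedesktop.org thumbnail spec, the file name is the MD5 hex
# digest of the source URI plus ".png".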
return hashlib.md5(bytes(uri, "UTF-8")).hexdigest() + ".png"
@staticmethod
def _get_source_uri(filename):
return "file://" + os.path.abspath(os.path.expanduser(filename))
def _get_thumbnail_path(self, thumbnail_filename):
return os.path.join(self.thumbnail_dir, thumbnail_filename)
def _get_fail_path(self, thumbnail_filename):
return os.path.join(self.fail_dir, thumbnail_filename)
@staticmethod
def _get_source_mtime(src):
return int(os.path.getmtime(src))
def _get_thumbnail_mtime(self, thumbnail_path):
pixbuf = Pixbuf.new_from_file(thumbnail_path)
mtime = pixbuf.get_options()["tEXt::" + self.KEY_MTIME]
return mtime
def _create_thumbnail(self, source_file, thumbnail_filename):
# Cannot access source; create neither thumbnail nor fail file
if not os.access(source_file, os.R_OK):
return False
try:
image = Pixbuf.new_from_file_at_scale(source_file, self.thumb_size,
self.thumb_size, True)
dest_path = self._get_thumbnail_path(thumbnail_filename)
success = True
except GError:
image = Pixbuf.new(GdkPixbuf.Colorspace.RGB, False, 8, 1, 1)
dest_path = self._get_fail_path(thumbnail_filename)
success = False
width = 0
height = 0
try:
_, width, height = GdkPixbuf.Pixbuf.get_file_info(source_file)
except IOError:
pass
options = {
"tEXt::" + self.KEY_URI: str(self._get_source_uri(source_file)),
"tEXt::" + self.KEY_MTIME: str(self._get_source_mtime(source_file)),
"tEXt::" + self.KEY_SIZE: str(os.path.getsize(source_file))
}
if width > 0 and height > 0:
options["tEXt::" + self.KEY_WIDTH] = str(width)
options["tEXt::" + self.KEY_HEIGHT] = str(height)
# First create temporary file and then move it. This avoids problems
# with concurrent access of the thumbnail cache, since "move" is an
# atomic operation
handle, tmp_filename = tempfile.mkstemp(dir=self.base_dir)
os.close(handle)
os.chmod(tmp_filename, 0o600)
image.savev(tmp_filename, "png", list(options.keys()),
list(options.values()))
os.replace(tmp_filename, dest_path)
return success
| mit | 974,612,254,628,089,200 | 35.885017 | 80 | 0.601833 | false |
OpenKMIP/PyKMIP | kmip/pie/objects.py | 1 | 68704 | # Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import abstractmethod
import sqlalchemy
from sqlalchemy import Column, event, ForeignKey, Integer, String, VARBINARY
from sqlalchemy import Boolean
from sqlalchemy.ext.associationproxy import association_proxy
import binascii
import six
from kmip.core import enums
from kmip.pie import sqltypes as sql
app_specific_info_map = sqlalchemy.Table(
"app_specific_info_map",
sql.Base.metadata,
sqlalchemy.Column(
"managed_object_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"managed_objects.uid",
ondelete="CASCADE"
)
),
sqlalchemy.Column(
"app_specific_info_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"app_specific_info.id",
ondelete="CASCADE"
)
)
)
object_group_map = sqlalchemy.Table(
"object_group_map",
sql.Base.metadata,
sqlalchemy.Column(
"managed_object_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"managed_objects.uid",
ondelete="CASCADE"
)
),
sqlalchemy.Column(
"object_group_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"object_groups.id",
ondelete="CASCADE"
)
)
)
class ManagedObject(sql.Base):
"""
The abstract base class of the simplified KMIP object hierarchy.
A ManagedObject is a core KMIP object that is the subject of key
management operations. It contains various attributes that are common to
all types of ManagedObjects, including keys, certificates, and various
types of secret or sensitive data.
For more information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
value: The value of the ManagedObject. Type varies, usually bytes.
unique_identifier: The string ID of the ManagedObject.
names: A list of names associated with the ManagedObject.
object_type: An enumeration associated with the type of ManagedObject.
"""
__tablename__ = 'managed_objects'
unique_identifier = Column('uid', Integer, primary_key=True)
_object_type = Column('object_type', sql.EnumType(enums.ObjectType))
_class_type = Column('class_type', String(50))
value = Column('value', VARBINARY(1024))
name_index = Column(Integer, default=0)
_names = sqlalchemy.orm.relationship(
"ManagedObjectName",
back_populates="mo",
cascade="all, delete-orphan",
order_by="ManagedObjectName.id"
)
names = association_proxy('_names', 'name')
operation_policy_name = Column(
'operation_policy_name',
String(50),
default='default'
)
sensitive = Column("sensitive", Boolean, default=False)
initial_date = Column(Integer, default=0)
_owner = Column('owner', String(50), default=None)
app_specific_info = sqlalchemy.orm.relationship(
"ApplicationSpecificInformation",
secondary=app_specific_info_map,
back_populates="managed_objects",
order_by="ApplicationSpecificInformation.id",
passive_deletes=True
)
object_groups = sqlalchemy.orm.relationship(
"ObjectGroup",
secondary=object_group_map,
back_populates="managed_objects",
order_by="ObjectGroup.id",
passive_deletes=True
)
__mapper_args__ = {
'polymorphic_identity': 'ManagedObject',
'polymorphic_on': _class_type
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self):
"""
Create a ManagedObject.
"""
self.value = None
self.unique_identifier = None
self.name_index = 0
self.names = list()
self.operation_policy_name = None
self.initial_date = 0
self.sensitive = False
self._object_type = None
self._owner = None
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._application_specific_informations = list()
self._contact_information = None
self._object_groups = list()
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._archive_date = None
self._last_change_date = None
@property
def object_type(self):
"""
Accessor and property definition for the object type attribute.
Returns:
ObjectType: An ObjectType enumeration that corresponds to the
class of the object.
"""
return self._object_type
@object_type.setter
def object_type(self, value):
"""
Set blocker for the object type attribute.
Raises:
AttributeError: Always raised to block setting of attribute.
"""
raise AttributeError("object type cannot be set")
@abstractmethod
def validate(self):
"""
Verify that the contents of the ManagedObject are valid.
"""
pass
@abstractmethod
def __repr__(self):
pass
@abstractmethod
def __str__(self):
pass
@abstractmethod
def __eq__(self, other):
pass
@abstractmethod
def __ne__(self, other):
pass
class CryptographicObject(ManagedObject):
"""
The abstract base class of all ManagedObjects related to cryptography.
A CryptographicObject is a core KMIP object that is the subject of key
management operations. It contains various attributes that are common to
all types of CryptographicObjects, including keys and certificates.
For more information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
cryptographic_usage_masks: A list of usage mask enumerations
describing how the CryptographicObject will be used.
"""
__tablename__ = 'crypto_objects'
unique_identifier = Column('uid', Integer,
ForeignKey('managed_objects.uid'),
primary_key=True)
cryptographic_usage_masks = Column('cryptographic_usage_mask',
sql.UsageMaskType)
state = Column('state', sql.EnumType(enums.State))
__mapper_args__ = {
'polymorphic_identity': 'CryptographicObject'
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self):
"""
Create a CryptographicObject.
"""
super(CryptographicObject, self).__init__()
self.cryptographic_usage_masks = list()
self.state = enums.State.PRE_ACTIVE
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._digests = list()
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._activation_date = None
self._compromise_date = None
self._compromise_occurrence_date = None
self._deactivation_date = None
self._destroy_date = None
self._fresh = None
self._lease_time = None
self._links = list()
self._revocation_reason = None
class Key(CryptographicObject):
"""
The abstract base class of all ManagedObjects that are cryptographic keys.
A Key is a core KMIP object that is the subject of key management
operations. It contains various attributes that are common to all types of
Keys, including symmetric and asymmetric keys.
For more information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
cryptographic_algorithm: A CryptographicAlgorithm enumeration defining
the algorithm the key should be used with.
cryptographic_length: An int defining the length of the key in bits.
key_format_type: A KeyFormatType enumeration defining the format of
the key value.
key_wrapping_data: A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'keys'
unique_identifier = Column('uid', Integer,
ForeignKey('crypto_objects.uid'),
primary_key=True)
cryptographic_algorithm = Column(
'cryptographic_algorithm', sql.EnumType(enums.CryptographicAlgorithm))
cryptographic_length = Column('cryptographic_length', Integer)
key_format_type = Column(
'key_format_type', sql.EnumType(enums.KeyFormatType))
# Key wrapping data fields
_kdw_wrapping_method = Column(
'_kdw_wrapping_method',
sql.EnumType(enums.WrappingMethod),
default=None
)
_kdw_eki_unique_identifier = Column(
'_kdw_eki_unique_identifier',
String,
default=None
)
_kdw_eki_cp_block_cipher_mode = Column(
'_kdw_eki_cp_block_cipher_mode',
sql.EnumType(enums.BlockCipherMode),
default=None
)
_kdw_eki_cp_padding_method = Column(
'_kdw_eki_cp_padding_method',
sql.EnumType(enums.PaddingMethod),
default=None
)
_kdw_eki_cp_hashing_algorithm = Column(
'_kdw_eki_cp_hashing_algorithm',
sql.EnumType(enums.HashingAlgorithm),
default=None
)
_kdw_eki_cp_key_role_type = Column(
'_kdw_eki_cp_key_role_type',
sql.EnumType(enums.KeyRoleType),
default=None
)
_kdw_eki_cp_digital_signature_algorithm = Column(
'_kdw_eki_cp_digital_signature_algorithm',
sql.EnumType(enums.DigitalSignatureAlgorithm),
default=None
)
_kdw_eki_cp_cryptographic_algorithm = Column(
'_kdw_eki_cp_cryptographic_algorithm',
sql.EnumType(enums.CryptographicAlgorithm),
default=None
)
_kdw_eki_cp_random_iv = Column(
'_kdw_eki_cp_random_iv',
Boolean,
default=None
)
_kdw_eki_cp_iv_length = Column(
'_kdw_eki_cp_iv_length',
Integer,
default=None
)
_kdw_eki_cp_tag_length = Column(
'_kdw_eki_cp_tag_length',
Integer,
default=None
)
_kdw_eki_cp_fixed_field_length = Column(
'_kdw_eki_cp_fixed_field_length',
Integer,
default=None
)
_kdw_eki_cp_invocation_field_length = Column(
'_kdw_eki_cp_invocation_field_length',
Integer
)
_kdw_eki_cp_counter_length = Column(
'_kdw_eki_cp_counter_length',
Integer,
default=None
)
_kdw_eki_cp_initial_counter_value = Column(
'_kdw_eki_cp_initial_counter_value',
Integer,
default=None
)
_kdw_mski_unique_identifier = Column(
'_kdw_mski_unique_identifier',
String,
default=None
)
_kdw_mski_cp_block_cipher_mode = Column(
'_kdw_mski_cp_block_cipher_mode',
sql.EnumType(enums.BlockCipherMode),
default=None
)
_kdw_mski_cp_padding_method = Column(
'_kdw_mski_cp_padding_method',
sql.EnumType(enums.PaddingMethod),
default=None
)
_kdw_mski_cp_hashing_algorithm = Column(
'_kdw_mski_cp_hashing_algorithm',
sql.EnumType(enums.HashingAlgorithm),
default=None
)
_kdw_mski_cp_key_role_type = Column(
'_kdw_mski_cp_key_role_type',
sql.EnumType(enums.KeyRoleType),
default=None
)
_kdw_mski_cp_digital_signature_algorithm = Column(
'_kdw_mski_cp_digital_signature_algorithm',
sql.EnumType(enums.DigitalSignatureAlgorithm),
default=None
)
_kdw_mski_cp_cryptographic_algorithm = Column(
'_kdw_mski_cp_cryptographic_algorithm',
sql.EnumType(enums.CryptographicAlgorithm),
default=None
)
_kdw_mski_cp_random_iv = Column(
'_kdw_mski_cp_random_iv',
Boolean,
default=None
)
_kdw_mski_cp_iv_length = Column(
'_kdw_mski_cp_iv_length',
Integer,
default=None
)
_kdw_mski_cp_tag_length = Column(
'_kdw_mski_cp_tag_length',
Integer,
default=None
)
_kdw_mski_cp_fixed_field_length = Column(
'_kdw_mski_cp_fixed_field_length',
Integer,
default=None
)
_kdw_mski_cp_invocation_field_length = Column(
'_kdw_mski_cp_invocation_field_length',
Integer,
default=None
)
_kdw_mski_cp_counter_length = Column(
'_kdw_mski_cp_counter_length',
Integer,
default=None
)
_kdw_mski_cp_initial_counter_value = Column(
'_kdw_mski_cp_initial_counter_value',
Integer,
default=None
)
_kdw_mac_signature = Column(
'_kdw_mac_signature',
VARBINARY(1024),
default=None
)
_kdw_iv_counter_nonce = Column(
'_kdw_iv_counter_nonce',
VARBINARY(1024),
default=None
)
_kdw_encoding_option = Column(
'_kdw_encoding_option',
sql.EnumType(enums.EncodingOption),
default=None
)
__mapper_args__ = {
'polymorphic_identity': 'Key'
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self, key_wrapping_data=None):
"""
Create a Key object.
Args:
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(Key, self).__init__()
self.cryptographic_algorithm = None
self.cryptographic_length = None
self.key_format_type = None
self.key_wrapping_data = key_wrapping_data
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._cryptographic_parameters = list()
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._usage_limits = None
@property
def key_wrapping_data(self):
"""
Retrieve all of the relevant key wrapping data fields and return them
as a dictionary.
"""
key_wrapping_data = {}
encryption_key_info = {
'unique_identifier': self._kdw_eki_unique_identifier,
'cryptographic_parameters': {
'block_cipher_mode': self._kdw_eki_cp_block_cipher_mode,
'padding_method': self._kdw_eki_cp_padding_method,
'hashing_algorithm': self._kdw_eki_cp_hashing_algorithm,
'key_role_type': self._kdw_eki_cp_key_role_type,
'digital_signature_algorithm':
self._kdw_eki_cp_digital_signature_algorithm,
'cryptographic_algorithm':
self._kdw_eki_cp_cryptographic_algorithm,
'random_iv': self._kdw_eki_cp_random_iv,
'iv_length': self._kdw_eki_cp_iv_length,
'tag_length': self._kdw_eki_cp_tag_length,
'fixed_field_length': self._kdw_eki_cp_fixed_field_length,
'invocation_field_length':
self._kdw_eki_cp_invocation_field_length,
'counter_length': self._kdw_eki_cp_counter_length,
'initial_counter_value':
self._kdw_eki_cp_initial_counter_value
}
}
if not any(encryption_key_info['cryptographic_parameters'].values()):
encryption_key_info['cryptographic_parameters'] = {}
if not any(encryption_key_info.values()):
encryption_key_info = {}
mac_sign_key_info = {
'unique_identifier': self._kdw_mski_unique_identifier,
'cryptographic_parameters': {
'block_cipher_mode': self._kdw_mski_cp_block_cipher_mode,
'padding_method': self._kdw_mski_cp_padding_method,
'hashing_algorithm': self._kdw_mski_cp_hashing_algorithm,
'key_role_type': self._kdw_mski_cp_key_role_type,
'digital_signature_algorithm':
self._kdw_mski_cp_digital_signature_algorithm,
'cryptographic_algorithm':
self._kdw_mski_cp_cryptographic_algorithm,
'random_iv': self._kdw_mski_cp_random_iv,
'iv_length': self._kdw_mski_cp_iv_length,
'tag_length': self._kdw_mski_cp_tag_length,
'fixed_field_length': self._kdw_mski_cp_fixed_field_length,
'invocation_field_length':
self._kdw_mski_cp_invocation_field_length,
'counter_length': self._kdw_mski_cp_counter_length,
'initial_counter_value':
self._kdw_mski_cp_initial_counter_value
}
}
if not any(mac_sign_key_info['cryptographic_parameters'].values()):
mac_sign_key_info['cryptographic_parameters'] = {}
if not any(mac_sign_key_info.values()):
mac_sign_key_info = {}
key_wrapping_data['wrapping_method'] = self._kdw_wrapping_method
key_wrapping_data['encryption_key_information'] = encryption_key_info
key_wrapping_data['mac_signature_key_information'] = mac_sign_key_info
key_wrapping_data['mac_signature'] = self._kdw_mac_signature
key_wrapping_data['iv_counter_nonce'] = self._kdw_iv_counter_nonce
key_wrapping_data['encoding_option'] = self._kdw_encoding_option
if not any(key_wrapping_data.values()):
key_wrapping_data = {}
return key_wrapping_data
@key_wrapping_data.setter
def key_wrapping_data(self, value):
"""
Set the key wrapping data attributes using a dictionary.
"""
if value is None:
value = {}
elif not isinstance(value, dict):
raise TypeError("Key wrapping data must be a dictionary.")
self._kdw_wrapping_method = value.get('wrapping_method')
eki = value.get('encryption_key_information')
if eki is None:
eki = {}
self._kdw_eki_unique_identifier = eki.get('unique_identifier')
eki_cp = eki.get('cryptographic_parameters')
if eki_cp is None:
eki_cp = {}
self._kdw_eki_cp_block_cipher_mode = eki_cp.get('block_cipher_mode')
self._kdw_eki_cp_padding_method = eki_cp.get('padding_method')
self._kdw_eki_cp_hashing_algorithm = eki_cp.get('hashing_algorithm')
self._kdw_eki_cp_key_role_type = eki_cp.get('key_role_type')
self._kdw_eki_cp_digital_signature_algorithm = \
eki_cp.get('digital_signature_algorithm')
self._kdw_eki_cp_cryptographic_algorithm = \
eki_cp.get('cryptographic_algorithm')
self._kdw_eki_cp_random_iv = eki_cp.get('random_iv')
self._kdw_eki_cp_iv_length = eki_cp.get('iv_length')
self._kdw_eki_cp_tag_length = eki_cp.get('tag_length')
self._kdw_eki_cp_fixed_field_length = eki_cp.get('fixed_field_length')
self._kdw_eki_cp_invocation_field_length = \
eki_cp.get('invocation_field_length')
self._kdw_eki_cp_counter_length = eki_cp.get('counter_length')
self._kdw_eki_cp_initial_counter_value = \
eki_cp.get('initial_counter_value')
mski = value.get('mac_signature_key_information')
if mski is None:
mski = {}
self._kdw_mski_unique_identifier = mski.get('unique_identifier')
mski_cp = mski.get('cryptographic_parameters')
if mski_cp is None:
mski_cp = {}
self._kdw_mski_cp_block_cipher_mode = mski_cp.get('block_cipher_mode')
self._kdw_mski_cp_padding_method = mski_cp.get('padding_method')
self._kdw_mski_cp_hashing_algorithm = mski_cp.get('hashing_algorithm')
self._kdw_mski_cp_key_role_type = mski_cp.get('key_role_type')
self._kdw_mski_cp_digital_signature_algorithm = \
mski_cp.get('digital_signature_algorithm')
self._kdw_mski_cp_cryptographic_algorithm = \
mski_cp.get('cryptographic_algorithm')
self._kdw_mski_cp_random_iv = mski_cp.get('random_iv')
self._kdw_mski_cp_iv_length = mski_cp.get('iv_length')
self._kdw_mski_cp_tag_length = mski_cp.get('tag_length')
self._kdw_mski_cp_fixed_field_length = \
mski_cp.get('fixed_field_length')
self._kdw_mski_cp_invocation_field_length = \
mski_cp.get('invocation_field_length')
self._kdw_mski_cp_counter_length = mski_cp.get('counter_length')
self._kdw_mski_cp_initial_counter_value = \
mski_cp.get('initial_counter_value')
self._kdw_mac_signature = value.get('mac_signature')
self._kdw_iv_counter_nonce = value.get('iv_counter_nonce')
self._kdw_encoding_option = value.get('encoding_option')
class SymmetricKey(Key):
"""
The SymmetricKey class of the simplified KMIP object hierarchy.
A SymmetricKey is a core KMIP object that is the subject of key
management operations. For more information, see Section 2.2 of the KMIP
1.1 specification.
Attributes:
cryptographic_algorithm: The type of algorithm for the SymmetricKey.
cryptographic_length: The length in bits of the SymmetricKey value.
value: The bytes of the SymmetricKey.
key_format_type: The format of the key value.
cryptographic_usage_masks: The list of usage mask flags for
SymmetricKey application.
names: The string names of the SymmetricKey.
key_wrapping_data: A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'symmetric_keys'
unique_identifier = Column('uid', Integer,
ForeignKey('keys.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'SymmetricKey'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, algorithm, length, value, masks=None,
name='Symmetric Key', key_wrapping_data=None):
"""
Create a SymmetricKey.
Args:
algorithm(CryptographicAlgorithm): An enumeration identifying the
type of algorithm for the key.
length(int): The length in bits of the key.
value(bytes): The bytes representing the key.
masks(list): A list of CryptographicUsageMask enumerations defining
how the key will be used. Optional, defaults to None.
name(string): The string name of the key. Optional, defaults to
'Symmetric Key'.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(SymmetricKey, self).__init__(
key_wrapping_data=key_wrapping_data
)
self._object_type = enums.ObjectType.SYMMETRIC_KEY
self.key_format_type = enums.KeyFormatType.RAW
self.value = value
self.cryptographic_algorithm = algorithm
self.cryptographic_length = length
self.names = [name]
if masks:
self.cryptographic_usage_masks.extend(masks)
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._process_start_date = None
self._protect_stop_date = None
self.validate()
def validate(self):
"""
Verify that the contents of the SymmetricKey object are valid.
Raises:
TypeError: if the types of any SymmetricKey attributes are invalid
ValueError: if the key length and key value length do not match
"""
if not isinstance(self.value, bytes):
raise TypeError("key value must be bytes")
elif not isinstance(self.cryptographic_algorithm,
enums.CryptographicAlgorithm):
raise TypeError("key algorithm must be a CryptographicAlgorithm "
"enumeration")
elif not isinstance(self.cryptographic_length, six.integer_types):
raise TypeError("key length must be an integer")
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"key mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("key name {0} must be a string".format(
position))
if not self.key_wrapping_data:
if (len(self.value) * 8) != self.cryptographic_length:
msg = "key length ({0}) not equal to key value length ({1})"
msg = msg.format(
self.cryptographic_length,
len(self.value) * 8
)
raise ValueError(msg)
def __repr__(self):
algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
length = "length={0}".format(self.cryptographic_length)
value = "value={0}".format(binascii.hexlify(self.value))
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
return "SymmetricKey({0}, {1}, {2}, {3})".format(
algorithm,
length,
value,
key_wrapping_data
)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, SymmetricKey):
if self.value != other.value:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, SymmetricKey):
return not (self == other)
else:
return NotImplemented
event.listen(SymmetricKey._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
class PublicKey(Key):
"""
The PublicKey class of the simplified KMIP object hierarchy.
A PublicKey is a core KMIP object that is the subject of key management
operations. For more information, see Section 2.2 of the KMIP 1.1
specification.
Attributes:
cryptographic_algorithm: The type of algorithm for the PublicKey.
cryptographic_length: The length in bits of the PublicKey.
value: The bytes of the PublicKey.
key_format_type: The format of the key value.
cryptographic_usage_masks: The list of usage mask flags for PublicKey
application.
names: The list of string names of the PublicKey.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'public_keys'
unique_identifier = Column('uid', Integer,
ForeignKey('keys.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'PublicKey'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, algorithm, length, value,
format_type=enums.KeyFormatType.X_509, masks=None,
name='Public Key', key_wrapping_data=None):
"""
Create a PublicKey.
Args:
algorithm(CryptographicAlgorithm): An enumeration identifying the
type of algorithm for the key.
length(int): The length in bits of the key.
value(bytes): The bytes representing the key.
format_type(KeyFormatType): An enumeration defining the format of
the key value. Optional, defaults to enums.KeyFormatType.X_509.
masks(list): A list of CryptographicUsageMask enumerations
defining how the key will be used. Optional, defaults to None.
name(string): The string name of the key. Optional, defaults to
'Public Key'.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(PublicKey, self).__init__(
key_wrapping_data=key_wrapping_data
)
self._object_type = enums.ObjectType.PUBLIC_KEY
self._valid_formats = [
enums.KeyFormatType.RAW,
enums.KeyFormatType.X_509,
enums.KeyFormatType.PKCS_1]
self.value = value
self.cryptographic_algorithm = algorithm
self.cryptographic_length = length
self.key_format_type = format_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._cryptographic_domain_parameters = list()
self.validate()
def validate(self):
"""
Verify that the contents of the PublicKey object are valid.
Raises:
TypeError: if the types of any PublicKey attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("key value must be bytes")
elif not isinstance(self.cryptographic_algorithm,
enums.CryptographicAlgorithm):
raise TypeError("key algorithm must be a CryptographicAlgorithm "
"enumeration")
elif not isinstance(self.cryptographic_length, six.integer_types):
raise TypeError("key length must be an integer")
elif not isinstance(self.key_format_type, enums.KeyFormatType):
raise TypeError("key format type must be a KeyFormatType "
"enumeration")
elif self.key_format_type not in self._valid_formats:
raise ValueError("key format type must be one of {0}".format(
self._valid_formats))
# TODO (peter-hamilton) Verify that the key bytes match the key format
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"key mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("key name {0} must be a string".format(
position))
def __repr__(self):
algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
length = "length={0}".format(self.cryptographic_length)
value = "value={0}".format(binascii.hexlify(self.value))
format_type = "format_type={0}".format(self.key_format_type)
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
return "PublicKey({0}, {1}, {2}, {3}, {4})".format(
algorithm, length, value, format_type, key_wrapping_data)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, PublicKey):
if self.value != other.value:
return False
elif self.key_format_type != other.key_format_type:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, PublicKey):
return not (self == other)
else:
return NotImplemented
event.listen(PublicKey._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
class PrivateKey(Key):
"""
The PrivateKey class of the simplified KMIP object hierarchy.
A PrivateKey is a core KMIP object that is the subject of key management
operations. For more information, see Section 2.2 of the KMIP 1.1
specification.
Attributes:
cryptographic_algorithm: The type of algorithm for the PrivateKey.
cryptographic_length: The length in bits of the PrivateKey.
value: The bytes of the PrivateKey.
key_format_type: The format of the key value.
cryptographic_usage_masks: The list of usage mask flags for PrivateKey
application. Optional, defaults to None.
names: The list of string names of the PrivateKey. Optional, defaults
to 'Private Key'.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'private_keys'
unique_identifier = Column('uid', Integer,
ForeignKey('keys.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'PrivateKey'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, algorithm, length, value, format_type, masks=None,
name='Private Key', key_wrapping_data=None):
"""
Create a PrivateKey.
Args:
algorithm(CryptographicAlgorithm): An enumeration identifying the
type of algorithm for the key.
length(int): The length in bits of the key.
value(bytes): The bytes representing the key.
format_type(KeyFormatType): An enumeration defining the format of
the key value.
masks(list): A list of CryptographicUsageMask enumerations
defining how the key will be used.
name(string): The string name of the key.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(PrivateKey, self).__init__(
key_wrapping_data=key_wrapping_data
)
self._object_type = enums.ObjectType.PRIVATE_KEY
self._valid_formats = [
enums.KeyFormatType.RAW,
enums.KeyFormatType.PKCS_1,
enums.KeyFormatType.PKCS_8]
self.value = value
self.cryptographic_algorithm = algorithm
self.cryptographic_length = length
self.key_format_type = format_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._cryptographic_domain_parameters = list()
self.validate()
def validate(self):
"""
Verify that the contents of the PrivateKey object are valid.
Raises:
TypeError: if the types of any PrivateKey attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("key value must be bytes")
elif not isinstance(self.cryptographic_algorithm,
enums.CryptographicAlgorithm):
raise TypeError("key algorithm must be a CryptographicAlgorithm "
"enumeration")
elif not isinstance(self.cryptographic_length, six.integer_types):
raise TypeError("key length must be an integer")
elif not isinstance(self.key_format_type, enums.KeyFormatType):
raise TypeError("key format type must be a KeyFormatType "
"enumeration")
elif self.key_format_type not in self._valid_formats:
raise ValueError("key format type must be one of {0}".format(
self._valid_formats))
# TODO (peter-hamilton) Verify that the key bytes match the key format
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"key mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("key name {0} must be a string".format(
position))
def __repr__(self):
algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
length = "length={0}".format(self.cryptographic_length)
value = "value={0}".format(binascii.hexlify(self.value))
format_type = "format_type={0}".format(self.key_format_type)
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
return "PrivateKey({0}, {1}, {2}, {3}, {4})".format(
algorithm, length, value, format_type, key_wrapping_data)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, PrivateKey):
if self.value != other.value:
return False
elif self.key_format_type != other.key_format_type:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, PrivateKey):
return not (self == other)
else:
return NotImplemented
event.listen(PrivateKey._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
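

# A minimal, hedged usage sketch (not part of the original module) showing how
# the PrivateKey constructor documented above might be called. The specific
# enum members and the placeholder key bytes are illustrative assumptions; the
# module-level ``enums`` import is reused.
def _example_build_private_key():
    """Build a sample PrivateKey instance (illustrative only)."""
    return PrivateKey(
        algorithm=enums.CryptographicAlgorithm.RSA,    # assumed enum member
        length=2048,                                   # key length in bits
        value=b'\x00' * 256,                           # placeholder key bytes
        format_type=enums.KeyFormatType.PKCS_8,        # one of _valid_formats
        masks=[enums.CryptographicUsageMask.SIGN],     # assumed usage mask
        name='Example Private Key')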
class SplitKey(Key):
"""
"""
__mapper_args__ = {"polymorphic_identity": "SplitKey"}
__table_args__ = {"sqlite_autoincrement": True}
__tablename__ = "split_keys"
unique_identifier = sqlalchemy.Column(
"uid",
sqlalchemy.Integer,
sqlalchemy.ForeignKey("keys.uid"),
primary_key=True
)
# Split Key object fields
_split_key_parts = sqlalchemy.Column(
"_split_key_parts",
sqlalchemy.Integer,
default=None
)
_key_part_identifier = sqlalchemy.Column(
"_key_part_identifier",
sqlalchemy.Integer,
default=None
)
_split_key_threshold = sqlalchemy.Column(
"_split_key_threshold",
sqlalchemy.Integer,
default=None
)
_split_key_method = sqlalchemy.Column(
"_split_key_method",
sql.EnumType(enums.SplitKeyMethod),
default=None
)
_prime_field_size = sqlalchemy.Column(
"_prime_field_size",
sqlalchemy.BigInteger,
default=None
)
def __init__(self,
cryptographic_algorithm=None,
cryptographic_length=None,
key_value=None,
cryptographic_usage_masks=None,
name="Split Key",
key_format_type=enums.KeyFormatType.RAW,
key_wrapping_data=None,
split_key_parts=None,
key_part_identifier=None,
split_key_threshold=None,
split_key_method=None,
prime_field_size=None):
"""
Create a SplitKey.
Args:
cryptographic_algorithm(enum): A CryptographicAlgorithm enumeration
identifying the type of algorithm for the split key. Required.
cryptographic_length(int): The length in bits of the split key.
Required.
key_value(bytes): The bytes representing the split key. Required.
cryptographic_usage_masks(list): A list of CryptographicUsageMask
enumerations defining how the split key will be used. Optional,
defaults to None.
name(string): The string name of the split key. Optional, defaults
to "Split Key".
key_format_type (enum): A KeyFormatType enumeration specifying the
format of the split key. Optional, defaults to Raw.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the split key has been wrapped.
Optional, defaults to None.
split_key_parts (int): An integer specifying the total number of
parts of the split key. Required.
key_part_identifier (int): An integer specifying which key part
of the split key this key object represents. Required.
split_key_threshold (int): An integer specifying the minimum
number of key parts required to reconstruct the split key.
Required.
split_key_method (enum): A SplitKeyMethod enumeration specifying
how the key was split. Required.
prime_field_size (int): A big integer specifying the prime field
size used for the Polynomial Sharing Prime Field split key
method. Optional, defaults to None.
"""
super(SplitKey, self).__init__(key_wrapping_data=key_wrapping_data)
self._object_type = enums.ObjectType.SPLIT_KEY
self.key_format_type = key_format_type
self.value = key_value
self.cryptographic_algorithm = cryptographic_algorithm
self.cryptographic_length = cryptographic_length
self.names = [name]
if cryptographic_usage_masks:
self.cryptographic_usage_masks.extend(cryptographic_usage_masks)
self.split_key_parts = split_key_parts
self.key_part_identifier = key_part_identifier
self.split_key_threshold = split_key_threshold
self.split_key_method = split_key_method
self.prime_field_size = prime_field_size
@property
def split_key_parts(self):
return self._split_key_parts
@split_key_parts.setter
def split_key_parts(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._split_key_parts = value
else:
raise TypeError("The split key parts must be an integer.")
@property
def key_part_identifier(self):
return self._key_part_identifier
@key_part_identifier.setter
def key_part_identifier(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._key_part_identifier = value
else:
raise TypeError("The key part identifier must be an integer.")
@property
def split_key_threshold(self):
return self._split_key_threshold
@split_key_threshold.setter
def split_key_threshold(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._split_key_threshold = value
else:
raise TypeError("The split key threshold must be an integer.")
@property
def split_key_method(self):
return self._split_key_method
@split_key_method.setter
def split_key_method(self, value):
if (value is None) or (isinstance(value, enums.SplitKeyMethod)):
self._split_key_method = value
else:
raise TypeError(
"The split key method must be a SplitKeyMethod enumeration."
)
@property
def prime_field_size(self):
return self._prime_field_size
@prime_field_size.setter
def prime_field_size(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._prime_field_size = value
else:
raise TypeError("The prime field size must be an integer.")
def __repr__(self):
cryptographic_algorithm = "cryptographic_algorithm={0}".format(
self.cryptographic_algorithm
)
cryptographic_length = "cryptographic_length={0}".format(
self.cryptographic_length
)
key_value = "key_value={0}".format(binascii.hexlify(self.value))
key_format_type = "key_format_type={0}".format(self.key_format_type)
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
cryptographic_usage_masks = "cryptographic_usage_masks={0}".format(
self.cryptographic_usage_masks
)
names = "name={0}".format(self.names)
split_key_parts = "split_key_parts={0}".format(self.split_key_parts)
key_part_identifier = "key_part_identifier={0}".format(
self.key_part_identifier
)
split_key_threshold = "split_key_threshold={0}".format(
self.split_key_threshold
)
split_key_method = "split_key_method={0}".format(self.split_key_method)
prime_field_size = "prime_field_size={0}".format(self.prime_field_size)
return "SplitKey({0})".format(
", ".join(
[
cryptographic_algorithm,
cryptographic_length,
key_value,
key_format_type,
key_wrapping_data,
cryptographic_usage_masks,
names,
split_key_parts,
key_part_identifier,
split_key_threshold,
split_key_method,
prime_field_size
]
)
)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, SplitKey):
if self.value != other.value:
return False
elif self.key_format_type != other.key_format_type:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
elif self.cryptographic_usage_masks != \
other.cryptographic_usage_masks:
return False
elif self.names != other.names:
return False
elif self.split_key_parts != other.split_key_parts:
return False
elif self.key_part_identifier != other.key_part_identifier:
return False
elif self.split_key_threshold != other.split_key_threshold:
return False
elif self.split_key_method != other.split_key_method:
return False
elif self.prime_field_size != other.prime_field_size:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, SplitKey):
return not (self == other)
else:
return NotImplemented
event.listen(
SplitKey._names,
"append",
sql.attribute_append_factory("name_index"),
retval=False
)
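

# A hedged usage sketch (not part of the original module) for the SplitKey
# constructor documented above. The enum members, byte string, and the 2-of-3
# splitting parameters are illustrative assumptions only.
def _example_build_split_key():
    """Build one sample part of a 2-of-3 split key (illustrative only)."""
    return SplitKey(
        cryptographic_algorithm=enums.CryptographicAlgorithm.AES,
        cryptographic_length=128,
        key_value=b'\x00' * 16,                  # placeholder key part bytes
        cryptographic_usage_masks=[enums.CryptographicUsageMask.ENCRYPT],
        name='Example Split Key',
        key_format_type=enums.KeyFormatType.RAW,
        split_key_parts=3,                       # key was split into 3 parts
        key_part_identifier=1,                   # this object is part 1
        split_key_threshold=2,                   # any 2 parts reconstruct it
        split_key_method=enums.SplitKeyMethod.XOR)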
class Certificate(CryptographicObject):
"""
The Certificate class of the simplified KMIP object hierarchy.
A Certificate is a core KMIP object that is the subject of key management
operations. For more information, see Section 2.2 of the KMIP 1.1
specification.
Attributes:
certificate_type: The type of the Certificate.
value: The bytes of the Certificate.
cryptographic_usage_masks: The list of usage mask flags for
Certificate application.
names: The list of string names of the Certificate.
"""
__tablename__ = 'certificates'
unique_identifier = Column('uid', Integer,
ForeignKey('crypto_objects.uid'),
primary_key=True)
certificate_type = Column(
'certificate_type', sql.EnumType(enums.CertificateType))
__mapper_args__ = {
'polymorphic_identity': 'Certificate'
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self, certificate_type, value, masks=None,
name='Certificate'):
"""
Create a Certificate.
Args:
certificate_type(CertificateType): An enumeration defining the
type of the certificate.
value(bytes): The bytes representing the certificate.
masks(list): A list of CryptographicUsageMask enumerations
defining how the certificate will be used.
name(string): The string name of the certificate.
"""
super(Certificate, self).__init__()
self._object_type = enums.ObjectType.CERTIFICATE
self.value = value
self.certificate_type = certificate_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._cryptographic_algorithm = None
self._cryptographic_length = None
self._certificate_length = None
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._cryptographic_parameters = list()
self._digital_signature_algorithm = list()
self.validate()
def validate(self):
"""
Verify that the contents of the Certificate object are valid.
Raises:
TypeError: if the types of any Certificate attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("certificate value must be bytes")
elif not isinstance(self.certificate_type,
enums.CertificateType):
raise TypeError("certificate type must be a CertificateType "
"enumeration")
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"certificate mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("certificate name {0} must be a string".format(
position))
def __str__(self):
return str(binascii.hexlify(self.value))
class X509Certificate(Certificate):
"""
The X509Certificate class of the simplified KMIP object hierarchy.
An X509Certificate is a core KMIP object that is the subject of key
management operations. For more information, see Section 2.2 of the KMIP
1.1 specification.
Attributes:
value: The bytes of the Certificate.
cryptographic_usage_masks: The list of usage mask flags for
Certificate application.
names: The list of string names of the Certificate.
"""
__tablename__ = 'x509_certificates'
unique_identifier = Column('uid', Integer,
ForeignKey('certificates.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'X509Certificate'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, value, masks=None, name='X.509 Certificate'):
"""
Create an X509Certificate.
Args:
value(bytes): The bytes representing the certificate.
masks(list): A list of CryptographicUsageMask enumerations
defining how the certificate will be used.
name(string): The string name of the certificate.
"""
super(X509Certificate, self).__init__(
enums.CertificateType.X_509, value, masks, name)
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._x509_certificate_identifier = None
self._x509_certificate_subject = None
self._x509_certificate_issuer = None
self.validate()
def __repr__(self):
certificate_type = "certificate_type={0}".format(self.certificate_type)
value = "value={0}".format(binascii.hexlify(self.value))
return "X509Certificate({0}, {1})".format(certificate_type, value)
def __eq__(self, other):
if isinstance(other, X509Certificate):
if self.value != other.value:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, X509Certificate):
return not (self == other)
else:
return NotImplemented
event.listen(X509Certificate._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
class SecretData(CryptographicObject):
"""
The SecretData class of the simplified KMIP object hierarchy.
SecretData is one of several CryptographicObjects and is one of the core
KMIP objects that are the subject of key management operations. For more
information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
cryptographic_usage_masks: A list of usage mask enumerations
describing how the CryptographicObject will be used.
data_type: The type of the secret value.
"""
__tablename__ = 'secret_data_objects'
unique_identifier = Column('uid', Integer,
ForeignKey('crypto_objects.uid'),
primary_key=True)
data_type = Column('data_type', sql.EnumType(enums.SecretDataType))
__mapper_args__ = {
'polymorphic_identity': 'SecretData'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, value, data_type, masks=None, name='Secret Data'):
"""
Create a SecretData object.
Args:
value(bytes): The bytes representing secret data.
data_type(SecretDataType): An enumeration defining the type of the
secret value.
masks(list): A list of CryptographicUsageMask enumerations
defining how the key will be used.
name(string): The string name of the key.
"""
super(SecretData, self).__init__()
self._object_type = enums.ObjectType.SECRET_DATA
self.value = value
self.data_type = data_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self.validate()
def validate(self):
"""
Verify that the contents of the SecretData object are valid.
Raises:
TypeError: if the types of any SecretData attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("secret value must be bytes")
elif not isinstance(self.data_type, enums.SecretDataType):
raise TypeError("secret data type must be a SecretDataType "
"enumeration")
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"secret data mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("secret data name {0} must be a string".format(
position))
def __repr__(self):
value = "value={0}".format(binascii.hexlify(self.value))
data_type = "data_type={0}".format(self.data_type)
return "SecretData({0}, {1})".format(value, data_type)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, SecretData):
if self.value != other.value:
return False
elif self.data_type != other.data_type:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, SecretData):
return not (self == other)
else:
return NotImplemented
event.listen(SecretData._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
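

# A hedged usage sketch (not part of the original module) for the SecretData
# constructor documented above. The data type enum member and the byte value
# are illustrative assumptions only.
def _example_build_secret_data():
    """Build a sample SecretData object (illustrative only)."""
    return SecretData(
        value=b'example-password-bytes',
        data_type=enums.SecretDataType.PASSWORD,   # assumed enum member
        name='Example Secret Data')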
class OpaqueObject(ManagedObject):
"""
The OpaqueObject class of the simplified KMIP object hierarchy.
OpaqueObject is one of several ManagedObjects and is one of the core KMIP
objects that are the subject of key management operations. For more
information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
opaque_type: The type of the opaque value.
"""
__tablename__ = 'opaque_objects'
unique_identifier = Column('uid', Integer,
ForeignKey('managed_objects.uid'),
primary_key=True)
opaque_type = Column('opaque_type', sql.EnumType(enums.OpaqueDataType))
__mapper_args__ = {
'polymorphic_identity': 'OpaqueData'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, value, opaque_type, name='Opaque Object'):
"""
Create a OpaqueObject.
Args:
value(bytes): The bytes representing opaque data.
opaque_type(OpaqueDataType): An enumeration defining the type of
the opaque value.
name(string): The string name of the opaque object.
"""
super(OpaqueObject, self).__init__()
self._object_type = enums.ObjectType.OPAQUE_DATA
self.value = value
self.opaque_type = opaque_type
self.names.append(name)
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._digest = None
self._revocation_reason = None
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._destroy_date = None
self._compromise_occurrence_date = None
self._compromise_date = None
self.validate()
def validate(self):
"""
Verify that the contents of the OpaqueObject are valid.
Raises:
TypeError: if the types of any OpaqueObject attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("opaque value must be bytes")
elif not isinstance(self.opaque_type, enums.OpaqueDataType):
raise TypeError("opaque data type must be an OpaqueDataType "
"enumeration")
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("opaque data name {0} must be a string".format(
position))
def __repr__(self):
value = "value={0}".format(binascii.hexlify(self.value))
opaque_type = "opaque_type={0}".format(self.opaque_type)
return "OpaqueObject({0}, {1})".format(value, opaque_type)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, OpaqueObject):
if self.value != other.value:
return False
elif self.opaque_type != other.opaque_type:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, OpaqueObject):
return not (self == other)
else:
return NotImplemented
event.listen(OpaqueObject._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
class ApplicationSpecificInformation(sql.Base):
__tablename__ = "app_specific_info"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
_application_namespace = sqlalchemy.Column(
"application_namespace",
sqlalchemy.String
)
_application_data = sqlalchemy.Column(
"application_data",
sqlalchemy.String
)
managed_objects = sqlalchemy.orm.relationship(
"ManagedObject",
secondary=app_specific_info_map,
back_populates="app_specific_info"
)
def __init__(self,
application_namespace=None,
application_data=None):
"""
Create an ApplicationSpecificInformation attribute.
Args:
application_namespace (str): A string specifying the application
namespace. Required.
application_data (str): A string specifying the application data.
Required.
"""
super(ApplicationSpecificInformation, self).__init__()
self.application_namespace = application_namespace
self.application_data = application_data
@property
def application_namespace(self):
return self._application_namespace
@application_namespace.setter
def application_namespace(self, value):
if (value is None) or (isinstance(value, six.string_types)):
self._application_namespace = value
else:
raise TypeError("The application namespace must be a string.")
@property
def application_data(self):
return self._application_data
@application_data.setter
def application_data(self, value):
if (value is None) or (isinstance(value, six.string_types)):
self._application_data = value
else:
raise TypeError("The application data must be a string.")
def __repr__(self):
application_namespace = "application_namespace='{}'".format(
self.application_namespace
)
application_data = "application_data='{}'".format(
self.application_data
)
return "ApplicationSpecificInformation({})".format(
", ".join(
[
application_namespace,
application_data
]
)
)
def __str__(self):
return str(
{
"application_namespace": self.application_namespace,
"application_data": self.application_data
}
)
def __eq__(self, other):
if isinstance(other, ApplicationSpecificInformation):
if self.application_namespace != other.application_namespace:
return False
elif self.application_data != other.application_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, ApplicationSpecificInformation):
return not (self == other)
else:
return NotImplemented
class ObjectGroup(sql.Base):
__tablename__ = "object_groups"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
_object_group = sqlalchemy.Column(
"object_group",
sqlalchemy.String,
nullable=False
)
managed_objects = sqlalchemy.orm.relationship(
"ManagedObject",
secondary=object_group_map,
back_populates="object_groups"
)
def __init__(self, object_group=None):
"""
Create an ObjectGroup attribute.
Args:
object_group (str): A string specifying the object group. Required.
"""
super(ObjectGroup, self).__init__()
self.object_group = object_group
@property
def object_group(self):
return self._object_group
@object_group.setter
def object_group(self, value):
if (value is None) or (isinstance(value, six.string_types)):
self._object_group = value
else:
raise TypeError("The object group must be a string.")
def __repr__(self):
object_group = "object_group='{}'".format(self.object_group)
return "ObjectGroup({})".format(object_group)
def __str__(self):
return str({"object_group": self.object_group})
def __eq__(self, other):
if isinstance(other, ObjectGroup):
if self.object_group != other.object_group:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, ObjectGroup):
return not (self == other)
else:
return NotImplemented
| apache-2.0 | 5,590,796,173,671,300,000 | 34.45098 | 79 | 0.593939 | false |
rackerlabs/marconi | marconi/queues/transport/wsgi/v1_0/homedoc.py | 1 | 4656 | # Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import json
# NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03
JSON_HOME = {
'resources': {
#------------------------------------------------------------------
# Queues
#------------------------------------------------------------------
'rel/queues': {
'href-template': '/v1/queues{?marker,limit,detailed}',
'href-vars': {
'marker': 'param/marker',
'limit': 'param/queue_limit',
'detailed': 'param/detailed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/queue': {
'href-template': '/v1/queues/{queue_name}',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET', 'HEAD', 'PUT', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
'rel/queue-metadata': {
'href-template': '/v1/queues/{queue_name}/metadata',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET', 'PUT'],
'formats': {
'application/json': {},
},
},
},
'rel/queue-stats': {
'href-template': '/v1/queues/{queue_name}/stats',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
#------------------------------------------------------------------
# Messages
#------------------------------------------------------------------
'rel/messages': {
'href-template': ('/v1/queues/{queue_name}/messages'
'{?marker,limit,echo,include_claimed}'),
'href-vars': {
'queue_name': 'param/queue_name',
'marker': 'param/marker',
'limit': 'param/messages_limit',
'echo': 'param/echo',
'include_claimed': 'param/include_claimed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/post-messages': {
'href-template': '/v1/queues/{queue_name}/messages',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json'],
},
},
#------------------------------------------------------------------
# Claims
#------------------------------------------------------------------
'rel/claim': {
'href-template': '/v1/queues/{queue_name}/claims{?limit}',
'href-vars': {
'queue_name': 'param/queue_name',
'limit': 'param/claim_limit',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json']
},
},
}
}
class Resource(object):
def __init__(self):
document = json.dumps(JSON_HOME, ensure_ascii=False, indent=4)
self.document_utf8 = document.encode('utf-8')
def on_get(self, req, resp, project_id):
resp.data = self.document_utf8
resp.content_type = 'application/json-home'
resp.cache_control = ['max-age=86400']
# status defaults to 200
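

# A hedged client-side sketch (not part of the original module) showing how the
# home document served above could be consumed. The endpoint URL and the use of
# the ``requests`` library are illustrative assumptions only.
def _example_fetch_home_document(base_url='http://localhost:8888'):
    """Fetch and return the parsed json-home document (illustrative only)."""
    import requests  # assumed to be available in the client environment

    resp = requests.get(base_url + '/v1')
    home = resp.json()
    # Each entry maps a link relation to an href template, e.g.:
    #   home['resources']['rel/queues']['href-template']
    return home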
| apache-2.0 | 9,216,185,514,893,187,000 | 31.788732 | 79 | 0.391323 | false |
tdeboissiere/DeepLearningImplementations | DenseNet/run_cifar10.py | 1 | 7219 | from __future__ import print_function
import os
import time
import json
import argparse
import densenet
import numpy as np
import keras.backend as K
from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.utils import np_utils
def run_cifar10(batch_size,
nb_epoch,
depth,
nb_dense_block,
nb_filter,
growth_rate,
dropout_rate,
learning_rate,
weight_decay,
plot_architecture):
""" Run CIFAR10 experiments
:param batch_size: int -- batch size
:param nb_epoch: int -- number of training epochs
:param depth: int -- network depth
:param nb_dense_block: int -- number of dense blocks
:param nb_filter: int -- initial number of conv filter
:param growth_rate: int -- number of new filters added by conv layers
:param dropout_rate: float -- dropout rate
:param learning_rate: float -- learning rate
:param weight_decay: float -- weight decay
:param plot_architecture: bool -- whether to plot network architecture
"""
###################
# Data processing #
###################
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
nb_classes = len(np.unique(y_train))
img_dim = X_train.shape[1:]
if K.image_data_format() == "channels_first":
n_channels = X_train.shape[1]
else:
n_channels = X_train.shape[-1]
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# Normalisation
X = np.vstack((X_train, X_test))
# 2 cases depending on the image ordering
if K.image_data_format() == "channels_first":
for i in range(n_channels):
mean = np.mean(X[:, i, :, :])
std = np.std(X[:, i, :, :])
X_train[:, i, :, :] = (X_train[:, i, :, :] - mean) / std
X_test[:, i, :, :] = (X_test[:, i, :, :] - mean) / std
elif K.image_data_format() == "channels_last":
for i in range(n_channels):
mean = np.mean(X[:, :, :, i])
std = np.std(X[:, :, :, i])
X_train[:, :, :, i] = (X_train[:, :, :, i] - mean) / std
X_test[:, :, :, i] = (X_test[:, :, :, i] - mean) / std
###################
# Construct model #
###################
model = densenet.DenseNet(nb_classes,
img_dim,
depth,
nb_dense_block,
growth_rate,
nb_filter,
dropout_rate=dropout_rate,
weight_decay=weight_decay)
# Model output
model.summary()
# Build optimizer
opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=["accuracy"])
if plot_architecture:
from keras.utils.visualize_util import plot
plot(model, to_file='./figures/densenet_archi.png', show_shapes=True)
####################
# Network training #
####################
print("Training")
list_train_loss = []
list_test_loss = []
list_learning_rate = []
for e in range(nb_epoch):
if e == int(0.5 * nb_epoch):
K.set_value(model.optimizer.lr, np.float32(learning_rate / 10.))
if e == int(0.75 * nb_epoch):
K.set_value(model.optimizer.lr, np.float32(learning_rate / 100.))
split_size = batch_size
num_splits = X_train.shape[0] / split_size
arr_splits = np.array_split(np.arange(X_train.shape[0]), num_splits)
l_train_loss = []
start = time.time()
for batch_idx in arr_splits:
X_batch, Y_batch = X_train[batch_idx], Y_train[batch_idx]
train_logloss, train_acc = model.train_on_batch(X_batch, Y_batch)
l_train_loss.append([train_logloss, train_acc])
test_logloss, test_acc = model.evaluate(X_test,
Y_test,
verbose=0,
batch_size=64)
list_train_loss.append(np.mean(np.array(l_train_loss), 0).tolist())
list_test_loss.append([test_logloss, test_acc])
list_learning_rate.append(float(K.get_value(model.optimizer.lr)))
# to convert numpy array to json serializable
print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))
d_log = {}
d_log["batch_size"] = batch_size
d_log["nb_epoch"] = nb_epoch
d_log["optimizer"] = opt.get_config()
d_log["train_loss"] = list_train_loss
d_log["test_loss"] = list_test_loss
d_log["learning_rate"] = list_learning_rate
json_file = os.path.join('./log/experiment_log_cifar10.json')
with open(json_file, 'w') as fp:
json.dump(d_log, fp, indent=4, sort_keys=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run CIFAR10 experiment')
parser.add_argument('--batch_size', default=64, type=int,
help='Batch size')
parser.add_argument('--nb_epoch', default=30, type=int,
help='Number of epochs')
parser.add_argument('--depth', type=int, default=7,
help='Network depth')
parser.add_argument('--nb_dense_block', type=int, default=1,
help='Number of dense blocks')
parser.add_argument('--nb_filter', type=int, default=16,
help='Initial number of conv filters')
parser.add_argument('--growth_rate', type=int, default=12,
help='Number of new filters added by conv layers')
parser.add_argument('--dropout_rate', type=float, default=0.2,
help='Dropout rate')
parser.add_argument('--learning_rate', type=float, default=1E-3,
help='Learning rate')
parser.add_argument('--weight_decay', type=float, default=1E-4,
help='L2 regularization on weights')
parser.add_argument('--plot_architecture', type=bool, default=False,
help='Save a plot of the network architecture')
args = parser.parse_args()
print("Network configuration:")
for name, value in parser.parse_args()._get_kwargs():
print(name, value)
list_dir = ["./log", "./figures"]
for d in list_dir:
if not os.path.exists(d):
os.makedirs(d)
run_cifar10(args.batch_size,
args.nb_epoch,
args.depth,
args.nb_dense_block,
args.nb_filter,
args.growth_rate,
args.dropout_rate,
args.learning_rate,
args.weight_decay,
args.plot_architecture)
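
# Example invocations (illustrative only; the flag values are arbitrary):
#   python run_cifar10.py
#   python run_cifar10.py --nb_epoch 30 --depth 40 --nb_dense_block 3 \
#       --growth_rate 12 --dropout_rate 0.2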
| mit | -2,395,165,933,684,536,000 | 34.214634 | 79 | 0.532207 | false |
dex4er/django-pyc | django_pyc/management/commands/clearpyc.py | 1 | 2097 | import argparse
import os
import re
import sys
from django.core.management import base
class Command(base.BaseCommand):
help = \
"""
Clears .pyc files from the project.
"""
pattern = r'^.+\.pyc$'
def add_arguments(self, parser):
parser.add_argument(
'--noinput', dest='noinput', action='store_true', default=False,
help="Do NOT prompt the user for input of any kind."
)
parser.add_argument(
'-f', '--force', dest='force', action='store_true', default=False,
help="Force the removing files without user interaction."
)
parser.add_argument(
'-p', '--with-pythonpath', dest='with_pythonpath', action='store_true', default=False,
help="Remove also PYTHONPATH libraries."
)
parser.add_argument(
'path', nargs=argparse.REMAINDER,
help="Directories with libraries"
)
def handle(self, *args, **options):
dirs = options['path'] or sys.path[:1]
if options['with_pythonpath']:
dirs += sys.path[1:]
for d in dirs:
d = d or '.'
if os.path.isdir(d) and os.access(d, os.W_OK):
for dirname, _, filenames in os.walk(d):
for filename in filenames:
fullname = os.path.join(dirname, filename)
if re.search(self.pattern, fullname):
if not options['force'] and not options['noinput']:
confirm_action = input(
"Do you want to delete '%s'? [y/N] " % fullname)
if confirm_action != 'y':
continue
os.remove(fullname)
if int(options['verbosity']) >= 2:
self.stdout.write("Removed %s" % fullname)
else:
if int(options['verbosity']) >= 2:
self.stdout.write("Skipped %s" % d)
| lgpl-3.0 | 5,796,214,227,565,749,000 | 36.446429 | 98 | 0.48784 | false |
daite/textparser | analyze_japanese.py | 1 | 5424 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015 daite
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bs4 import BeautifulSoup as BS
from urlparse import urljoin
import requests
import argparse
import setting
import codecs
import os
class JapTextParser:
def __init__(self, mode='editorial'):
'''
:: init function of basic class
'''
self.mode = mode
self.text_encoding = setting.text_encoding
self.output_file = setting.output_file
self.dedupe_output_file = setting.dedupe_output_file
def analyze_japanese_text(self, search_filter=setting.word_filter):
'''
:: analyze japanese text when given search_filter
:: need to add error handling
'''
for sentence in self.get_text_from_url():
url = setting.app_url(setting.app_id,
search_filter,
sentence)
r = requests.get(url)
status_code = r.status_code
if status_code == 200:
print '[%s] ===> [%s] OK' %(self.mode, status_code)
self.save_text(r.text)
else:
raise RuntimeError("check it")
self.dedupe() # deduping text
def get_text_from_url(self):
'''
:: get text from url
'''
pass
def dedupe(self):
'''
:: dedupe text data
'''
print('deduping.....')
text_list = set()
with codecs.open(self.output_file, 'r',
encoding=self.text_encoding) as f:
for x in f.readlines() :
text_list.add(x)
for text in text_list:
with codecs.open(self.dedupe_output_file, 'a',
encoding=self.text_encoding) as g:
g.write(text)
print('cleaning up...')
os.remove(self.output_file)
def save_text(self, res_text):
'''
:: save useful information to txt file
:: returned by yahoo japanese analyze server
'''
for word in BS(res_text).findAll('word'):
category = word.find('pos').text
kanji = word.find('surface').text
hiragana = word.find('reading').text
try:
with codecs.open(self.output_file, 'a',
encoding=self.text_encoding) as f:
text = '%s\t%s\t%s' %(category, kanji, hiragana)
f.write(text + '\r\n')
except Exception as e:
os.remove(self.output_file)
raise RuntimeError("Error", e)
@staticmethod
def get_japanese_meaning(kanji):
'''
:: get japanese meaning from kotobank
'''
		url = 'https://kotobank.jp/word/%s' % kanji
try:
japanese_meaning = BS(requests.get(url).text).\
find('meta', {'property':'og:description'})['content']
except:
japanese_meaning = 'errors!'
return japanese_meaning
@staticmethod
def get_response(url):
'''
:: staticmethod -> get BS response from url
'''
return BS(requests.get(url).content)
class AsahiParser(JapTextParser):
'''
:: AsahiParser class
'''
def get_text_from_url(self):
'''
:: override function from base class
'''
if self.mode == 'editorial':
url = setting.asahi_editorial_url
else:
url = setting.asahi_tensheng_url
soup = self.get_response(url)
div_tag = soup.find('div', {'class': 'ArticleText'})
for p_tag in div_tag.findAll('p'):
yield p_tag.text
class NikkeiParser(JapTextParser):
'''
:: NikkeiParser class
'''
def get_text_from_url(self):
'''
:: override function from base class
:: get the lastest 2 editorial pages
'''
nikkei_main_url = setting.nikkei_main_url
soup_main = self.get_response(nikkei_main_url).\
findAll('h4', {'class': 'cmn-article_title'})[:2]
for s in soup_main:
nikkei_editorial_url = urljoin(setting.nikkei_host_url, s.find('a')['href'])
soup_editorial = self.get_response(nikkei_editorial_url).\
find('div', {'class': 'cmn-article_text JSID_key_fonttxt'})
for text in soup_editorial.findAll('p'):
yield text
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-t','--tensheng',
help='fetch asahi tensheng',
action="store_true")
parser.add_argument('-e','--editorial',
help='fetch asahi editorial',
action="store_true")
parser.add_argument('-n','--nikkei',
help='fetch nikkei editorial',
action="store_true")
args = parser.parse_args()
if args.tensheng:
a = AsahiParser(mode='tensheng')
a.analyze_japanese_text()
elif args.editorial:
a = AsahiParser()
a.analyze_japanese_text()
elif args.nikkei:
n = NikkeiParser()
n.analyze_japanese_text()
else:
parser.print_help()
exit(1)
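
# Example invocations (illustrative only):
#   python2 analyze_japanese.py -e   # analyze the Asahi editorial
#   python2 analyze_japanese.py -t   # analyze the Asahi "tensheng" column
#   python2 analyze_japanese.py -n   # analyze the Nikkei editorial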
| mit | -3,033,339,715,961,497,000 | 29.133333 | 80 | 0.665007 | false |
znick/anytask | anytask/tasks/models.py | 1 | 16245 | # coding: utf-8
import copy
import sys
import json
from datetime import timedelta
from django.utils import timezone
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q, Max
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.utils.html import escape
from courses.models import Course
from groups.models import Group
def check_json(text):
try:
text_to_json = json.loads(text, strict=False)
if not isinstance(text_to_json, dict):
raise ValueError
return text_to_json
except (ValueError, TypeError):
return False
def get_lang_text(text, lang):
text_ = check_json(text)
if text_:
lang = lang if lang in text_ else settings.LANGUAGE_CODE
return text_[lang]
return unicode(text)
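

# A hedged illustration (not part of the original module) of the multi-language
# text format handled by check_json/get_lang_text above: titles and task texts
# may be plain strings or JSON objects keyed by language code. The sample values
# are assumptions for illustration only.
#
#     >>> get_lang_text('{"en": "Task 1", "ru": "Task 1 (ru)"}', 'en')
#     u'Task 1'
#     >>> get_lang_text('Plain title', 'en')
#     u'Plain title'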
class Task(models.Model):
title = models.CharField(max_length=191, db_index=True, null=True, blank=True)
short_title = models.CharField(max_length=15, db_index=True, null=True, blank=True)
course = models.ForeignKey(Course, db_index=True, null=False, blank=False)
group = models.ForeignKey(Group, db_index=False, null=True, blank=True, default=None)
groups = models.ManyToManyField(Group, blank=False, related_name='groups_set')
weight = models.IntegerField(db_index=True, null=False, blank=False, default=0)
is_hidden = models.BooleanField(db_index=True, null=False, blank=False, default=False)
parent_task = models.ForeignKey('self', db_index=True, null=True, blank=True, related_name='children')
task_text = models.TextField(null=True, blank=True, default=None)
score_max = models.IntegerField(db_index=True, null=False, blank=False, default=0)
max_students = models.IntegerField(null=False, blank=False, default=0)
contest_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
rb_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
TYPE_FULL = 'All'
TYPE_SIMPLE = 'Only mark'
TYPE_SEMINAR = 'Seminar'
TYPE_MATERIAL = 'Material'
TYPE_IPYNB = 'Jupyter Notebook'
TASK_TYPE_CHOICES = (
(TYPE_FULL, _('s_obsuzhdeniem')),
(TYPE_SIMPLE, _('tolko_ocenka')),
(TYPE_MATERIAL, _('material')),
(TYPE_SEMINAR, _('seminar')),
(TYPE_IPYNB, _('jupyter notebook'))
)
type = models.CharField(db_index=False, max_length=128, choices=TASK_TYPE_CHOICES, default=TYPE_FULL)
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
deadline_time = models.DateTimeField(auto_now=False, blank=True, null=True, default=None)
updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)
contest_id = models.IntegerField(db_index=True, null=False, blank=False, default=0)
problem_id = models.CharField(max_length=128, db_index=True, null=True, blank=True)
send_to_users = models.BooleanField(db_index=False, null=False, blank=False, default=False)
sended_notify = models.BooleanField(db_index=True, null=False, blank=False, default=True)
one_file_upload = models.BooleanField(db_index=False, null=False, blank=False, default=False)
accepted_after_contest_ok = models.BooleanField(db_index=False, null=False, blank=False, default=False)
score_after_deadline = models.BooleanField(db_index=False, null=False, blank=False, default=True)
nb_assignment_name = models.CharField(max_length=255, null=True, blank=True)
def __unicode__(self):
return unicode(self.title)
def get_title(self, lang=settings.LANGUAGE_CODE):
return escape(get_lang_text(self.title, lang))
def get_description(self, lang=settings.LANGUAGE_CODE):
return get_lang_text(self.task_text, lang)
def is_text_json(self):
return check_json(self.task_text)
@property
def max_students_on_task(self):
return self.max_students or self.course.max_students_per_task or settings.PYTHONTASK_MAX_USERS_PER_TASK
def user_can_take_task(self, user):
for task_taken in TaskTaken.objects.filter(task=self):
task_taken.update_status()
if user.is_anonymous():
            return (False, 'You need to log in')
if self.is_hidden:
            return (False, 'The task is hidden')
if not self.course.groups.filter(students=user).count():
            return (False, u'You must be a member of one of the course groups')
if Task.objects.filter(parent_task=self).count() > 0:
return (False, u'')
if TaskTaken.objects.filter(task=self).filter(user=user).filter(
Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(status=TaskTaken.STATUS_SCORED))).count() != 0:
return (False, u'')
if self.parent_task is not None:
tasks = Task.objects.filter(parent_task=self.parent_task)
if TaskTaken.objects.filter(user=user).filter(task__in=tasks) \
.exclude(status=TaskTaken.STATUS_CANCELLED) \
.exclude(status=TaskTaken.STATUS_DELETED) \
.count() > 0:
                return (False, u'You have already taken another subtask of this task')
max_not_scored_tasks = self.course.max_not_scored_tasks or \
settings.PYTHONTASK_MAX_TASKS_WITHOUT_SCORE_PER_STUDENT
if max_not_scored_tasks:
if TaskTaken.objects.filter(user=user) \
.filter(task__course=self.course) \
.filter(status=TaskTaken.STATUS_TAKEN).count() >= max_not_scored_tasks:
                return (False, u'You have too many ungraded tasks')
max_incomplete_tasks = self.course.max_incomplete_tasks or settings.PYTHONTASK_MAX_INCOMPLETE_TASKS
if max_incomplete_tasks:
all_scored = TaskTaken.objects.filter(user=user).filter(task__course=self.course) \
.filter(Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(
status=TaskTaken.STATUS_SCORED)))
if sum(t.score != t.task.score_max for t in all_scored) + 1 > max_incomplete_tasks:
                return (False, u'You have too many tasks that are not fully completed')
max_students = self.max_students_on_task or settings.PYTHONTASK_MAX_USERS_PER_TASK
if max_students:
if TaskTaken.objects.filter(task=self).filter(Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(
status=TaskTaken.STATUS_SCORED))).count() >= max_students:
return (
False,
                    u'The task cannot be taken by more than %d students' % max_students)
try:
task_taken = TaskTaken.objects.filter(task=self).filter(user=user).get(status=TaskTaken.STATUS_BLACKLISTED)
blacklist_expired_date = task_taken.blacklisted_till
if blacklist_expired_date:
                return (False, u'You will be able to take this task starting %s' % blacklist_expired_date.strftime("%d.%m.%Y"))
except TaskTaken.DoesNotExist:
pass
return (True, u'')
def user_can_cancel_task(self, user):
if user.is_anonymous() or self.is_hidden:
return False
if TaskTaken.objects.filter(task=self).filter(user=user).filter(status=TaskTaken.STATUS_TAKEN).count() != 0:
return True
return False
def user_can_score_task(self, user):
if user.is_anonymous():
return False
return self.course.user_is_teacher(user)
def user_can_pass_task(self, user):
if user.is_anonymous():
return False
if not self.course.is_python_task:
if self.user_can_take_task(user):
return True
try:
task_taken = self.get_task_takens().get(user=user)
return (task_taken.status == TaskTaken.STATUS_TAKEN or task_taken.status == TaskTaken.STATUS_SCORED)
except TaskTaken.DoesNotExist:
return False
return False
def has_parent(self):
return self.parent_task is not None
def has_subtasks(self):
return Task.objects.filter(parent_task=self).count() > 0
def get_subtasks(self):
return Task.objects.filter(parent_task=self)
def get_task_takens(self):
return TaskTaken.objects.filter(task=self).filter(
Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(status=TaskTaken.STATUS_SCORED)))
def add_user_properties(self, user):
self.can_take = self.user_can_take_task(user)
self.can_cancel = self.user_can_cancel_task(user)
self.can_score = self.user_can_score_task(user)
self.can_pass = self.user_can_pass_task(user)
self.is_shown = not self.is_hidden or self.course.user_is_teacher(user)
def has_issue_access(self):
return self.type not in [self.TYPE_SIMPLE, self.TYPE_MATERIAL, self.TYPE_SEMINAR]
def set_position_in_new_group(self, groups=None):
if not groups:
groups = self.course.groups.all()
else:
for task_related in TaskGroupRelations.objects.filter(task=self).exclude(group__in=groups):
task_related.deleted = True
task_related.save()
for group in list(groups):
task_related, created = TaskGroupRelations.objects.get_or_create(task=self, group=group)
if created:
max_position = TaskGroupRelations.objects.filter(group=group).exclude(id=task_related.id) \
.aggregate(Max('position'))['position__max']
task_related.position = max_position + 1 if max_position is not None else 0
else:
task_related.deleted = False
task_related.save()
def get_url_in_course(self):
return reverse('courses.views.seminar_page', kwargs={'course_id': self.course_id, 'task_id': self.id})
class TaskLog(models.Model):
title = models.CharField(max_length=191, db_index=True, null=True, blank=True)
course = models.ForeignKey(Course, db_index=False, null=False, blank=False)
group = models.ForeignKey(Group, db_index=False, null=True, blank=True, default=None)
groups = models.ManyToManyField(Group, blank=False, related_name='groups_log_set')
weight = models.IntegerField(db_index=False, null=False, blank=False, default=0)
parent_task = models.ForeignKey('self', db_index=True, null=True, blank=True, related_name='parent_task_set')
task_text = models.TextField(null=True, blank=True, default=None)
score_max = models.IntegerField(db_index=False, null=False, blank=False, default=0)
contest_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
rb_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
TYPE_FULL = 'All'
TYPE_SIMPLE = 'Only mark'
TASK_TYPE_CHOICES = (
(TYPE_FULL, _(u's_obsuzhdeniem')),
(TYPE_SIMPLE, _(u'tolko_ocenka')),
)
type = models.CharField(db_index=False, max_length=128, choices=TASK_TYPE_CHOICES, default=TYPE_FULL)
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
deadline_time = models.DateTimeField(auto_now=False, null=True, default=None)
updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)
contest_id = models.IntegerField(db_index=True, null=False, blank=False, default=0)
problem_id = models.CharField(max_length=128, db_index=True, null=True, blank=True)
def __unicode__(self):
return unicode(self.title)
class TaskTaken(models.Model):
STATUS_TAKEN = 0
STATUS_CANCELLED = 1
STATUS_BLACKLISTED = 2
STATUS_SCORED = 3
STATUS_DELETED = 4
user = models.ForeignKey(User, db_index=True, null=False, blank=False)
task = models.ForeignKey(Task, db_index=True, null=False, blank=False)
issue = models.ForeignKey('issues.Issue', db_index=True, null=True, blank=False)
TASK_TAKEN_STATUSES = (
(STATUS_TAKEN, u'Task taken'),
(STATUS_CANCELLED, u'Task cancelled'),
(STATUS_BLACKLISTED, u'Task blacklisted'),
(STATUS_SCORED, u'Task scored'),
(STATUS_DELETED, u'TaskTaken deleted')
)
status = models.IntegerField(choices=TASK_TAKEN_STATUSES, db_index=True, blank=False, default=0)
EDIT = 'EDIT'
QUEUE = 'QUEUE'
OK = 'OK'
STATUS_CHECK_CHOICES = (
        (EDIT, u'Being reworked'),
        (QUEUE, u'Awaiting review'),
        (OK, u'Task credited and/or no longer accepted'),
)
status_check = models.CharField(db_index=True, max_length=5, choices=STATUS_CHECK_CHOICES, default=EDIT)
taken_time = models.DateTimeField(blank=True, null=True)
blacklisted_till = models.DateTimeField(blank=True, null=True)
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
@property
def score(self):
self.update_status()
if not self.issue:
return 0
return self.issue.mark
def update_status(self):
if self.issue and abs(self.issue.mark) > sys.float_info.epsilon and self.status != self.STATUS_SCORED:
self.scored()
if not self.issue.get_byname('responsible_name'):
group = self.task.course.get_user_group(self.user)
if group:
default_teacher = self.task.course.get_default_teacher(group)
if default_teacher:
self.issue.set_byname('responsible_name', default_teacher, author=None)
def take(self):
self.status = self.STATUS_TAKEN
if self.taken_time is None:
self.taken_time = timezone.now()
self.save()
def cancel(self):
dt_from_taken_delta = timezone.now() - self.taken_time
if (dt_from_taken_delta.days) <= settings.PYTHONTASK_MAX_DAYS_TO_FULL_CANCEL:
self.taken_time = None
self.status = self.STATUS_CANCELLED
self.save()
def blacklist(self):
self.status = self.STATUS_BLACKLISTED
self.blacklisted_till = timezone.now() + timedelta(days=settings.PYTHONTASK_DAYS_DROP_FROM_BLACKLIST)
self.save()
def scored(self):
self.status = self.STATUS_SCORED
self.save()
def mark_deleted(self):
self.status = self.STATUS_DELETED
self.taken_time = None
self.blacklisted_till = None
self.save()
class Meta:
unique_together = (("user", "task"),)
def __unicode__(self):
return unicode(self.task) + " (" + unicode(self.user) + ")"
class TaskGroupRelations(models.Model):
task = models.ForeignKey(Task, db_index=False, null=False, blank=False)
group = models.ForeignKey(Group, db_index=False, null=False, blank=False)
position = models.IntegerField(db_index=False, null=False, blank=False, default=0)
deleted = models.BooleanField(db_index=False, null=False, blank=False, default=False)
class Meta:
unique_together = ("task", "group")
def __unicode__(self):
return ' '.join([unicode(self.task), unicode(self.group), unicode(self.position)])
def task_save_to_log_post_save(sender, instance, created, **kwargs):
task_log = TaskLog()
task_log_dict = copy.deepcopy(instance.__dict__)
task_log_dict['id'] = None
task_log.__dict__ = task_log_dict
task_log.sended_notify = False
task_log.save()
task_log.groups.add(*instance.groups.all())
# post_save.connect(task_save_to_log_post_save, sender=Task)
| mit | 2,597,945,272,377,163,300 | 39.148615 | 119 | 0.650166 | false |
elishowk/django-poser | poser/utils/page.py | 1 | 2014 | # -*- coding: utf-8 -*-
from django.conf import settings
import re
APPEND_TO_SLUG = "-copy"
COPY_SLUG_REGEX = re.compile(r'^.*-copy(?:-(\d+))?$')
def is_valid_page_slug(page, slug, site, path=None):
"""Validates given slug depending on settings.
"""
# Exclude the page with the publisher_state == page.PUBLISHER_STATE_DELETE
from poser.models.pagemodel import Page
qs = Page.objects.filter(site=site)
## Check for slugs
if qs.filter(slug=slug).count():
return False
## Check for path
if path and qs.filter(path=path).count():
return False
return True
def get_available_slug(page, new_slug=None):
"""Smart function generates slug for title if current title slug cannot be
used. Appends APPEND_TO_SLUG to slug and checks it again.
(Used in page copy function)
Returns: slug
"""
slug = new_slug or page.slug
# We need the full path for the title to check for conflicting urls
page.slug = slug
page.update_path()
path = page.path
# This checks for conflicting slugs/overwrite_url, for both published and unpublished pages
# This is a simpler check than in page_resolver.is_valid_url which
# takes into account actualy page URL
if not is_valid_page_slug(page, slug, page.site, path):
# add nice copy attribute, first is -copy, then -copy-2, -copy-3, ....
match = COPY_SLUG_REGEX.match(slug)
if match:
try:
next = int(match.groups()[0]) + 1
slug = "-".join(slug.split('-')[:-1]) + "-%d" % next
except TypeError:
slug = slug + "-2"
else:
slug = slug + APPEND_TO_SLUG
return get_available_slug(page, slug)
else:
return slug
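

# A hedged illustration (not part of the original module): if pages with slugs
# "about" and "about-copy" already exist on the site, copying the "about" page
# resolves as "about" -> "about-copy" -> "about-copy-2", following
# APPEND_TO_SLUG and COPY_SLUG_REGEX above.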
def check_title_slugs(page):
"""Checks page slugs for duplicity if required, used after page move/
cut/paste.
"""
old_slug = page.slug
page.slug = get_available_slug(page)
if page.slug != old_slug:
page.save()
| agpl-3.0 | 1,039,450,840,049,646,100 | 31.483871 | 95 | 0.618669 | false |
donkawechico/arguman.org | web/premises/views.py | 1 | 23259 | # -*- coding:utf-8 -*-
import json
from datetime import timedelta
from markdown2 import markdown
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.db.models import Max, Sum
from django.utils.timezone import now
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.views.generic import DetailView, TemplateView, CreateView, View
from django.views.generic.edit import UpdateView
from django.utils.translation import get_language
from django.db.models import Count
from blog.models import Post
from premises.models import Contention, Premise
from premises.forms import (ArgumentCreationForm, PremiseCreationForm,
PremiseEditForm, ReportForm)
from premises.signals import (added_premise_for_premise,
added_premise_for_contention,
reported_as_fallacy,
supported_a_premise)
from premises.templatetags.premise_tags import check_content_deletion
from premises.mixins import PaginationMixin, NextURLMixin
from newsfeed.models import Entry
from profiles.mixins import LoginRequiredMixin
from profiles.models import Profile
def get_ip_address(request):
return (request.META.get('HTTP_X_FORWARDED_FOR') or
request.META.get('REMOTE_ADDR'))
class ContentionDetailView(DetailView):
queryset = (Contention.objects
.select_related('user')
.prefetch_related('premises'))
context_object_name = 'contention'
def get_template_names(self):
view = self.request.GET.get("view")
name = ("list_view" if view == "list" else "tree_view")
return ["premises/%s.html" % name]
def get_parent(self):
premise_id = self.kwargs.get("premise_id")
if premise_id:
return get_object_or_404(Premise, id=premise_id)
def get_premises(self):
contention = self.get_parent() or self.get_object()
return contention.published_children()
def get_context_data(self, **kwargs):
contention = self.get_object()
edit_mode = (
self.request.user.is_superuser or
self.request.user.is_staff or
contention.user == self.request.user)
return super(ContentionDetailView, self).get_context_data(
premises=self.get_premises(),
parent_premise=self.get_parent(),
path=contention.get_absolute_url(),
edit_mode=edit_mode,
serialized=contention.serialize(),
**kwargs)
class ContentionJsonView(DetailView):
model = Contention
def render_to_response(self, context, **response_kwargs):
contention = self.get_object(self.get_queryset())
return HttpResponse(json.dumps({
"nodes": self.build_tree(contention, self.request.user),
}), content_type="application/json")
def build_tree(self, contention, user):
return {
"name": contention.title,
"parent": None,
"pk": contention.pk,
"owner": contention.owner,
"sources": contention.sources,
"is_singular": self.is_singular(contention),
"children": self.get_premises(contention, user)
}
def get_premises(self, contention, user, parent=None):
children = [{
"pk": premise.pk,
"name": premise.text,
"parent": parent.text if parent else None,
"reportable_by_authenticated_user": self.user_can_report(
premise, user),
"report_count": premise.reports.count(),
"user": {
"id": premise.user.id,
"username": premise.user.username,
"absolute_url": reverse("auth_profile",
args=[premise.user.username])
},
"sources": premise.sources,
"premise_type": premise.premise_class(),
"children": (self.get_premises(contention, user, parent=premise)
if premise.published_children().exists() else [])
} for premise in contention.published_premises(parent)]
return children
def user_can_report(self, premise, user):
if user.is_authenticated() and user != premise.user:
return not premise.reported_by(user)
return False
def is_singular(self, contention):
result = contention.premises.all().aggregate(
max_sibling=Max('sibling_count'))
return result['max_sibling'] <= 1
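# Illustrative note (added for clarity, not part of the original module): the
# view above serializes a contention into a nested dict for the front-end tree
# renderer. For a contention with a single premise the payload looks roughly
# like the following (field values are hypothetical):
#
#     {"nodes": {
#         "name": "Example contention", "parent": None, "pk": 1,
#         "owner": None, "sources": "", "is_singular": True,
#         "children": [
#             {"pk": 2, "name": "Example premise", "parent": None,
#              "reportable_by_authenticated_user": False, "report_count": 0,
#              "user": {"id": 3, "username": "someone",
#                       "absolute_url": "/users/someone/"},
#              "sources": "", "premise_type": "because", "children": []}
#         ]}}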
class HomeView(TemplateView, PaginationMixin):
template_name = "index.html"
tab_class = "featured"
paginate_by = 20
def get_context_data(self, **kwargs):
contentions = self.get_contentions()
if self.request.user.is_authenticated():
notifications_qs = self.get_unread_notifications()
notifications = list(notifications_qs)
self.mark_as_read(notifications_qs)
else:
notifications = None
return super(HomeView, self).get_context_data(
next_page_url=self.get_next_page_url(),
tab_class=self.tab_class,
notifications=notifications,
has_next_page=self.has_next_page(),
announcements=self.get_announcements(),
contentions=contentions, **kwargs)
def get_announcements(self):
return Post.objects.filter(is_announcement=True)
def get_unread_notifications(self):
return (self.request.user
.notifications
.filter(is_read=False)[:5])
def mark_as_read(self, notifications):
pks = notifications.values_list("id", flat=True)
(self.request.user
.notifications
.filter(id__in=pks)
.update(is_read=True))
def get_contentions(self, paginate=True):
contentions = (Contention
.objects
.language()
.filter(is_featured=True)
.order_by("-date_modification"))
if paginate:
contentions = (contentions[self.get_offset(): self.get_limit()])
return contentions
class NotificationsView(LoginRequiredMixin, HomeView):
template_name = "notifications.html"
def get_context_data(self, **kwargs):
notifications_qs = self.request.user.notifications.all()[:40]
notifications = list(notifications_qs)
self.mark_as_read(notifications_qs)
return super(HomeView, self).get_context_data(
notifications=notifications,
**kwargs)
class SearchView(HomeView):
tab_class = 'search'
template_name = 'search/search.html'
partial_templates = {
'contentions': 'search/contention.html',
'users': 'search/profile.html',
'premises' : 'search/premise.html'
}
method_mapping = {'contentions': "get_contentions",
'users': "get_users",
'premises': "get_premises"}
def dispatch(self, request, *args, **kwargs):
self.type = request.GET.get('type', 'contentions')
if not self.method_mapping.get(self.type):
raise Http404()
return super(SearchView, self).dispatch(request, *args, **kwargs)
def get_keywords(self):
return self.request.GET.get('keywords') or ""
def has_next_page(self):
method = getattr(self, self.method_mapping[self.type])
total = method().count()
return total > (self.get_offset() + self.paginate_by)
def get_search_bundle(self):
method = getattr(self, self.method_mapping[self.type])
return [{'template': self.partial_templates[self.type],
'object': item} for item in method()]
def get_context_data(self, **kwargs):
return super(SearchView, self).get_context_data(
results=self.get_search_bundle(),
**kwargs)
def get_next_page_url(self):
offset = self.get_offset() + self.paginate_by
return '?offset=%(offset)s&keywords=%(keywords)s&type=%(type)s' % {
"offset": offset,
"type": self.type,
"keywords": self.get_keywords()
}
def get_premises(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 3:
result = Premise.objects.none()
else:
result = (Premise.objects.filter(
argument__language=get_language(),
text__contains=keywords))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
def get_users(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 2:
result = Profile.objects.none()
else:
result = (Profile.objects.filter(
username__icontains=keywords))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
def get_contentions(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 2:
result = Contention.objects.none()
else:
result = (Contention
.objects
.filter(title__icontains=keywords,
language=get_language()))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
class NewsView(HomeView):
tab_class = "news"
def get_contentions(self, paginate=True):
contentions = (
Contention
.objects
.language()
.filter(is_published=True)
)
if paginate:
contentions = contentions[self.get_offset():self.get_limit()]
return contentions
class StatsView(HomeView):
tab_class = "stats"
template_name = "stats.html"
partial_templates = {
Profile: "stats/profile.html",
Contention: "stats/contention.html",
Premise: "stats/premise.html",
}
method_mapping = {
"active_users": "get_active_users",
"supported_users": "get_supported_users",
"disgraced_users": "get_disgraced_users",
"supported_premises": "get_supported_premises",
"fallacy_premises": "get_fallacy_premises",
"crowded_contentions": "get_crowded_contentions",
}
time_ranges = [7, 30]
def get_context_data(self, **kwargs):
return super(StatsView, self).get_context_data(
stats=self.get_stats_bundle(),
stats_type=self.get_stats_type(),
days=self.days,
**kwargs)
def get_stats_type(self):
return self.request.GET.get("what")
def build_time_filters(self, date_field="date_creation"):
days = self.request.GET.get("days")
if not days or days == "all":
self.days = None
return {}
try:
days = int(days)
except (TypeError, ValueError):
days = None
if not days or days not in self.time_ranges:
raise Http404()
self.days = days
field_expression = "%s__gt" % date_field
return {
field_expression: timezone.now() - timedelta(days=days)
}
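    # Sketch (comment added for clarity, not in the original source): with
    # ?days=7 the method above returns a dict such as
    #     {"premise__date_creation__gt": timezone.now() - timedelta(days=7)}
    # which the stat queries below splat into .filter(**filters); with
    # ?days=all (or no days parameter) it returns {} so no time filter applies.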
def get_stats_bundle(self):
stat_type = self.get_stats_type()
if stat_type not in self.method_mapping:
raise Http404()
method = getattr(self, self.method_mapping[stat_type])
return [
{
"template": self.partial_templates[type(item)],
"object": item
} for item in method()
]
def get_active_users(self):
return Profile.objects.annotate(
premise_count=Sum("premise"),
).filter(
premise_count__gt=0,
**self.build_time_filters(date_field="premise__date_creation")
).order_by("-premise_count")[:10]
def get_supported_users(self):
return Profile.objects.annotate(
supporter_count=Sum("premise__supporters"),
).filter(
supporter_count__gt=0,
**self.build_time_filters(date_field="premise__date_creation")
).order_by("-supporter_count")[:10]
def get_disgraced_users(self):
return Profile.objects.annotate(
report_count=Sum("premise__reports"),
).filter(
report_count__gt=0,
**self.build_time_filters(date_field="premise__date_creation")
).order_by("-report_count")[:10]
def get_supported_premises(self):
return Premise.objects.annotate(
supporter_count=Sum("supporters")
).filter(
argument__language=get_language(),
supporter_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-supporter_count")[:50]
def get_fallacy_premises(self):
return Premise.objects.annotate(
report_count=Sum("reports"),
).filter(
report_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-report_count")[:10]
def get_crowded_contentions(self):
return Contention.objects.annotate(
premise_count=Sum("premises"),
).filter(
language=get_language(),
premise_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-premise_count")[:10]
class UpdatedArgumentsView(HomeView):
tab_class = "updated"
def get_contentions(self, paginate=True):
contentions = (Contention
.objects
.filter(is_published=True)
.order_by('-date_modification'))
if paginate:
contentions = contentions[self.get_offset():self.get_limit()]
return contentions
class ControversialArgumentsView(HomeView):
tab_class = "controversial"
def get_contentions(self, paginate=True):
last_week = now() - timedelta(days=3)
contentions = (Contention
.objects
.annotate(num_children=Count('premises'))
.order_by('-num_children')
.filter(date_modification__gte=last_week))
if paginate:
return contentions[self.get_offset():self.get_limit()]
return contentions
class AboutView(TemplateView):
template_name = "about.html"
def get_text_file(self):
language = get_language()
return render_to_string("about-%s.md" % language)
def get_context_data(self, **kwargs):
content = markdown(self.get_text_file())
return super(AboutView, self).get_context_data(
content=content, **kwargs)
class TosView(TemplateView):
template_name = "tos.html"
def get_context_data(self, **kwargs):
content = markdown(render_to_string("tos.md"))
return super(TosView, self).get_context_data(
content=content, **kwargs)
class ArgumentCreationView(LoginRequiredMixin, CreateView):
template_name = "premises/new_contention.html"
form_class = ArgumentCreationForm
def form_valid(self, form):
form.instance.user = self.request.user
form.instance.ip_address = get_ip_address(self.request)
form.instance.language = get_language()
form.instance.is_published = True
response = super(ArgumentCreationView, self).form_valid(form)
form.instance.update_sibling_counts()
return response
class ArgumentUpdateView(LoginRequiredMixin, UpdateView):
template_name = "premises/edit_contention.html"
form_class = ArgumentCreationForm
def get_queryset(self):
contentions = Contention.objects.all()
if self.request.user.is_superuser:
return contentions
return contentions.filter(user=self.request.user)
def form_valid(self, form):
form.instance.user = self.request.user
response = super(ArgumentUpdateView, self).form_valid(form)
form.instance.update_sibling_counts()
return response
class ArgumentPublishView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
contention.is_published = True
contention.save()
messages.info(request, u"Argument is published now.")
return redirect(contention)
class ArgumentUnpublishView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
contention.is_published = False
contention.save()
        messages.info(request, u"Argument has been unpublished.")
return redirect(contention)
class ArgumentDeleteView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
if check_content_deletion(contention):
# remove notification
Entry.objects.delete(contention.get_newsfeed_type(), contention.id)
contention.delete()
messages.info(request, u"Argument has been removed.")
return redirect("home")
else:
messages.info(request, u"Argument cannot be deleted.")
return redirect(contention)
delete = post
class PremiseEditView(LoginRequiredMixin, UpdateView):
template_name = "premises/edit_premise.html"
form_class = PremiseEditForm
def get_queryset(self):
premises = Premise.objects.all()
if self.request.user.is_superuser:
return premises
return premises.filter(user=self.request.user)
def form_valid(self, form):
response = super(PremiseEditView, self).form_valid(form)
form.instance.argument.update_sibling_counts()
return response
def get_context_data(self, **kwargs):
return super(PremiseEditView, self).get_context_data(**kwargs)
class PremiseCreationView(NextURLMixin, LoginRequiredMixin, CreateView):
template_name = "premises/new_premise.html"
form_class = PremiseCreationForm
def get_context_data(self, **kwargs):
return super(PremiseCreationView, self).get_context_data(
contention=self.get_contention(),
view=self.get_view_name(),
parent=self.get_parent(),
**kwargs)
def form_valid(self, form):
contention = self.get_contention()
form.instance.user = self.request.user
form.instance.argument = contention
form.instance.parent = self.get_parent()
form.instance.is_approved = True
form.instance.ip_address = get_ip_address(self.request)
form.save()
contention.update_sibling_counts()
if form.instance.parent:
added_premise_for_premise.send(sender=self,
premise=form.instance)
else:
added_premise_for_contention.send(sender=self,
premise=form.instance)
contention.date_modification = timezone.now()
contention.save()
return redirect(
form.instance.get_parent().get_absolute_url() +
self.get_next_parameter()
)
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
def get_parent(self):
parent_pk = self.kwargs.get("pk")
if parent_pk:
return get_object_or_404(Premise, pk=parent_pk)
class PremiseSupportView(NextURLMixin, LoginRequiredMixin, View):
def get_premise(self):
premises = Premise.objects.exclude(user=self.request.user)
return get_object_or_404(premises, pk=self.kwargs['pk'])
def post(self, request, *args, **kwargs):
premise = self.get_premise()
premise.supporters.add(self.request.user)
supported_a_premise.send(sender=self, premise=premise,
user=self.request.user)
return redirect(
premise.get_parent().get_absolute_url() +
self.get_next_parameter() +
"#%s" % premise.pk
)
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
class PremiseUnsupportView(PremiseSupportView):
def delete(self, request, *args, **kwargs):
premise = self.get_premise()
premise.supporters.remove(self.request.user)
return redirect(
premise.get_parent().get_absolute_url() +
self.get_next_parameter() +
"#%s" % premise.pk
)
post = delete
class PremiseDeleteView(LoginRequiredMixin, View):
def get_premise(self):
if self.request.user.is_staff:
premises = Premise.objects.all()
else:
premises = Premise.objects.filter(user=self.request.user)
return get_object_or_404(premises,
pk=self.kwargs['pk'])
def delete(self, request, *args, **kwargs):
premise = self.get_premise()
premise.delete()
premise.update_sibling_counts()
contention = self.get_contention()
if not contention.premises.exists():
contention.is_published = False
contention.save()
return redirect(contention)
post = delete
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
class ReportView(NextURLMixin, LoginRequiredMixin, CreateView):
form_class = ReportForm
template_name = "premises/report.html"
def get_context_data(self, **kwargs):
return super(ReportView, self).get_context_data(
premise=self.get_premise(),
view=self.get_view_name(),
**kwargs)
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
def get_premise(self):
return get_object_or_404(Premise, pk=self.kwargs['pk'])
def get_initial(self):
return {
'contention': self.get_contention(),
'premise': self.get_premise(),
'reporter': self.request.user
}
def form_valid(self, form):
contention = self.get_contention()
premise = self.get_premise()
form.instance.contention = contention
form.instance.premise = premise
form.instance.reporter = self.request.user
form.save()
reported_as_fallacy.send(sender=self, report=form.instance)
return redirect(
premise.get_parent().get_absolute_url() +
self.get_next_parameter() +
"#%s" % premise.pk
)
| mit | -2,597,903,517,027,247,000 | 32.458993 | 79 | 0.603122 | false |
jnosal/seth | seth/tests/test_authentication.py | 1 | 2947 | from seth import auth
from seth.tests import IntegrationTestBase
from seth.classy.rest import generics
class DefaultAuthenticatedResource(generics.GenericApiView):
authentication_policy = None
def get(self, **kwargs):
return {}
class BaseAuthenticatedTestCase(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
config.register_resource(DefaultAuthenticatedResource, '/test_basic')
def test_default_setup(self):
r = self.app.get('/test_basic')
self.assertEqual(r.status_int, 200)
class TokenAuthenticationPolicy(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class CheckQueryParamsResource(generics.GenericApiView):
authentication_policy = auth.SecretTokenAuthenticationPolicy
def get(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResource, '/test_token')
def test_no_token_in_params(self):
r = self.app.get('/test_token', expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_wrong_token_in_params(self):
r = self.app.get('/test_token?token=wrong_token', expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_correct_token_in_params_wrong_param_name(self):
r = self.app.get('/test_token?tokennamewrong=secret', expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_correct_token_param_name_and_value(self):
r = self.app.get('/test_token?token=secret')
self.assertEqual(r.status_int, 200)
class CheckHeaderAuthenticationPolicy(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class AllowHeaderAuthPolicy(auth.HeaderAuthenticationPolicy):
header_name = 'My-Header'
header_secret = 'My-Value'
class CheckQueryParamsResourceSecond(generics.GenericApiView):
authentication_policy = AllowHeaderAuthPolicy
def get(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResourceSecond, '/test_header')
def test_no_header_in_request(self):
r = self.app.get('/test_header', headers={}, expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_header_in_request_but_incorrect_value(self):
r = self.app.get('/test_header', headers={'My-Header': '123'}, expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_value_in_header_but_wrong_header_name(self):
r = self.app.get('/test_header', headers={'Wrong': 'My-Value'}, expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_correct_header_name_and_value(self):
r = self.app.get('/test_header', headers={'My-Header': 'My-Value'}, expect_errors=True)
self.assertEqual(r.status_int, 200) | mit | 4,533,596,743,129,733,600 | 33.682353 | 95 | 0.673906 | false |
mitsuhiko/django | django/test/utils.py | 1 | 7082 | from __future__ import with_statement
import sys
import time
import os
import warnings
from django.conf import settings, UserSettingsHolder
from django.core import mail
from django.core.mail.backends import locmem
from django.test.signals import template_rendered, setting_changed
from django.template import Template, loader, TemplateDoesNotExist
from django.template.loaders import cached
from django.utils.translation import deactivate
from django.utils.functional import wraps
__all__ = (
'Approximate', 'ContextList', 'get_runner', 'override_settings',
'setup_test_environment', 'teardown_test_environment',
)
RESTORE_LOADERS_ATTR = '_original_template_source_loaders'
class Approximate(object):
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
if self.val == other:
return True
return round(abs(self.val-other), self.places) == 0
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, basestring):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def __contains__(self, key):
try:
value = self[key]
except KeyError:
return False
return True
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
- Set the email backend to the locmem email backend.
- Setting the active locale to match the LANGUAGE_CODE setting.
"""
Template.original_render = Template._render
Template._render = instrumented_test_render
mail.original_email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
mail.outbox = []
deactivate()
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template._render = Template.original_render
del Template.original_render
settings.EMAIL_BACKEND = mail.original_email_backend
del mail.original_email_backend
del mail.outbox
def get_warnings_state():
"""
Returns an object containing the state of the warnings module
"""
# There is no public interface for doing this, but this implementation of
# get_warnings_state and restore_warnings_state appears to work on Python
# 2.4 to 2.7.
return warnings.filters[:]
def restore_warnings_state(state):
"""
Restores the state of the warnings module when passed an object that was
returned by get_warnings_state()
"""
warnings.filters = state[:]
def get_runner(settings, test_runner_class=None):
if not test_runner_class:
test_runner_class = settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, test_path[-1])
test_runner = getattr(test_module, test_path[-1])
return test_runner
def setup_test_template_loader(templates_dict, use_cached_loader=False):
"""
Changes Django to only find templates from within a dictionary (where each
key is the template name and each value is the corresponding template
content to return).
Use meth:`restore_template_loaders` to restore the original loaders.
"""
if hasattr(loader, RESTORE_LOADERS_ATTR):
raise Exception("loader.%s already exists" % RESTORE_LOADERS_ATTR)
def test_template_loader(template_name, template_dirs=None):
"A custom template loader that loads templates from a dictionary."
try:
return (templates_dict[template_name], "test:%s" % template_name)
except KeyError:
raise TemplateDoesNotExist(template_name)
if use_cached_loader:
template_loader = cached.Loader(('test_template_loader',))
template_loader._cached_loaders = (test_template_loader,)
else:
template_loader = test_template_loader
setattr(loader, RESTORE_LOADERS_ATTR, loader.template_source_loaders)
loader.template_source_loaders = (template_loader,)
return template_loader
def restore_template_loaders():
"""
Restores the original template loaders after
:meth:`setup_test_template_loader` has been run.
"""
loader.template_source_loaders = getattr(loader, RESTORE_LOADERS_ATTR)
delattr(loader, RESTORE_LOADERS_ATTR)
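# Usage sketch (illustrative addition, not part of the original module):
#
#     setup_test_template_loader({'404.html': 'Not found!'})
#     try:
#         tmpl = loader.get_template('404.html')  # served from the dict above
#         ...                                     # render and assert as needed
#     finally:
#         restore_template_loaders()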
class OverrideSettingsHolder(UserSettingsHolder):
"""
A custom setting holder that sends a signal upon change.
"""
def __setattr__(self, name, value):
UserSettingsHolder.__setattr__(self, name, value)
setting_changed.send(sender=name, setting=name, value=value)
class override_settings(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
self.wrapped = settings._wrapped
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import TestCase
if isinstance(test_func, type) and issubclass(test_func, TestCase):
class inner(test_func):
def _pre_setup(innerself):
self.enable()
super(inner, innerself)._pre_setup()
def _post_teardown(innerself):
super(inner, innerself)._post_teardown()
self.disable()
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def enable(self):
override = OverrideSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
settings._wrapped = override
def disable(self):
settings._wrapped = self.wrapped
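# Usage sketch (illustrative addition, not part of the original module):
# override_settings can wrap a TestCase subclass, a single test function, or a
# block of code:
#
#     @override_settings(USE_TZ=True)
#     class MyTests(TestCase):
#         ...
#
#     with override_settings(DEBUG=True):
#         ...  # settings.DEBUG is True only inside this block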
| bsd-3-clause | 2,877,639,968,131,744,300 | 30.616071 | 78 | 0.656735 | false |
foursquare/pants | tests/python/pants_test/java/test_nailgun_integration.py | 1 | 1196 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class TestNailgunIntegration(PantsRunIntegrationTest):
def test_scala_repl_helloworld_input(self):
"""Integration test to exercise possible closed-loop breakages in NailgunClient, NailgunSession
and InputReader.
"""
target = 'examples/src/scala/org/pantsbuild/example/hello/welcome'
pants_run = self.run_pants(
command=['repl', target, '--quiet'],
stdin_data=(
'import org.pantsbuild.example.hello.welcome.WelcomeEverybody\n'
'println(WelcomeEverybody("World" :: Nil).head)\n'
),
# Override the PANTS_CONFIG_FILES="pants.travis-ci.ini" used within TravisCI to enable
# nailgun usage for the purpose of exercising that stack in the integration test.
config={'DEFAULT': {'execution_strategy': 'nailgun'}}
)
self.assert_success(pants_run)
self.assertIn('Hello, World!', pants_run.stdout_data.splitlines())
| apache-2.0 | 4,745,170,059,667,853,000 | 43.296296 | 99 | 0.72408 | false |
PyCQA/astroid | astroid/__pkginfo__.py | 1 | 1535 | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2014-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2017 Ceridwen <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Radosław Ganczarek <[email protected]>
# Copyright (c) 2016 Moises Lopez <[email protected]>
# Copyright (c) 2017 Hugo <[email protected]>
# Copyright (c) 2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2017 Calen Pennington <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Ashley Whetter <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2019 Uilian Ries <[email protected]>
# Copyright (c) 2019 Thomas Hisch <[email protected]>
# Copyright (c) 2020-2021 hippo91 <[email protected]>
# Copyright (c) 2020 David Gilman <[email protected]>
# Copyright (c) 2020 Konrad Weihmann <[email protected]>
# Copyright (c) 2020 Felix Mölder <[email protected]>
# Copyright (c) 2020 Michael <[email protected]>
# Copyright (c) 2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
__version__ = "2.6.3-dev0"
version = __version__
| lgpl-2.1 | -3,546,346,228,540,776,000 | 53.678571 | 85 | 0.745265 | false |
KSchopmeyer/smipyping | smicli/__init__.py | 1 | 1459 | # (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
__init__ for smicli click components.
"""
# smicli click support libraries
from .smicli import * # noqa: F401,F403
from ._click_context import * # noqa: F401,F403
from ._click_common import * # noqa: F401,F403
from ._common_options import * # noqa: F401,F403
from ._click_configfile import * # noqa: F401,F403
from ._tableoutput import * # noqa: F401,F403
from ._cmd_targets import * # noqa: F401,F403
from ._cmd_provider import * # noqa: F401,F403
from ._cmd_explorer import * # noqa: F401,F403
from ._cmd_cimping import * # noqa: F401,F403
from ._cmd_sweep import * # noqa: F401,F403
from ._cmd_history import * # noqa: F401,F403
from ._cmd_programs import * # noqa: F401,F403
from ._cmd_users import * # noqa: F401,F403
from ._cmd_companies import * # noqa: F401,F403
from ._cmd_notifications import * # noqa: F401,F403
| mit | -666,475,931,558,996,700 | 40.685714 | 74 | 0.7183 | false |
rustyhowell/raytracer_py | hitable.py | 1 | 2340 | from collections import namedtuple
from vector3 import Vec3, dot
from math import sqrt
from ray import Ray
HitRecord = namedtuple("HitRecord", ['t', 'p', 'normal', 'material'])
class Hitable:
def hit(self, ray_, t_min, t_max):
"""
Determine if the ray will hit the object
:param ray_:
:param t_min:
:param t_max:
        :return: a tuple of (True, HitRecord) on a hit, otherwise (False, None)
"""
        raise NotImplementedError("Override in subclass")
class Sphere(Hitable):
def __init__(self, center, radius, material):
self.center = center
self.radius = radius
self.material = material
def hit(self, ray_, t_min, t_max):
assert isinstance(ray_, Ray)
oc = ray_.origin - self.center
a = dot(ray_.direction, ray_.direction)
b = dot(oc, ray_.direction)
c = dot(oc, oc) - self.radius * self.radius
discriminant = b * b - a * c
if discriminant > 0.0:
temp = (-b - sqrt(b*b - a * c)) / a
if t_min < temp < t_max:
p = ray_.point_at_parameter(temp)
rec = HitRecord(t=temp,
p=p,
normal=(p - self.center) / self.radius,
material=self.material
)
return True, rec
temp = (-b + sqrt(b*b - a * c)) / a
if t_min < temp < t_max:
p = ray_.point_at_parameter(temp)
rec = HitRecord(t=temp,
p=p,
normal=(p - self.center) / self.radius,
material=self.material
)
return True, rec
return False, None
class HitableList(Hitable):
def __init__(self):
self.shapes = []
def append(self, shape):
self.shapes.append(shape)
def hit(self, ray_, t_min, t_max):
hit_anything = False
closest_so_far = t_max
rec = None
for shape in self.shapes:
hit, tmprec = shape.hit(ray_, t_min, closest_so_far)
if hit:
hit_anything = True
closest_so_far = tmprec.t
rec = tmprec
return hit_anything, rec
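# Minimal usage sketch (added for illustration; it assumes Ray(origin,
# direction) and Vec3(x, y, z) constructors, which are defined outside this
# file):
#
#     world = HitableList()
#     world.append(Sphere(Vec3(0, 0, -1), 0.5, material=None))
#     hit, rec = world.hit(Ray(Vec3(0, 0, 0), Vec3(0, 0, -1)), 0.001, 1.0e9)
#     if hit:
#         print(rec.t, rec.p, rec.normal)  # nearest intersection at t == 0.5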
| mit | 6,299,270,001,141,219,000 | 29 | 71 | 0.478205 | false |
heracek/django-nonrel | tests/regressiontests/file_uploads/tests.py | 1 | 11960 | #! -*- coding: utf-8 -*-
import errno
import os
import shutil
from StringIO import StringIO
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http.multipartparser import MultiPartParser
from django.test import TestCase, client
from django.utils import simplejson
from django.utils import unittest
from django.utils.hashcompat import sha_constructor
from models import FileModel, temp_storage, UPLOAD_TO
import uploadhandler
UNICODE_FILENAME = u'test-0123456789_中文_Orléans.jpg'
class FileUploadTests(TestCase):
def test_simple_upload(self):
post_data = {
'name': 'Ringo',
'file_field': open(__file__),
}
response = self.client.post('/file_uploads/upload/', post_data)
self.assertEqual(response.status_code, 200)
def test_large_upload(self):
tdir = tempfile.gettempdir()
file1 = tempfile.NamedTemporaryFile(suffix=".file1", dir=tdir)
file1.write('a' * (2 ** 21))
file1.seek(0)
file2 = tempfile.NamedTemporaryFile(suffix=".file2", dir=tdir)
file2.write('a' * (10 * 2 ** 20))
file2.seek(0)
post_data = {
'name': 'Ringo',
'file_field1': file1,
'file_field2': file2,
}
for key in post_data.keys():
try:
post_data[key + '_hash'] = sha_constructor(post_data[key].read()).hexdigest()
post_data[key].seek(0)
except AttributeError:
post_data[key + '_hash'] = sha_constructor(post_data[key]).hexdigest()
response = self.client.post('/file_uploads/verify/', post_data)
self.assertEqual(response.status_code, 200)
def test_unicode_file_name(self):
tdir = tempfile.gettempdir()
# This file contains chinese symbols and an accented char in the name.
file1 = open(os.path.join(tdir, UNICODE_FILENAME.encode('utf-8')), 'w+b')
file1.write('b' * (2 ** 10))
file1.seek(0)
post_data = {
'file_unicode': file1,
}
response = self.client.post('/file_uploads/unicode_name/', post_data)
file1.close()
try:
os.unlink(file1.name)
except:
pass
self.assertEqual(response.status_code, 200)
def test_dangerous_file_names(self):
"""Uploaded file names should be sanitized before ever reaching the view."""
# This test simulates possible directory traversal attacks by a
        # malicious uploader. We have to do some monkeybusiness here to construct
        # a malicious payload with an invalid file name (containing os.sep or
        # os.pardir). This is similar to what an attacker would need to do when
# trying such an attack.
scary_file_names = [
"/tmp/hax0rd.txt", # Absolute path, *nix-style.
"C:\\Windows\\hax0rd.txt", # Absolute path, win-syle.
"C:/Windows/hax0rd.txt", # Absolute path, broken-style.
"\\tmp\\hax0rd.txt", # Absolute path, broken in a different way.
"/tmp\\hax0rd.txt", # Absolute path, broken by mixing.
"subdir/hax0rd.txt", # Descendant path, *nix-style.
"subdir\\hax0rd.txt", # Descendant path, win-style.
"sub/dir\\hax0rd.txt", # Descendant path, mixed.
"../../hax0rd.txt", # Relative path, *nix-style.
"..\\..\\hax0rd.txt", # Relative path, win-style.
"../..\\hax0rd.txt" # Relative path, mixed.
]
payload = []
for i, name in enumerate(scary_file_names):
payload.extend([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
'Content-Type: application/octet-stream',
'',
'You got pwnd.'
])
payload.extend([
'--' + client.BOUNDARY + '--',
'',
])
payload = "\r\n".join(payload)
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/file_uploads/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(payload),
}
response = self.client.request(**r)
# The filenames should have been sanitized by the time it got to the view.
        received = simplejson.loads(response.content)
        for i, name in enumerate(scary_file_names):
            got = received["file%s" % i]
self.assertEqual(got, "hax0rd.txt")
def test_filename_overflow(self):
"""File names over 256 characters (dangerous on some platforms) get fixed up."""
name = "%s.txt" % ("f"*500)
payload = "\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="%s"' % name,
'Content-Type: application/octet-stream',
'',
'Oops.'
'--' + client.BOUNDARY + '--',
'',
])
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/file_uploads/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(payload),
}
got = simplejson.loads(self.client.request(**r).content)
self.assert_(len(got['file']) < 256, "Got a long file name (%s characters)." % len(got['file']))
def test_custom_upload_handler(self):
# A small file (under the 5M quota)
smallfile = tempfile.NamedTemporaryFile()
smallfile.write('a' * (2 ** 21))
smallfile.seek(0)
# A big file (over the quota)
bigfile = tempfile.NamedTemporaryFile()
bigfile.write('a' * (10 * 2 ** 20))
bigfile.seek(0)
# Small file posting should work.
response = self.client.post('/file_uploads/quota/', {'f': smallfile})
got = simplejson.loads(response.content)
self.assert_('f' in got)
# Large files don't go through.
response = self.client.post("/file_uploads/quota/", {'f': bigfile})
got = simplejson.loads(response.content)
self.assert_('f' not in got)
def test_extra_content_type(self):
f = tempfile.NamedTemporaryFile()
f.write('a' * (2 ** 21))
f.seek(0)
f.content_type = 'text/plain; blob-key=upload blob key; other=test'
response = self.client.post("/file_uploads/content_type_extra/", {'f': f})
got = simplejson.loads(response.content)
self.assertEqual(got['f'], 'upload blob key')
def test_broken_custom_upload_handler(self):
f = tempfile.NamedTemporaryFile()
f.write('a' * (2 ** 21))
f.seek(0)
# AttributeError: You cannot alter upload handlers after the upload has been processed.
self.assertRaises(
AttributeError,
self.client.post,
'/file_uploads/quota/broken/',
{'f': f}
)
def test_fileupload_getlist(self):
file1 = tempfile.NamedTemporaryFile()
file1.write('a' * (2 ** 23))
file1.seek(0)
file2 = tempfile.NamedTemporaryFile()
file2.write('a' * (2 * 2 ** 18))
file2.seek(0)
file2a = tempfile.NamedTemporaryFile()
file2a.write('a' * (5 * 2 ** 20))
file2a.seek(0)
response = self.client.post('/file_uploads/getlist_count/', {
'file1': file1,
'field1': u'test',
'field2': u'test3',
'field3': u'test5',
'field4': u'test6',
'field5': u'test7',
'file2': (file2, file2a)
})
got = simplejson.loads(response.content)
self.assertEqual(got.get('file1'), 1)
self.assertEqual(got.get('file2'), 2)
def test_file_error_blocking(self):
"""
The server should not block when there are upload errors (bug #8622).
This can happen if something -- i.e. an exception handler -- tries to
access POST while handling an error in parsing POST. This shouldn't
cause an infinite loop!
"""
class POSTAccessingHandler(client.ClientHandler):
"""A handler that'll access POST during an exception."""
def handle_uncaught_exception(self, request, resolver, exc_info):
ret = super(POSTAccessingHandler, self).handle_uncaught_exception(request, resolver, exc_info)
p = request.POST
return ret
post_data = {
'name': 'Ringo',
'file_field': open(__file__),
}
        # Maybe this is a little more complicated than it needs to be; but if
# the django.test.client.FakePayload.read() implementation changes then
# this test would fail. So we need to know exactly what kind of error
# it raises when there is an attempt to read more than the available bytes:
try:
client.FakePayload('a').read(2)
except Exception, reference_error:
pass
# install the custom handler that tries to access request.POST
self.client.handler = POSTAccessingHandler()
try:
response = self.client.post('/file_uploads/upload_errors/', post_data)
except reference_error.__class__, err:
self.failIf(
str(err) == str(reference_error),
"Caught a repeated exception that'll cause an infinite loop in file uploads."
)
except Exception, err:
# CustomUploadError is the error that should have been raised
self.assertEqual(err.__class__, uploadhandler.CustomUploadError)
class DirectoryCreationTests(unittest.TestCase):
"""
Tests for error handling during directory creation
via _save_FIELD_file (ticket #6450)
"""
def setUp(self):
self.obj = FileModel()
if not os.path.isdir(temp_storage.location):
os.makedirs(temp_storage.location)
if os.path.isdir(UPLOAD_TO):
os.chmod(UPLOAD_TO, 0700)
shutil.rmtree(UPLOAD_TO)
def tearDown(self):
os.chmod(temp_storage.location, 0700)
shutil.rmtree(temp_storage.location)
def test_readonly_root(self):
"""Permission errors are not swallowed"""
os.chmod(temp_storage.location, 0500)
try:
self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', 'x'))
except OSError, err:
self.assertEquals(err.errno, errno.EACCES)
except Exception, err:
self.fail("OSError [Errno %s] not raised." % errno.EACCES)
def test_not_a_directory(self):
"""The correct IOError is raised when the upload directory name exists but isn't a directory"""
# Create a file with the upload directory name
fd = open(UPLOAD_TO, 'w')
fd.close()
try:
self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', 'x'))
except IOError, err:
# The test needs to be done on a specific string as IOError
# is raised even without the patch (just not early enough)
self.assertEquals(err.args[0],
"%s exists and is not a directory." % UPLOAD_TO)
except:
self.fail("IOError not raised")
class MultiParserTests(unittest.TestCase):
def test_empty_upload_handlers(self):
# We're not actually parsing here; just checking if the parser properly
# instantiates with empty upload handlers.
parser = MultiPartParser({
'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
'CONTENT_LENGTH': '1'
}, StringIO('x'), [], 'utf-8')
| bsd-3-clause | -6,755,040,905,477,396,000 | 36.952381 | 110 | 0.573484 | false |
PaddlePaddle/models | PaddleRec/dssm/infer.py | 1 | 1407 | import paddle.fluid as fluid
import numpy as np
import sys
import args
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
def infer(args):
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(fluid.Scope()):
infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(args.model_dir, exe)
        # Construct test data
sample_size = 100
l_Qs = []
pos_l_Ds = []
for i in range(sample_size):
l_Q = np.random.rand(1, args.TRIGRAM_D)
l_Qs.append(l_Q)
l_D = np.random.rand(1, args.TRIGRAM_D)
pos_l_Ds.append(l_D)
res = []
for i in range(sample_size):
con_sim = exe.run(infer_program,
feed={"query": l_Qs[i].astype('float32').reshape(1,args.TRIGRAM_D),
"doc_pos": pos_l_Ds[i].astype('float32').reshape(1,args.TRIGRAM_D)},
fetch_list=fetch_vars,
return_numpy=True)
logger.info("query_doc_sim: {:.5f}".format(np.array(con_sim).reshape(-1,1)[0][0]))
if __name__ == "__main__":
import paddle
paddle.enable_static()
args = args.parse_args()
infer(args) | apache-2.0 | -6,892,686,518,800,901,000 | 31.465116 | 105 | 0.55914 | false |
andres-liiver/IAPB13_suvendatud | Kodutoo_16/Kodutoo_16_Andres.py | 1 | 2985 | '''
Homework 16
14.11.2014
Andres Liiver
'''
import time
from matplotlib import pyplot as plt
from Tund16gen import *
def timeFunc(func, *args):
start = time.clock()
func(*args)
return time.clock() - start
def linear_search(lst, num):
for item in lst:
if item == num:
return True
return False
def binary_search(lst, num, sort=False):
if sort:
lst = sorted(lst)
imin = 0
imax = len(lst)-1
while imax >= imin:
imid = (imin+imax) // 2
if lst[imid] == num:
return True
elif lst[imid] < num:
imin = imid + 1
else:
imax = imid - 1
return False
def main():
linearTimes = []
binary1Times = []
binary2Times = []
ns = [2**i for i in range(1, 13)]
for n in ns:
lst, gen = gimme_my_input(n, "blah")
times = []
# linear search test
for i in range(len(lst)):
times.append(timeFunc(linear_search, lst, next(gen)))
avg_time = sum(times) / len(times)
linearTimes.append(avg_time)
# binary search test 1
times = []
sortedList = sorted(lst)
for i in range(len(lst)):
times.append(timeFunc(binary_search, sortedList, next(gen)))
avg_time = sum(times) / len(times)
binary1Times.append(avg_time)
# binary search test 2
times = []
for i in range(len(lst)):
times.append(timeFunc(binary_search, lst, next(gen), True))
avg_time = sum(times) / len(times)
binary2Times.append(avg_time)
# print table of results
print("| algorithm \t| n \t\t| time (s)")
print()
# print Linear Search
for i, n in enumerate(ns):
if n < 10000:
print("| {0} \t| {1} \t\t| {2:.8f}".format("Linear", n, linearTimes[i]))
else:
print("| {0} \t| {1} \t| {2:.8f}".format("Linear", n, linearTimes[i]))
print()
# print Binary Search (presorted)
for i, n in enumerate(ns):
if n < 10000:
print("| {0} | {1} \t\t| {2:.8f}".format("Bin (presort)", n, binary1Times[i]))
else:
print("| {0} | {1} \t| {2:.8f}".format("Bin (presort)", n, binary1Times[i]))
print()
# print Binary Search (sort)
for i, n in enumerate(ns):
if n < 10000:
print("| {0} \t| {1} \t\t| {2:.8f}".format("Bin (sort)", n, binary2Times[i]))
else:
print("| {0} \t| {1} \t| {2:.8f}".format("Bin (sort)", n, binary2Times[i]))
# plot the times
ax = plt.subplot()
ax.set_xlabel("n")
ax.set_xscale("log")
ax.set_ylabel("Time (s)")
ax.set_yscale("log")
ax.plot(ns, linearTimes, "r", label="Linear Search")
ax.plot(ns, binary1Times, "g", label="Binary Search (presorted)")
ax.plot(ns, binary2Times, "b", label="Binary Search (sort)")
ax.legend(loc="upper left", shadow=True);
plt.show()
if __name__ == "__main__":
main() | mit | -7,427,043,884,688,275,000 | 23.883333 | 90 | 0.529313 | false |
shouya/thinking-dumps | automata/homework/project2/CYK.py | 1 | 4714 | '''
CYK algorithm for Context Free Language
Author: Chenguang Zhu
CS154, Stanford University
'''
import sys,traceback
import os
import string
maxProductionNum = 100 #max number of productions
VarNum = 4
production = [[0] * 3 for i in range(maxProductionNum+1)]
'''Prouductions in Chomsky Normal Form (CNF)
production[i][0] is the number for the variable (0~3, 0: S 1: A, 2: B, 3: C)
If this production is A->BC (two variables), then production[i][1] and production[i][2] will contain the numbers for these two variables
If this production is A->a (a single terminal), then production[i][1] will contain the number for the terminal (0 or 1, 0: a, 1: b), production[i][2]=-1'''
X = [[[False]*3 for i in range(10)] for j in range(10)]
'''X[i][j][s]=true if and only if variable s (0~3, 0: S, 1: A, 2: B, 3: C) is in X_ij defined in CYK
Suppose the length of string to be processed is L, then 0<=i<=j<L '''
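# Worked example (added for clarity, not part of the original homework code):
# with the CNF grammar hard-coded in Start() below and the input w = "ab"
# (encoded as [0, 1]), CYK first fills the diagonal from the terminal rules
#     X[0][0] = {A, C}    because A->a and C->a
#     X[1][1] = {B, C}    because B->b and C->b
# and then combines adjacent spans, e.g. A in X[0][0] with C in X[1][1]
# yields B via B->AC, and A with B yields S via S->AB, so X[0][1] = {S, B}.
# The input is in the language iff the start variable S ends up in X[0][L-1].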
#check whether (a,b,c) exists in production
def existProd(a, b, c):
global production
for i in range(len(production)):
if ((production[i][0]==a) and
(production[i][1]==b) and
(production[i][2]==c)):
return True
return False
'''CYK algorithm
Calculate the array X
w is the string to be processed'''
def calcCYK(w):
global X
global VarNum
L=len(w)
X=[[[False]*VarNum for i in range(L)] for j in range(L)]
# X=[[[] for i in range(L)] for j in range(L)]
for x in range(L):
calc_cell_basic(x, w)
for dist in range(1,L):
calc_row(dist, L)
tmp = [[lengthify(i) for i in j] for j in X]
X = tmp
def calc_row(dist, l):
global X
for i in range(l - dist):
head = i
tail = i + dist
calc_cell(head, tail)
def lengthify(xs):
global VarNum
result = [False] * VarNum
i = 0
for x in xs:
result[i] = x
i += 1
return result
def calc_cell_basic(col, w):
global X
ww = w[col]
poss = [False] * VarNum
for i in range(7):
if existProd(i,ww,-1):
poss[i] = True
X[col][col] = poss
def prod(xs, ys):
result = []
for x in range(len(xs)):
for y in range(len(ys)):
if xs[x] and ys[y]:
for i in range(7):
if existProd(i, x, y):
result.append(i)
return result
def calc_cell(head, tail):
global X
poss = [False] * VarNum
for i in range(tail - head):
xs = X[head][head + i]
ys = X[head + i + 1][tail]
for i in prod(xs, ys):
poss[i] = True
X[head][tail] = poss
def Start(filename):
global X
global VarNum
global production
result=''
#read data case line by line from file
try:
br=open(filename,'r')
#example on Page 8 of lecture 15_CFL5
production=[[0]*3 for i in range(7)]
production[0][0]=0; production[0][1]=1; production[0][2]=2 #S->AB
production[1][0]=1; production[1][1]=2; production[1][2]=3 #A->BC
production[2][0]=1; production[2][1]=0; production[2][2]=-1 #A->a
production[3][0]=2; production[3][1]=1; production[3][2]=3 #B->AC
production[4][0]=2; production[4][1]=1; production[4][2]=-1 #B->b
production[5][0]=3; production[5][1]=0; production[5][2]=-1 #C->a
production[6][0]=3; production[6][1]=1; production[6][2]=-1 #C->b
result=''
#Read File Line By Line
for string in br:
string=string.strip()
print 'Processing '+string+'...'
length=len(string)
w=[0]*length
for i in range(length):
w[i]=ord(string[i])-ord('a') #convert 'a' to 0 and 'b' to 1
#Use CYK algorithm to calculate X
calcCYK(w)
#Get/print the full table X
for step in range(length-1,-1,-1):
for i in range(length-step):
j=i+step
for k in range(VarNum):
if (X[i][j][k]):
result=result+str(k)
result=result+' '
result=result+'\n'
#Close the input stream
br.close()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_exception:"
traceback.print_exception(exc_type, exc_value, exc_traceback,limit=2, file=sys.stdout)
result=result+'error'
return result
def main(filepath):
return Start(filepath)
if __name__ == '__main__':
main(sys.argv[1])
| mit | 6,106,430,936,488,291,000 | 27.098765 | 157 | 0.530972 | false |
nexusriot/cinder | cinder/volume/drivers/remotefs.py | 1 | 57137 | # Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import inspect
import json
import os
import re
import tempfile
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import compute
from cinder import db
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.volume import driver
LOG = logging.getLogger(__name__)
nas_opts = [
# TODO(eharney): deprecate nas_ip and change this to nas_host
cfg.StrOpt('nas_ip',
default='',
help='IP address or Hostname of NAS system.'),
cfg.StrOpt('nas_login',
default='admin',
help='User name to connect to NAS system.'),
cfg.StrOpt('nas_password',
default='',
help='Password to connect to NAS system.',
secret=True),
cfg.IntOpt('nas_ssh_port',
default=22,
min=1, max=65535,
help='SSH port to use to connect to NAS system.'),
cfg.StrOpt('nas_private_key',
default='',
help='Filename of private key to use for SSH authentication.'),
cfg.StrOpt('nas_secure_file_operations',
default='auto',
help=('Allow network-attached storage systems to operate in a '
'secure environment where root level access is not '
'permitted. If set to False, access is as the root user '
'and insecure. If set to True, access is not as root. '
'If set to auto, a check is done to determine if this is '
'a new installation: True is used if so, otherwise '
'False. Default is auto.')),
cfg.StrOpt('nas_secure_file_permissions',
default='auto',
help=('Set more secure file permissions on network-attached '
'storage volume files to restrict broad other/world '
'access. If set to False, volumes are created with open '
'permissions. If set to True, volumes are created with '
'permissions for the cinder user and group (660). If '
'set to auto, a check is done to determine if '
'this is a new installation: True is used if so, '
'otherwise False. Default is auto.')),
cfg.StrOpt('nas_share_path',
default='',
help=('Path to the share to use for storing Cinder volumes. '
'For example: "/srv/export1" for an NFS server export '
'available at 10.0.5.10:/srv/export1 .')),
cfg.StrOpt('nas_mount_options',
default=None,
help=('Options used to mount the storage backend file system '
'where Cinder volumes are stored.')),
]
old_vol_type_opts = [cfg.DeprecatedOpt('glusterfs_sparsed_volumes'),
cfg.DeprecatedOpt('glusterfs_qcow2_volumes')]
volume_opts = [
cfg.StrOpt('nas_volume_prov_type',
default='thin',
choices=['thin', 'thick'],
deprecated_opts=old_vol_type_opts,
help=('Provisioning type that will be used when '
'creating volumes.')),
]
CONF = cfg.CONF
CONF.register_opts(nas_opts)
CONF.register_opts(volume_opts)
def locked_volume_id_operation(f, external=False):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named
with the id of the volume. This lock can be used by driver methods
to prevent conflicts with other operations modifying the same volume.
May be applied to methods that take a 'volume' or 'snapshot' argument.
"""
def lvo_inner1(inst, *args, **kwargs):
lock_tag = inst.driver_prefix
call_args = inspect.getcallargs(f, inst, *args, **kwargs)
if call_args.get('volume'):
volume_id = call_args['volume']['id']
elif call_args.get('snapshot'):
volume_id = call_args['snapshot']['volume']['id']
else:
err_msg = _('The decorated method must accept either a volume or '
'a snapshot object')
raise exception.VolumeBackendAPIException(data=err_msg)
@utils.synchronized('%s-%s' % (lock_tag, volume_id),
external=external)
def lvo_inner2():
return f(inst, *args, **kwargs)
return lvo_inner2()
return lvo_inner1
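# Usage sketch (illustrative addition, not part of the original module): the
# decorator above is applied to driver methods that take a 'volume' or
# 'snapshot' argument, e.g.
#
#     @locked_volume_id_operation
#     def create_snapshot(self, snapshot):
#         ...
#
# so that two operations touching the same volume id serialize on the
# '<driver_prefix>-<volume id>' lock instead of running concurrently.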
class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD):
"""Common base for drivers that work like NFS."""
driver_volume_type = None
driver_prefix = 'remotefs'
volume_backend_name = None
SHARE_FORMAT_REGEX = r'.+:/.+'
def __init__(self, *args, **kwargs):
super(RemoteFSDriver, self).__init__(*args, **kwargs)
self.shares = {}
self._mounted_shares = []
self._execute_as_root = True
self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None)
if self.configuration:
self.configuration.append_config_values(nas_opts)
self.configuration.append_config_values(volume_opts)
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
data = {'export': volume['provider_location'],
'name': volume['name']}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
return {
'driver_volume_type': self.driver_volume_type,
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(RemoteFSDriver, self).do_setup(context)
# Validate the settings for our secure file options.
self.configuration.nas_secure_file_permissions = \
self.configuration.nas_secure_file_permissions.lower()
self.configuration.nas_secure_file_operations = \
self.configuration.nas_secure_file_operations.lower()
valid_secure_opts = ['auto', 'true', 'false']
secure_options = {'nas_secure_file_permissions':
self.configuration.nas_secure_file_permissions,
'nas_secure_file_operations':
self.configuration.nas_secure_file_operations}
for opt_name, opt_value in secure_options.items():
if opt_value not in valid_secure_opts:
err_parms = {'name': opt_name, 'value': opt_value}
msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
"'auto', 'true', or 'false'") % err_parms
LOG.error(msg)
raise exception.InvalidConfigurationValue(msg)
def _get_provisioned_capacity(self):
"""Returns the provisioned capacity.
Get the sum of sizes of volumes, snapshots and any other
files on the mountpoint.
"""
provisioned_size = 0.0
for share in self.shares.keys():
mount_path = self._get_mount_point_for_share(share)
out, _ = self._execute('du', '--bytes', mount_path,
run_as_root=True)
provisioned_size += int(out.split()[0])
return round(provisioned_size / units.Gi, 2)
def _get_mount_point_base(self):
"""Returns the mount point base for the remote fs.
This method facilitates returning mount point base
for the specific remote fs. Override this method
in the respective driver to return the entry to be
used while attach/detach using brick in cinder.
If not overridden then it returns None without
raising exception to continue working for cases
when not used with brick.
"""
LOG.debug("Driver specific implementation needs to return"
" mount_point_base.")
return None
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
:returns: provider_location update dict for database
"""
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
def _do_create_volume(self, volume):
"""Create a volume on given remote share.
:param volume: volume reference
"""
volume_path = self.local_path(volume)
volume_size = volume['size']
if getattr(self.configuration,
self.driver_prefix + '_sparsed_volumes'):
            # If the touch file exists, set the bootable flag for the volume
if (os.path.isfile('/etc/cinder/recogimage')):
LOG.debug('DEBUG : setting bootable flag for the volume')
volume['bootable'] = 1
self._create_sparsed_file(volume_path, volume_size, volume)
# Do not try to change permissions of the file here, as we are operating on a sym-link that is not local
else:
self._create_sparsed_file(volume_path, volume_size)
self._set_rw_permissions(volume_path)
else:
self._create_regular_file(volume_path, volume_size)
self._set_rw_permissions(volume_path)
def _ensure_shares_mounted(self):
"""Look for remote shares in the flags and mount them locally."""
mounted_shares = []
self._load_shares_config(getattr(self.configuration,
self.driver_prefix +
'_shares_config'))
for share in self.shares.keys():
try:
self._ensure_share_mounted(share)
mounted_shares.append(share)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s'), exc)
self._mounted_shares = mounted_shares
LOG.debug('Available shares %s', self._mounted_shares)
def delete_volume(self, volume):
"""Deletes a logical volume.
:param volume: volume reference
"""
if not volume['provider_location']:
LOG.warning(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
mounted_path = self.local_path(volume)
self._delete(mounted_path)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
self._ensure_share_mounted(volume['provider_location'])
def create_export(self, ctx, volume, connector):
"""Exports the volume.
Can optionally return a dictionary of changes
to the volume object to be persisted.
"""
pass
def remove_export(self, ctx, volume):
"""Removes an export for a logical volume."""
pass
def delete_snapshot(self, snapshot):
"""Delete snapshot.
Do nothing for this driver, but allow manager to handle deletion
of snapshot in error state.
"""
pass
def _delete(self, path):
# Note(lpetrut): this method is needed in order to provide
# interoperability with Windows as it will be overridden.
self._execute('rm', '-f', path, run_as_root=self._execute_as_root)
def _create_sparsed_file(self, path, size):
"""Creates a sparse file of a given size in GiB."""
self._execute('truncate', '-s', '%sG' % size,
path, run_as_root=self._execute_as_root)
def _create_regular_file(self, path, size):
"""Creates a regular file of given size in GiB."""
block_size_mb = 1
block_count = size * units.Gi / (block_size_mb * units.Mi)
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
'count=%d' % block_count,
run_as_root=self._execute_as_root)
def _fallocate(self, path, size):
"""Creates a raw file of given size in GiB using fallocate."""
self._execute('fallocate', '--length=%sG' % size,
path, run_as_root=True)
def _create_qcow2_file(self, path, size_gb):
"""Creates a QCOW2 file of a given size in GiB."""
self._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata',
path, str(size_gb * units.Gi),
run_as_root=self._execute_as_root)
def _set_rw_permissions(self, path):
"""Sets access permissions for given NFS path.
Volume file permissions are set based upon the value of
secure_file_permissions: 'true' sets secure access permissions and
'false' sets more open (insecure) access permissions.
:param path: the volume file path.
"""
if self.configuration.nas_secure_file_permissions == 'true':
permissions = '660'
LOG.debug('File path %(path)s is being set with permissions: '
'%(permissions)s',
{'path': path, 'permissions': permissions})
else:
permissions = 'ugo+rw'
LOG.warning(_LW('%(path)s is being set with open permissions: '
'%(perm)s'), {'path': path, 'perm': permissions})
self._execute('chmod', permissions, path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_all(self, path):
"""Sets 666 permissions for the path."""
self._execute('chmod', 'ugo+rw', path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_owner(self, path):
"""Sets read-write permissions to the owner for the path."""
self._execute('chmod', 'u+rw', path,
run_as_root=self._execute_as_root)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference
"""
remotefs_share = volume['provider_location']
return os.path.join(self._get_mount_point_for_share(remotefs_share),
volume['name'])
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
run_as_root = self._execute_as_root
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'],
run_as_root=run_as_root)
        # NOTE (leseb): Set the virtual size of the image.
        # The raw conversion overwrote the destination file (which had the
        # correct size) with the fetched glance image size, so the initial
        # 'size' parameter is not honored. Resize the image back to the size
        # originally requested by the user and then verify the final virtual
        # size.
image_utils.resize_image(self.local_path(volume), volume['size'],
run_as_root=run_as_root)
data = image_utils.qemu_img_info(self.local_path(volume),
run_as_root=run_as_root)
virt_size = data.virtual_size / units.Gi
if virt_size != volume['size']:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=(_("Expected volume size was %d") % volume['size'])
+ (_(" but size is now %d") % virt_size))
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def _read_config_file(self, config_file):
# Returns list of lines in file
with open(config_file) as f:
return f.readlines()
def _load_shares_config(self, share_file=None):
self.shares = {}
if all((self.configuration.nas_ip,
self.configuration.nas_share_path)):
LOG.debug('Using nas_ip and nas_share_path configuration.')
nas_ip = self.configuration.nas_ip
nas_share_path = self.configuration.nas_share_path
share_address = '%s:%s' % (nas_ip, nas_share_path)
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
msg = (_("Share %s ignored due to invalid format. Must "
"be of form address:/export. Please check the "
"nas_ip and nas_share_path settings."),
share_address)
raise exception.InvalidConfigurationValue(msg)
self.shares[share_address] = self.configuration.nas_mount_options
elif share_file is not None:
LOG.debug('Loading shares from %s.', share_file)
for share in self._read_config_file(share_file):
# A configuration line may be either:
# host:/vol_name
# or
# host:/vol_name -o options=123,rw --other
if not share.strip():
# Skip blank or whitespace-only lines
continue
if share.startswith('#'):
continue
share_info = share.split(' ', 1)
# results in share_info =
# [ 'address:/vol', '-o options=123,rw --other' ]
share_address = share_info[0].strip()
# Replace \040 with a space, to support paths with spaces
share_address = share_address.replace("\\040", " ")
share_opts = None
if len(share_info) > 1:
share_opts = share_info[1].strip()
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
LOG.error(_LE("Share %s ignored due to invalid format. "
"Must be of form address:/export."),
share_address)
continue
self.shares[share_address] = share_opts
LOG.debug("shares loaded: %s", self.shares)
def _get_mount_point_for_share(self, path):
raise NotImplementedError()
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
if refresh or not self._stats:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.volume_backend_name
data['vendor_name'] = 'Open Source'
data['driver_version'] = self.get_version()
data['storage_protocol'] = self.driver_volume_type
self._ensure_shares_mounted()
global_capacity = 0
global_free = 0
for share in self._mounted_shares:
capacity, free, used = self._get_capacity_info(share)
global_capacity += capacity
global_free += free
data['total_capacity_gb'] = global_capacity / float(units.Gi)
data['free_capacity_gb'] = global_free / float(units.Gi)
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = False
self._stats = data
def _get_capacity_info(self, share):
raise NotImplementedError()
def _find_share(self, volume_size_in_gib):
raise NotImplementedError()
def _ensure_share_mounted(self, share):
raise NotImplementedError()
def secure_file_operations_enabled(self):
"""Determine if driver is operating in Secure File Operations mode.
The Cinder Volume driver needs to query if this driver is operating
in a secure file mode; check our nas_secure_file_operations flag.
"""
if self.configuration.nas_secure_file_operations == 'true':
return True
return False
def set_nas_security_options(self, is_new_cinder_install):
"""Determine the setting to use for Secure NAS options.
This method must be overridden by child wishing to use secure
NAS file operations. This base method will set the NAS security
options to false.
"""
doc_html = "http://docs.openstack.org/admin-guide-cloud" \
"/blockstorage_nfs_backend.html"
self.configuration.nas_secure_file_operations = 'false'
LOG.warning(_LW("The NAS file operations will be run as root: "
"allowing root level access at the storage backend. "
"This is considered an insecure NAS environment. "
"Please see %s for information on a secure NAS "
"configuration."),
doc_html)
self.configuration.nas_secure_file_permissions = 'false'
LOG.warning(_LW("The NAS file permissions mode will be 666 (allowing "
"other/world read & write access). This is considered "
"an insecure NAS environment. Please see %s for "
"information on a secure NFS configuration."),
doc_html)
def _determine_nas_security_option_setting(self, nas_option, mount_point,
is_new_cinder_install):
"""Determine NAS security option setting when 'auto' is assigned.
This method determines the final 'true'/'false' setting of an NAS
security option when the default value of 'auto' has been detected.
If the nas option isn't 'auto' then its current value is used.
:param nas_option: The NAS security option value loaded from config.
:param mount_point: Mount where indicator file is written.
:param is_new_cinder_install: boolean for new Cinder installation.
:return string: 'true' or 'false' for new option setting.
"""
if nas_option == 'auto':
# For auto detection, we first check to see if we have been
# through this process before by checking for the existence of
# the Cinder secure environment indicator file.
file_name = '.cinderSecureEnvIndicator'
file_path = os.path.join(mount_point, file_name)
if os.path.isfile(file_path):
nas_option = 'true'
LOG.info(_LI('Cinder secure environment '
'indicator file exists.'))
else:
# The indicator file does not exist. If it is a new
# installation, set to 'true' and create the indicator file.
if is_new_cinder_install:
nas_option = 'true'
try:
with open(file_path, 'w') as fh:
fh.write('Detector file for Cinder secure '
'environment usage.\n')
fh.write('Do not delete this file.\n')
# Set the permissions on our special marker file to
# protect from accidental removal (owner write only).
self._execute('chmod', '640', file_path,
run_as_root=False)
LOG.info(_LI('New Cinder secure environment indicator'
' file created at path %s.'), file_path)
except IOError as err:
                        LOG.error(_LE('Failed to create Cinder secure '
'environment indicator file: %s'),
err)
else:
# For existing installs, we default to 'false'. The
# admin can always set the option at the driver config.
nas_option = 'false'
return nas_option
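# Illustrative summary (added note, not part of the driver API) of how the
# method above resolves the 'auto' value of a NAS security option; the mount
# point and call below are hypothetical examples.
#
#   nas_option    indicator file exists   new install   resulting setting
#   -----------   ---------------------   -----------   -----------------
#   'auto'        yes                     (any)         'true'
#   'auto'        no                      yes           'true' (file created)
#   'auto'        no                      no            'false'
#   other value   (any)                   (any)         returned unchanged
#
#   drv._determine_nas_security_option_setting('auto', '/mnt/share', True)
#   # -> 'true' (and the indicator file is written under /mnt/share)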
class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD):
"""Base class for remotefs drivers implementing qcow2 snapshots.
Driver must implement:
_local_volume_dir(self, volume)
"""
def __init__(self, *args, **kwargs):
self._remotefsclient = None
self.base = None
self._nova = None
super(RemoteFSSnapDriver, self).__init__(*args, **kwargs)
def do_setup(self, context):
super(RemoteFSSnapDriver, self).do_setup(context)
self._nova = compute.API()
def _local_volume_dir(self, volume):
share = volume['provider_location']
local_dir = self._get_mount_point_for_share(share)
return local_dir
def _local_path_volume(self, volume):
path_to_disk = os.path.join(
self._local_volume_dir(volume),
volume['name'])
return path_to_disk
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot['volume'])
snap_path = '%s.%s' % (vol_path, snapshot['id'])
return snap_path
def _local_path_volume_info(self, volume):
return '%s%s' % (self.local_path(volume), '.info')
def _read_file(self, filename):
"""This method is to make it easier to stub out code for testing.
Returns a string representing the contents of the file.
"""
with open(filename, 'r') as f:
return f.read()
def _write_info_file(self, info_path, snap_info):
if 'active' not in snap_info.keys():
msg = _("'active' must be present when writing snap_info.")
raise exception.RemoteFSException(msg)
with open(info_path, 'w') as f:
json.dump(snap_info, f, indent=1, sort_keys=True)
def _qemu_img_info_base(self, path, volume_name, basedir):
"""Sanitize image_utils' qemu_img_info.
This code expects to deal only with relative filenames.
"""
info = image_utils.qemu_img_info(path)
if info.image:
info.image = os.path.basename(info.image)
if info.backing_file:
backing_file_template = \
"(%(basedir)s/[0-9a-f]+/)?%" \
"(volname)s(.(tmp-snap-)?[0-9a-f-]+)?$" % {
'basedir': basedir,
'volname': volume_name
}
if not re.match(backing_file_template, info.backing_file):
msg = _("File %(path)s has invalid backing file "
"%(bfile)s, aborting.") % {'path': path,
'bfile': info.backing_file}
raise exception.RemoteFSException(msg)
info.backing_file = os.path.basename(info.backing_file)
return info
def _qemu_img_info(self, path, volume_name):
raise NotImplementedError()
def _img_commit(self, path):
self._execute('qemu-img', 'commit', path,
run_as_root=self._execute_as_root)
self._delete(path)
def _rebase_img(self, image, backing_file, volume_format):
self._execute('qemu-img', 'rebase', '-u', '-b', backing_file, image,
'-F', volume_format, run_as_root=self._execute_as_root)
def _read_info_file(self, info_path, empty_if_missing=False):
"""Return dict of snapshot information.
:param: info_path: path to file
:param: empty_if_missing: True=return empty dict if no file
"""
if not os.path.exists(info_path):
if empty_if_missing is True:
return {}
return json.loads(self._read_file(info_path))
def _get_backing_chain_for_path(self, volume, path):
"""Returns list of dicts containing backing-chain information.
Includes 'filename', and 'backing-filename' for each
applicable entry.
Consider converting this to use --backing-chain and --output=json
when environment supports qemu-img 1.5.0.
:param volume: volume reference
:param path: path to image file at top of chain
"""
output = []
info = self._qemu_img_info(path, volume['name'])
new_info = {}
new_info['filename'] = os.path.basename(path)
new_info['backing-filename'] = info.backing_file
output.append(new_info)
while new_info['backing-filename']:
filename = new_info['backing-filename']
path = os.path.join(self._local_volume_dir(volume), filename)
info = self._qemu_img_info(path, volume['name'])
backing_filename = info.backing_file
new_info = {}
new_info['filename'] = filename
new_info['backing-filename'] = backing_filename
output.append(new_info)
return output
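    # Illustrative example (added note): for a backing chain
    #   volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
    # calling this method with the path of volume-1234.bbbb returns roughly:
    #   [{'filename': 'volume-1234.bbbb', 'backing-filename': 'volume-1234.aaaa'},
    #    {'filename': 'volume-1234.aaaa', 'backing-filename': 'volume-1234'},
    #    {'filename': 'volume-1234', 'backing-filename': None}]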
def _get_hash_str(self, base_str):
"""Return a string that represents hash of base_str.
Returns string in a hex format.
"""
if isinstance(base_str, six.text_type):
base_str = base_str.encode('utf-8')
return hashlib.md5(base_str).hexdigest()
def _get_mount_point_for_share(self, share):
"""Return mount point for share.
:param share: example 172.18.194.100:/var/fs
"""
return self._remotefsclient.get_mount_point(share)
def _get_available_capacity(self, share):
"""Calculate available space on the share.
:param share: example 172.18.194.100:/var/fs
"""
mount_point = self._get_mount_point_for_share(share)
out, _ = self._execute('df', '--portability', '--block-size', '1',
mount_point,
run_as_root=self._execute_as_root)
out = out.splitlines()[1]
size = int(out.split()[1])
available = int(out.split()[3])
return available, size
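    # Illustrative sketch (added note, values made up): the command
    #   df --portability --block-size 1 <mount_point>
    # prints something like
    #   Filesystem        1-blocks       Used  Available Capacity Mounted on
    #   srv:/export     1073741824  104857600  968884224      10% /mnt/share
    # The second line is split on whitespace; field [1] is the total size and
    # field [3] is the free space, both in bytes.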
def _get_capacity_info(self, remotefs_share):
available, size = self._get_available_capacity(remotefs_share)
return size, available, size - available
def _get_mount_point_base(self):
return self.base
def _ensure_share_writable(self, path):
"""Ensure that the Cinder user can write to the share.
If not, raise an exception.
:param path: path to test
:raises: RemoteFSException
:returns: None
"""
prefix = '.cinder-write-test-' + str(os.getpid()) + '-'
try:
tempfile.NamedTemporaryFile(prefix=prefix, dir=path)
except OSError:
msg = _('Share at %(dir)s is not writable by the '
'Cinder volume service. Snapshot operations will not be '
'supported.') % {'dir': path}
raise exception.RemoteFSException(msg)
def _copy_volume_to_image(self, context, volume, image_service,
image_meta):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
info = self._qemu_img_info(active_file_path, volume['name'])
backing_file = info.backing_file
root_file_fmt = info.file_format
tmp_params = {
'prefix': '%s.temp_image.%s' % (volume['id'], image_meta['id']),
'suffix': '.img'
}
with image_utils.temporary_file(**tmp_params) as temp_path:
if backing_file or (root_file_fmt != 'raw'):
# Convert due to snapshots
# or volume data not being stored in raw format
# (upload_volume assumes raw format input)
image_utils.convert_image(active_file_path, temp_path, 'raw')
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path)
def get_active_image_from_info(self, volume):
"""Returns filename of the active image from the info file."""
info_file = self._local_path_volume_info(volume)
snap_info = self._read_info_file(info_file, empty_if_missing=True)
if not snap_info:
# No info file = no snapshots exist
vol_path = os.path.basename(self.local_path(volume))
return vol_path
return snap_info['active']
def _create_cloned_volume(self, volume, src_vref):
LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'),
{'src': src_vref['id'],
'dst': volume['id']})
if src_vref['status'] != 'available':
msg = _("Volume status must be 'available'.")
raise exception.InvalidVolume(msg)
volume_name = CONF.volume_name_template % volume['id']
volume_info = {'provider_location': src_vref['provider_location'],
'size': src_vref['size'],
'id': volume['id'],
'name': volume_name,
'status': src_vref['status']}
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % src_vref['id'],
'volume_id': src_vref['id'],
'id': 'tmp-snap-%s' % src_vref['id'],
'volume': src_vref}
self._create_snapshot(temp_snapshot)
try:
self._copy_volume_from_snapshot(temp_snapshot,
volume_info,
volume['size'])
finally:
self._delete_snapshot(temp_snapshot)
return {'provider_location': src_vref['provider_location']}
def _delete_stale_snapshot(self, snapshot):
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
snapshot_file = snap_info[snapshot['id']]
active_file = self.get_active_image_from_info(snapshot['volume'])
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']), snapshot_file)
if (snapshot_file == active_file):
return
LOG.info(_LI('Deleting stale snapshot: %s'), snapshot['id'])
self._delete(snapshot_path)
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def _delete_snapshot(self, snapshot):
"""Delete a snapshot.
If volume status is 'available', delete snapshot here in Cinder
using qemu-img.
If volume status is 'in-use', calculate what qcow2 files need to
merge, and call to Nova to perform this operation.
:raises: InvalidVolume if status not acceptable
:raises: RemoteFSException(msg) if operation fails
:returns: None
"""
LOG.debug('Deleting snapshot %s:', snapshot['id'])
volume_status = snapshot['volume']['status']
if volume_status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use".')
raise exception.InvalidVolume(msg)
vol_path = self._local_volume_dir(snapshot['volume'])
self._ensure_share_writable(vol_path)
# Determine the true snapshot file for this snapshot
# based on the .info file
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path, empty_if_missing=True)
if snapshot['id'] not in snap_info:
# If snapshot info file is present, but snapshot record does not
# exist, do not attempt to delete.
# (This happens, for example, if snapshot_create failed due to lack
# of permission to write to the share.)
LOG.info(_LI('Snapshot record for %s is not present, allowing '
'snapshot_delete to proceed.'), snapshot['id'])
return
snapshot_file = snap_info[snapshot['id']]
LOG.debug('snapshot_file for this snap is: %s', snapshot_file)
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
snapshot_file)
snapshot_path_img_info = self._qemu_img_info(
snapshot_path,
snapshot['volume']['name'])
base_file = snapshot_path_img_info.backing_file
if base_file is None:
# There should always be at least the original volume
# file as base.
LOG.warning(_LW('No backing file found for %s, allowing '
'snapshot to be deleted.'), snapshot_path)
# Snapshot may be stale, so just delete it and update the
# info file instead of blocking
return self._delete_stale_snapshot(snapshot)
base_path = os.path.join(vol_path, base_file)
base_file_img_info = self._qemu_img_info(base_path,
snapshot['volume']['name'])
# Find what file has this as its backing file
active_file = self.get_active_image_from_info(snapshot['volume'])
active_file_path = os.path.join(vol_path, active_file)
if volume_status == 'in-use':
# Online delete
context = snapshot['context']
new_base_file = base_file_img_info.backing_file
base_id = None
for key, value in snap_info.items():
if value == base_file and key != 'active':
base_id = key
break
if base_id is None:
# This means we are deleting the oldest snapshot
LOG.debug('No %(base_id)s found for %(file)s',
{'base_id': 'base_id', 'file': snapshot_file})
online_delete_info = {
'active_file': active_file,
'snapshot_file': snapshot_file,
'base_file': base_file,
'base_id': base_id,
'new_base_file': new_base_file
}
return self._delete_snapshot_online(context,
snapshot,
online_delete_info)
if snapshot_file == active_file:
# There is no top file
# T0 | T1 |
# base | snapshot_file | None
# (guaranteed to| (being deleted, |
# exist) | commited down) |
self._img_commit(snapshot_path)
# Active file has changed
snap_info['active'] = base_file
else:
# T0 | T1 | T2 | T3
# base | snapshot_file | higher_file | highest_file
# (guaranteed to | (being deleted, | (guaranteed to | (may exist)
# exist, not | commited down) | exist, needs |
# used here) | | ptr update) |
backing_chain = self._get_backing_chain_for_path(
snapshot['volume'], active_file_path)
# This file is guaranteed to exist since we aren't operating on
# the active file.
higher_file = next((os.path.basename(f['filename'])
for f in backing_chain
if f.get('backing-filename', '') ==
snapshot_file),
None)
if higher_file is None:
msg = _('No file found with %s as backing file.') %\
snapshot_file
raise exception.RemoteFSException(msg)
higher_id = next((i for i in snap_info
if snap_info[i] == higher_file
and i != 'active'),
None)
if higher_id is None:
msg = _('No snap found with %s as backing file.') %\
higher_file
raise exception.RemoteFSException(msg)
self._img_commit(snapshot_path)
higher_file_path = os.path.join(vol_path, higher_file)
base_file_fmt = base_file_img_info.file_format
self._rebase_img(higher_file_path, base_file, base_file_fmt)
# Remove snapshot_file from info
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def _create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
Snapshot must not be the active snapshot. (offline)
"""
if snapshot['status'] != 'available':
msg = _('Snapshot status must be "available" to clone.')
raise exception.InvalidSnapshot(msg)
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
self._do_create_volume(volume)
self._copy_volume_from_snapshot(snapshot,
volume,
volume['size'])
return {'provider_location': volume['provider_location']}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
raise NotImplementedError()
def _do_create_snapshot(self, snapshot, backing_filename,
new_snap_path):
"""Create a QCOW2 file backed by another file.
:param snapshot: snapshot reference
:param backing_filename: filename of file that will back the
new qcow2 file
:param new_snap_path: filename of new qcow2 file
"""
backing_path_full_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
backing_filename)
command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
'backing_file=%s' % backing_path_full_path, new_snap_path]
self._execute(*command, run_as_root=self._execute_as_root)
info = self._qemu_img_info(backing_path_full_path,
snapshot['volume']['name'])
backing_fmt = info.file_format
command = ['qemu-img', 'rebase', '-u',
'-b', backing_filename,
'-F', backing_fmt,
new_snap_path]
self._execute(*command, run_as_root=self._execute_as_root)
self._set_rw_permissions(new_snap_path)
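    # Illustrative sketch (added note, hypothetical names): for a backing file
    # 'volume-1234' and a new snapshot file 'volume-1234.aaaa' mounted under
    # /mnt/share, the two invocations above amount to
    #   qemu-img create -f qcow2 \
    #       -o backing_file=/mnt/share/volume-1234 /mnt/share/volume-1234.aaaa
    #   qemu-img rebase -u -b volume-1234 -F raw /mnt/share/volume-1234.aaaa
    # The unsafe rebase only rewrites the backing-file pointer to the relative
    # file name, so the chain stays valid if the share is mounted elsewhere.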
def _create_snapshot(self, snapshot):
"""Create a snapshot.
If volume is attached, call to Nova to create snapshot, providing a
qcow2 file. Cinder creates and deletes qcow2 files, but Nova is
responsible for transitioning the VM between them and handling live
transfers of data between files as required.
If volume is detached, create locally with qemu-img. Cinder handles
manipulation of qcow2 files.
A file named volume-<uuid>.info is stored with the volume
data and is a JSON table which contains a mapping between
Cinder snapshot UUIDs and filenames, as these associations
will change as snapshots are deleted.
Basic snapshot operation:
1. Initial volume file:
volume-1234
2. Snapshot created:
volume-1234 <- volume-1234.aaaa
volume-1234.aaaa becomes the new "active" disk image.
If the volume is not attached, this filename will be used to
attach the volume to a VM at volume-attach time.
If the volume is attached, the VM will switch to this file as
part of the snapshot process.
Note that volume-1234.aaaa represents changes after snapshot
'aaaa' was created. So the data for snapshot 'aaaa' is actually
in the backing file(s) of volume-1234.aaaa.
This file has a qcow2 header recording the fact that volume-1234 is
its backing file. Delta changes since the snapshot was created are
stored in this file, and the backing file (volume-1234) does not
change.
info file: { 'active': 'volume-1234.aaaa',
'aaaa': 'volume-1234.aaaa' }
3. Second snapshot created:
volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
volume-1234.bbbb now becomes the "active" disk image, recording
changes made to the volume.
info file: { 'active': 'volume-1234.bbbb', (* changed!)
'aaaa': 'volume-1234.aaaa',
'bbbb': 'volume-1234.bbbb' } (* added!)
4. Snapshot deletion when volume is attached ('in-use' state):
* When first snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "aaaa" and
makes snapshot with id "bbbb" point to the base image.
Snapshot with id "bbbb" is the active image.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "bbbb" by
pulling volume-1234's data into volume-1234.bbbb. This
(logically) removes snapshot with id "bbbb" and the active
file remains the same.
volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb' }
TODO (deepakcs): Change this once Nova supports blockCommit for
in-use volumes.
5. Snapshot deletion when volume is detached ('available' state):
* When first snapshot is deleted, Cinder does the snapshot
deletion. volume-1234.aaaa is removed from the snapshot chain.
The data from it is merged into its parent.
volume-1234.bbbb is rebased, having volume-1234 as its new
parent.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder does the snapshot
deletion. volume-1234.aaaa is removed from the snapshot chain.
The base image, volume-1234 becomes the active image for this
volume again.
volume-1234
info file: { 'active': 'volume-1234' } (* changed!)
"""
status = snapshot['volume']['status']
if status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use"'
' for snapshot. (is %s)') % status
raise exception.InvalidVolume(msg)
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path, empty_if_missing=True)
backing_filename = self.get_active_image_from_info(
snapshot['volume'])
new_snap_path = self._get_new_snap_path(snapshot)
if status == 'in-use':
self._create_snapshot_online(snapshot,
backing_filename,
new_snap_path)
else:
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
snap_info['active'] = os.path.basename(new_snap_path)
snap_info[snapshot['id']] = os.path.basename(new_snap_path)
self._write_info_file(info_path, snap_info)
def _create_snapshot_online(self, snapshot, backing_filename,
new_snap_path):
# Perform online snapshot via Nova
context = snapshot['context']
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
connection_info = {
'type': 'qcow2',
'new_file': os.path.basename(new_snap_path),
'snapshot_id': snapshot['id']
}
try:
result = self._nova.create_volume_snapshot(
context,
snapshot['volume_id'],
connection_info)
LOG.debug('nova call result: %s', result)
except Exception:
LOG.exception(_LE('Call to Nova to create snapshot failed'))
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 600
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'creating':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
time.sleep(increment)
seconds_elapsed += increment
elif s['status'] == 'error':
msg = _('Nova returned "error" status '
'while creating snapshot.')
raise exception.RemoteFSException(msg)
LOG.debug('Status of snapshot %(id)s is now %(status)s',
{'id': snapshot['id'],
'status': s['status']})
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for creation of snapshot %s.') % snapshot['id']
raise exception.RemoteFSException(msg)
def _delete_snapshot_online(self, context, snapshot, info):
# Update info over the course of this method
# active file never changes
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
if info['active_file'] == info['snapshot_file']:
# blockRebase/Pull base into active
# info['base'] => snapshot_file
file_to_delete = info['base_file']
if info['base_id'] is None:
# Passing base=none to blockRebase ensures that
# libvirt blanks out the qcow2 backing file pointer
new_base = None
else:
new_base = info['new_base_file']
snap_info[info['base_id']] = info['snapshot_file']
delete_info = {'file_to_merge': new_base,
'merge_target_file': None, # current
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
else:
# blockCommit snapshot into base
# info['base'] <= snapshot_file
# delete record of snapshot
file_to_delete = info['snapshot_file']
delete_info = {'file_to_merge': info['snapshot_file'],
'merge_target_file': info['base_file'],
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
try:
self._nova.delete_volume_snapshot(
context,
snapshot['id'],
delete_info)
except Exception:
LOG.exception(_LE('Call to Nova delete snapshot failed'))
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 7200
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'deleting':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
else:
LOG.debug('status of snapshot %s is still "deleting"... '
'waiting', snapshot['id'])
time.sleep(increment)
seconds_elapsed += increment
else:
msg = _('Unable to delete snapshot %(id)s, '
'status: %(status)s.') % {'id': snapshot['id'],
'status': s['status']}
raise exception.RemoteFSException(msg)
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for deletion of snapshot %(id)s.') %\
{'id': snapshot['id']}
raise exception.RemoteFSException(msg)
# Write info file updated above
self._write_info_file(info_path, snap_info)
# Delete stale file
path_to_delete = os.path.join(
self._local_volume_dir(snapshot['volume']), file_to_delete)
self._execute('rm', '-f', path_to_delete, run_as_root=True)
@locked_volume_id_operation
def create_snapshot(self, snapshot):
"""Apply locking to the create snapshot operation."""
return self._create_snapshot(snapshot)
@locked_volume_id_operation
def delete_snapshot(self, snapshot):
"""Apply locking to the delete snapshot operation."""
return self._delete_snapshot(snapshot)
@locked_volume_id_operation
def create_volume_from_snapshot(self, volume, snapshot):
return self._create_volume_from_snapshot(volume, snapshot)
@locked_volume_id_operation
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
return self._create_cloned_volume(volume, src_vref)
@locked_volume_id_operation
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
return self._copy_volume_to_image(context,
volume,
image_service,
image_meta)
| apache-2.0 | -5,967,509,913,570,102,000 | 38.459254 | 109 | 0.548926 | false |
Connexions/openstax-cms | news/models.py | 1 | 15456 | from bs4 import BeautifulSoup
from django.db import models
from django import forms
from wagtail.core.models import Page, Orderable
from wagtail.core.fields import RichTextField, StreamField
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel, InlinePanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.embeds.blocks import EmbedBlock
from wagtail.search import index
from wagtail.core import blocks
from wagtail.core.blocks import TextBlock, StructBlock, StreamBlock, FieldBlock, CharBlock, RichTextBlock, RawHTMLBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.documents.blocks import DocumentChooserBlock
from wagtail.snippets.blocks import SnippetChooserBlock
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.snippets.models import register_snippet
from wagtail.api import APIField
from wagtail.images.api.fields import ImageRenditionField
from wagtail.core.models import Site
from modelcluster.fields import ParentalKey
from modelcluster.contrib.taggit import ClusterTaggableManager
from taggit.models import TaggedItemBase
from openstax.functions import build_image_url
from snippets.models import NewsSource
class ImageChooserBlock(ImageChooserBlock):
def get_api_representation(self, value, context=None):
if value:
return {
'id': value.id,
'title': value.title,
'original': value.get_rendition('original').attrs_dict,
}
class PullQuoteBlock(StructBlock):
quote = TextBlock("quote title")
attribution = CharBlock()
class Meta:
icon = "openquote"
class ImageFormatChoiceBlock(FieldBlock):
field = forms.ChoiceField(choices=(
('left', 'Wrap left'), ('right', 'Wrap right'), ('mid', 'Mid width'), ('full', 'Full width'),
))
class HTMLAlignmentChoiceBlock(FieldBlock):
field = forms.ChoiceField(choices=(
('normal', 'Normal'), ('full', 'Full width'),
))
class ImageBlock(StructBlock):
image = ImageChooserBlock()
caption = RichTextBlock()
alignment = ImageFormatChoiceBlock()
alt_text = blocks.CharBlock(required=False)
class AlignedHTMLBlock(StructBlock):
html = RawHTMLBlock()
alignment = HTMLAlignmentChoiceBlock()
class Meta:
icon = "code"
class BlogStreamBlock(StreamBlock):
paragraph = RichTextBlock(icon="pilcrow")
aligned_image = ImageBlock(label="Aligned image", icon="image")
pullquote = PullQuoteBlock()
aligned_html = AlignedHTMLBlock(icon="code", label='Raw HTML')
document = DocumentChooserBlock(icon="doc-full-inverse")
embed = EmbedBlock(icon="media", label="Embed Media URL")
class NewsIndex(Page):
intro = RichTextField(blank=True)
press_kit = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def articles(self):
articles = NewsArticle.objects.live().child_of(self)
article_data = {}
for article in articles:
article_data['{}'.format(article.slug)] = {
'detail_url': '/apps/cms/api/v2/pages/{}/'.format(article.pk),
'date': article.date,
'heading': article.heading,
'subheading': article.subheading,
'body_blurb': article.first_paragraph,
'pin_to_top': article.pin_to_top,
'article_image': article.article_image,
'article_image_alt': article.featured_image_alt_text,
'author': article.author,
'tags': [tag.name for tag in article.tags.all()],
}
return article_data
content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
DocumentChooserPanel('press_kit'),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('intro'),
APIField('press_kit'),
APIField('articles'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image')
]
subpage_types = ['news.NewsArticle']
parent_page_types = ['pages.HomePage']
max_count = 1
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/blog/'.format(Site.find_for_request(request).root_url),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
class NewsArticleTag(TaggedItemBase):
content_object = ParentalKey('news.NewsArticle', related_name='tagged_items')
class NewsArticle(Page):
date = models.DateField("Post date")
heading = models.CharField(max_length=250, help_text="Heading displayed on website")
subheading = models.CharField(max_length=250, blank=True, null=True)
author = models.CharField(max_length=250)
featured_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text="Image should be 1200 x 600"
)
featured_image_alt_text = models.CharField(max_length=250, blank=True, null=True)
def get_article_image(self):
return build_image_url(self.featured_image)
article_image = property(get_article_image)
tags = ClusterTaggableManager(through=NewsArticleTag, blank=True)
body = StreamField(BlogStreamBlock())
pin_to_top = models.BooleanField(default=False)
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def first_paragraph(self):
paragraphs = []
for block in self.body:
if block.block_type == 'paragraph':
paragraphs.append(str(block.value))
first_paragraph_parsed = []
soup = BeautifulSoup(paragraphs[0], "html.parser")
for tag in soup.findAll('p'):
first_paragraph_parsed.append(tag)
return str(first_paragraph_parsed[0])
search_fields = Page.search_fields + [
index.SearchField('body'),
index.SearchField('tags'),
]
content_panels = Page.content_panels + [
FieldPanel('date'),
FieldPanel('title'),
FieldPanel('heading'),
FieldPanel('subheading'),
FieldPanel('author'),
ImageChooserPanel('featured_image'),
FieldPanel('featured_image_alt_text'),
FieldPanel('tags'),
StreamFieldPanel('body'),
FieldPanel('pin_to_top'),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('date'),
APIField('title'),
APIField('heading'),
APIField('subheading'),
APIField('author'),
APIField('article_image'),
APIField('featured_image_small', serializer=ImageRenditionField('width-420', source='featured_image')),
APIField('featured_image_alt_text'),
APIField('tags'),
APIField('body'),
APIField('pin_to_top'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image')
]
parent_page_types = ['news.NewsIndex']
def save(self, *args, **kwargs):
if self.pin_to_top:
current_pins = self.__class__.objects.filter(pin_to_top=True)
for pin in current_pins:
if pin != self:
pin.pin_to_top = False
pin.save()
return super(NewsArticle, self).save(*args, **kwargs)
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/blog/{}/'.format(Site.find_for_request(request).root_url, self.slug),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
class Experts(models.Model):
name = models.CharField(max_length=255)
email = models.EmailField(blank=True, null=True)
title = models.CharField(max_length=255)
bio = models.TextField()
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
def get_expert_image(self):
return build_image_url(self.image)
expert_image = property(get_expert_image)
api_fields = [
APIField('name'),
APIField('email'),
APIField('title'),
APIField('bio'),
APIField('expert_image')
]
panels = [
FieldPanel('name'),
FieldPanel('email'),
FieldPanel('title'),
FieldPanel('bio'),
ImageChooserPanel('image'),
]
class ExpertsBios(Orderable, Experts):
experts_bios = ParentalKey('news.PressIndex', related_name='experts_bios')
class NewsMentionChooserBlock(SnippetChooserBlock):
def get_api_representation(self, value, context=None):
if value:
return {
'id': value.id,
'name': value.name,
'logo': value.news_logo,
}
class NewsMentionBlock(blocks.StructBlock):
source = NewsMentionChooserBlock(NewsSource)
url = blocks.URLBlock()
headline = blocks.CharBlock()
date = blocks.DateBlock()
class Meta:
icon = 'document'
class MissionStatement(models.Model):
statement = models.CharField(max_length=255)
api_fields = ('statement', )
class MissionStatements(Orderable, MissionStatement):
mission_statements = ParentalKey('news.PressIndex', related_name='mission_statements')
class PressIndex(Page):
press_kit = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def get_press_kit(self):
return build_image_url(self.press_kit)
press_kit_url = property(get_press_kit)
press_inquiry_name = models.CharField(max_length=255, blank=True, null=True)
press_inquiry_phone = models.CharField(max_length=255)
press_inquiry_email = models.EmailField()
experts_heading = models.CharField(max_length=255)
experts_blurb = models.TextField()
mentions = StreamField([
('mention', NewsMentionBlock(icon='document')),
], null=True)
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/press/'.format(Site.find_for_request(request).root_url),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
@property
def releases(self):
releases = PressRelease.objects.live().child_of(self)
releases_data = {}
for release in releases:
releases_data['press/{}'.format(release.slug)] = {
'detail_url': '/apps/cms/api/v2/pages/{}/'.format(release.pk),
'date': release.date,
'heading': release.heading,
'excerpt': release.excerpt,
'author': release.author,
}
return releases_data
content_panels = Page.content_panels + [
DocumentChooserPanel('press_kit'),
FieldPanel('press_inquiry_name'),
FieldPanel('press_inquiry_phone'),
FieldPanel('press_inquiry_email'),
FieldPanel('experts_heading'),
FieldPanel('experts_blurb'),
InlinePanel('experts_bios', label="Experts"),
StreamFieldPanel('mentions'),
InlinePanel('mission_statements', label="Mission Statement"),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('press_kit'),
APIField('press_kit_url'),
APIField('releases'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image'),
APIField('experts_heading'),
APIField('experts_blurb'),
APIField('experts_bios'),
APIField('mentions'),
APIField('mission_statements'),
APIField('press_inquiry_name'),
APIField('press_inquiry_phone'),
APIField('press_inquiry_email')
]
subpage_types = ['news.PressRelease']
parent_page_types = ['pages.HomePage']
max_count = 1
class PressRelease(Page):
date = models.DateField("PR date")
heading = models.CharField(max_length=250, help_text="Heading displayed on website")
subheading = models.CharField(max_length=250, blank=True, null=True)
author = models.CharField(max_length=250)
featured_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
featured_image_alt_text = models.CharField(max_length=250, blank=True, null=True)
def get_article_image(self):
return build_image_url(self.featured_image)
article_image = property(get_article_image)
excerpt = models.CharField(max_length=255)
body = StreamField(BlogStreamBlock())
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/press/{}'.format(Site.find_for_request(request).root_url, self.slug),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
search_fields = Page.search_fields + [
index.SearchField('body'),
]
content_panels = Page.content_panels + [
FieldPanel('date'),
FieldPanel('title'),
FieldPanel('heading'),
FieldPanel('subheading'),
FieldPanel('author'),
ImageChooserPanel('featured_image'),
FieldPanel('featured_image_alt_text'),
FieldPanel('excerpt'),
StreamFieldPanel('body'),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('date'),
APIField('title'),
APIField('heading'),
APIField('subheading'),
APIField('author'),
APIField('article_image'),
APIField('featured_image_alt_text'),
APIField('excerpt'),
APIField('body'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image')
]
| agpl-3.0 | -7,732,224,415,095,894,000 | 29.788845 | 119 | 0.614777 | false |
gonicus/gosa | common/src/gosa/common/components/mqtt_proxy.py | 1 | 3896 | # This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import uuid
from tornado import gen
from gosa.common import Environment
from gosa.common.components.json_exception import JSONRPCException
from gosa.common.components.mqtt_handler import MQTTHandler
from gosa.common.gjson import dumps, loads
from tornado.concurrent import Future
class MQTTException(Exception):
pass
class MQTTServiceProxy(object):
"""
The MQTTServiceProxy provides a simple way to use GOsa RPC
services from various clients. Using the proxy object, you
can directly call methods without the need to know where
it actually gets executed::
>>> from gosa.common.components.mqtt_proxy import MQTTServiceProxy
>>> proxy = MQTTServiceProxy('localhost')
>>> proxy.getMethods()
This will return a dictionary describing the available methods.
=============== ============
Parameter Description
=============== ============
mqttHandler MQTTHandler used to connect to the MQTT service broker
serviceAddress Address string describing the target queue to bind to, must be skipped if no special queue is needed
serviceName *internal*
methods *internal*
=============== ============
The MQTTService proxy creates a temporary MQTT *reply to* queue, which
is used for command results.
"""
worker = {}
def __init__(self, mqttHandler=None, serviceAddress=None, serviceName=None,
methods=None):
self.__handler = mqttHandler if mqttHandler is not None else MQTTHandler()
self.__serviceName = serviceName
self.__serviceAddress = serviceAddress
self.__methods = methods
self.env = Environment.getInstance()
# Retrieve methods
if self.__methods is None:
self.__serviceName = "getMethods"
self.__methods = self.__call__()
self.__serviceName = None
#pylint: disable=W0613
def login(self, user, password): # pragma: nocover
return True
def logout(self): # pragma: nocover
return True
def close(self): # pragma: nocover
pass
def getProxy(self):
return MQTTServiceProxy(self.__handler, self.__serviceAddress, None, methods=self.__methods)
def __getattr__(self, name):
if self.__serviceName is not None:
name = "%s/%s" % (self.__serviceName, name)
return MQTTServiceProxy(self.__handler, self.__serviceAddress, name, methods=self.__methods)
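    # Illustrative note (added; names are hypothetical): chained attribute
    # access builds the RPC method name, e.g. proxy.clientDispatch.someMethod()
    # produces a JSON-RPC payload with "method": "clientDispatch/someMethod",
    # published on the topic "<serviceAddress>/<random call uuid>".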
@gen.coroutine
def __call__(self, *args, **kwargs):
data = {}
if '__user__' in kwargs:
data['user'] = kwargs['__user__']
del kwargs['__user__']
if '__session_id__' in kwargs:
data['session_id'] = kwargs['__session_id__']
del kwargs['__session_id__']
# Default to 'core' queue
call_id = uuid.uuid4()
topic = "%s/%s" % (self.__serviceAddress, call_id)
if isinstance(self.__methods, Future):
self.__methods = yield self.__methods
if self.__methods and self.__serviceName not in self.__methods:
raise NameError("name '%s' not defined" % self.__serviceName)
# Send
data.update({
"method": self.__serviceName,
"id": "mqttrpc",
"sender": self.env.uuid
})
data["kwparams"] = kwargs
data["params"] = args
postdata = dumps(data)
response = yield self.__handler.send_sync_message(postdata, topic, qos=2)
resp = loads(response)
if 'error' in resp and resp['error'] is not None:
raise JSONRPCException(resp['error'])
return resp['result']
| lgpl-2.1 | 8,577,437,640,126,136,000 | 31.198347 | 120 | 0.61037 | false |
XtremeTeam/Lucy-bot | brain/plugins/delirium.py | 1 | 13549 | #===islucyplugin===
# -*- coding: utf-8 -*-
# lucy plugin
# delirium.py
# Initial Copyright © 2007 Als <[email protected]>
# Modifications Copyright © 2014 x-team <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
poke_nicks={}
inv_id=[]
PROTECT_INV=[]
turn_msgs={}
global_en2ru_table = dict(zip(u"abcdefghijklmnopqrstuvwxyzɐqɔpǝɟƃɥıɾʞlɯuodbɹsʇnʌʍxʎzABCDEFGHIJKLMNOPQRSTUVWXYZ", u"ɐqɔpǝɟƃɥıɾʞlɯuodbɹsʇnʌʍxʎzabcdefghijklmnopqrstuvwxyzɐqɔpǝɟƃɥıɾʞlɯuodbɹsʇnʌʍxʎzabcdefghijklmnopqrstuvwxyz"))
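# Added note: despite its name, the table above maps Latin letters to their
# upside-down look-alikes (and back), not to Russian. Illustrative example:
#   reduce(lambda x, y: global_en2ru_table.get(x, x) +
#          global_en2ru_table.get(y, y), u'hello')
#   # -> u'ɥǝllo'
# Characters outside the table are passed through unchanged.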
def handler_upside_last(type, source, parameters):
nick=source[2]
groupchat=source[1]
jid=get_true_jid(groupchat+'/'+nick)
if parameters:
reply(type,source,reduce(lambda x,y:global_en2ru_table.get(x,x)+global_en2ru_table.get(y,y),parameters))
else:
if turn_msgs[groupchat][jid] is None:
            reply(type,source,u'you have not written anything yet')
return
if turn_msgs[groupchat][jid] == u'upside':
reply(type,source,u'forbidden')
return
tmsg=turn_msgs[groupchat][jid]
reply(type,source,reduce(lambda x,y:global_en2ru_table.get(x,x)+global_en2ru_table.get(y,y),tmsg))
def handler_upside_save_msg(type, source, body):
time.sleep(1)
nick=source[2]
groupchat=source[1]
jid=get_true_jid(groupchat+'/'+nick)
if groupchat in turn_msgs.keys():
if jid in turn_msgs[groupchat].keys() and jid != groupchat and jid != JID:
turn_msgs[groupchat][jid]=body
def handler_upside_join(groupchat, nick, aff, role):
jid=get_true_jid(groupchat+'/'+nick)
if not groupchat in turn_msgs.keys():
turn_msgs[groupchat] = {}
if not jid in turn_msgs[groupchat].keys() and jid != JID:
turn_msgs[groupchat][jid]=None
def handler_poke(type, source, parameters):
if type=='private':
reply(type,source,u':-P')
return
groupchat = source[1]
if parameters:
if parameters==u'last10':
cnt=0
rep=''
            # nicks that have used "poke" in this room so far
            nicks = set(poke_nicks.get(source[1], []))
for x in nicks:
cnt=cnt+1
rep += str(cnt)+u') '+x+u'\n'
reply('private',source,rep[:-1])
return
if not poke_nicks.has_key(source[1]):
            poke_nicks[source[1]]=[]
if len(poke_nicks[source[1]])==10:
poke_nicks[source[1]]=[]
else:
poke_nicks[source[1]].append(source[2])
if not parameters == get_bot_nick(source[1]):
if parameters in GROUPCHATS[source[1]]:
pokes=[]
pokes.extend(poke_work(source[1]))
pokes.extend(eval(read_file('settings/delirium.txt'))['poke'])
rep = random.choice(pokes)
msg(source[1],u'/me '+rep % parameters)
else:
reply(type, source, u'Are you sure that username/jid is here :-?')
else:
reply(type, source, u'Intelligent, hard, yes? ]:->')
else:
reply(type, source, u'Dream :-D')
def handler_poke_add(type, source, parameters):
if not parameters:
        reply(type, source, u'And?')
        return
if not parameters.count('%s'):
reply(type, source, u'I do not see %s.')
return
res=poke_work(source[1],1,parameters)
if res:
reply(type, source, u' That poke has been added.')
else:
reply(type, source, u'No longer available.')
def handler_poke_del(type, source, parameters):
if not parameters:
        reply(type, source, u'And?')
        return
if parameters=='*':
parameters='0'
else:
try:
int(parameters)
except:
            reply(type,source,u'Invalid syntax!')
            return
res=poke_work(source[1],2,parameters)
if res:
reply(type, source, u'Deleted!')
else:
reply(type, source, u'No more available!')
def handler_poke_list(type, source, parameters):
rep,res=u'',poke_work(source[1],3)
if res:
res=sorted(res.items(),lambda x,y: int(x[0]) - int(y[0]))
for num,phrase in res:
rep+=num+u') '+phrase+u'\n'
reply(type,source,rep.strip())
else:
reply(type,source,u'No custom phrases!')
def handler_test(type, source, parameters):
reply(type,source,u'Passed! But you do know this is pointless :| .. i have better things to do')
def handler_clean_conf(type, source, parameters):
if GROUPCHATS.has_key(source[1]):
for x in range(1, 21):
msg(source[1], '')
time.sleep(0.1)
reply('public',source,u'This conference room has been cleaned')
def handler_afools_control(type, source, parameters):
if parameters:
try:
int(parameters)
except:
            reply(type,source,u'Invalid syntax!')
            return
if int(parameters)>1:
            reply(type,source,u'Invalid syntax!')
            return
if parameters=="1":
GCHCFGS[source[1]]['afools']=1
reply(type,source,u'Jokes are enabled!')
else:
GCHCFGS[source[1]]['afools']=0
reply(type,source,u'Jokes are disabled!')
write_file('settings/'+source[1]+'/config.cfg', str(GCHCFGS[source[1]]))
else:
if GCHCFGS[source[1]]['afools']==1:
reply(type,source,u'Jokes are enabled!')
else:
reply(type,source,u'Jokes are disabled!')
def get_afools_state(gch):
if not 'afools' in GCHCFGS[gch]:
GCHCFGS[gch]['afools']=0
def poke_work(gch,action=None,phrase=None):
DBPATH='settings/'+gch+'/delirium.txt'
if check_file(gch,'delirium.txt'):
pokedb = eval(read_file(DBPATH))
if action==1:
for x in range(1, 21):
if str(x) in pokedb.keys():
continue
else:
pokedb[str(x)]=phrase
write_file(DBPATH, str(pokedb))
return True
return False
elif action==2:
if phrase=='0':
pokedb.clear()
write_file(DBPATH, str(pokedb))
return True
else:
try:
del pokedb[phrase]
write_file(DBPATH, str(pokedb))
return True
except:
return False
elif action==3:
return pokedb
else:
return pokedb.values()
else:
return None
def remix_string(parameters):
remixed=[]
for word in parameters.split():
tmp=[]
if len(word)<=1:
remixed.append(word)
continue
elif len(word)==2:
tmp=list(word)
random.shuffle(tmp)
remixed.append(u''.join(tmp))
elif len(word)==3:
tmp1=list(word[1:])
tmp2=list(word[:-1])
tmp=random.choice([tmp1,tmp2])
if tmp==tmp1:
random.shuffle(tmp)
remixed.append(word[0]+u''.join(tmp))
else:
random.shuffle(tmp)
remixed.append(u''.join(tmp)+word[-1])
elif len(word)>=4:
tmp=list(word[1:-1])
random.shuffle(tmp)
remixed.append(word[0]+u''.join(tmp)+word[-1])
return u' '.join(remixed)
def handler_kick_ass(type, source, parameters):
if GROUPCHATS.has_key(source[1]):
if len(parameters.split()) == 3:
splitdata = string.split(parameters)
rep,jid,msgnum,smlnum = '','',int(splitdata[1]),int(splitdata[2])
if msgnum>500 or smlnum>500:
                reply(type,source,u'sorry, the amount should be no more than 500 :-(')
return
reply(type,source,u'WARNING SPAMMING!!!')
if splitdata[0]==u':)':
for x in range(0, msgnum):
for y in range(0, smlnum):
rep += u':) '
msg(source[1], rep)
rep = ''
time.sleep(0.5)
else:
if splitdata[0].count('@'):
jid=splitdata[0]
else:
jid=source[1]+'/'+splitdata[0]
print jid
for x in range(0, msgnum):
for y in range(0, smlnum):
rep += u':) '
msg(jid, rep)
rep=''
time.sleep(0.5)
reply(type,source,u'Success!!!')
else:
reply(type,source,u'read "help spam"')
def invite_join(msg):
mas, fromjid, body = msg.getChildren(), msg.getFrom(), ''
try:
cp=msg.getBody()
body=cp.split()[0]
except: return
if INVITE_JOIN!='1': return
if not fromjid in PROTECT_INV:
PROTECT_INV.append(fromjid)
for x in mas:
try:
gch=fromjid
file='settings/inviteblock.txt'
txt=eval(read_file(file))
if gch in txt:
				print 'room is in the blacklist'
return
if gch not in GROUPCHATS:
iq = xmpp.Iq('get')
id='dis'+str(random.randrange(1, 9999))
globals()['inv_id'].append(id)
iq.setID(id)
query=iq.addChild('query', {}, [], xmpp.NS_DISCO_ITEMS)
iq.setTo(gch)
JCON.SendAndCallForResponse(iq, inv_join_answ, {'gch': gch, 'body': body})
except: pass
def inv_join_answ(coze,res,gch,body):
id = res.getID()
if not id in globals()['inv_id']:
return
if res:
if res.getType()=='result':
try:
props=res.getQueryChildren()
d=''
n=0
for x in props:
i=x.getAttrs()['jid']
n+=1
print n
if n>2:
print 'ok'
gch=str(gch)
get_gch_cfg(gch)
#MACROS.load(gch)
join_groupchat(gch)
if popups_check(gch):
print 'joined'
#handler_admin_join('public', [gch,body,body], gch)
except:
pass
def hnd_ivite_block(type,source,parameters):
if not parameters:
try:
txt=eval(read_file('settings/inviteblock.txt'))
rep=''
for x in txt:
rep+=x+'\n'
if rep=='':
reply(type,source,u'dunno :-S')
return
reply(type,source,rep)
except:
pass
if len(parameters)<3:
return
try:
txt=eval(read_file('settings/inviteblock.txt'))
if not parameters.lower() in txt:
txt[parameters.lower()]={}
write_file('settings/inviteblock.txt',str(txt))
reply(type,source,u'whaaat? :-O '+parameters)
return
del txt[parameters.lower()]
write_file('settings/inviteblock.txt',str(txt))
reply(type,source,u'Deleted')
except:
pass
def handler_default_bot_nick(type, source, parameters):
add_gch(source[1], DEFAULT_NICK)
join_groupchat(source[1], DEFAULT_NICK)
reply(type, source, u'OK, the default nickname has been set.')
register_message_handler(handler_upside_save_msg)
register_join_handler(handler_upside_join)
register_command_handler(handler_upside_last, 'upside', ['all'], 10, 'Replace English letters with upside-down ones.', 'upside', ['upside'])
register_command_handler(handler_default_bot_nick, 'sdbn', ['delirium','en','all'], 20, 'Change the nickname of the bot to its default, which is '+DEFAULT_NICK, 'sdbn')
register_command_handler(handler_poke, 'poke', ['fun','all','*','poke'], 10, 'Poke the user. Forces him to pay attention to you in chat, especially useful for slowpokes.\nUse last10 instead of a nick to show a list of the latest pokes.', 'poke <nick>|<parameter>', ['poke qwerty','poke + sing %s','poke - 2','poke *'])
register_command_handler(handler_poke_add, 'poke+', ['fun','all','*','poke'], 20, 'Add a custom phrase. The variable %s in the phrase refers to the place to insert a nickname (mandatory parameter). The phrase should be written in the third person; it will be used in the form "/me your phrase". The maximum number of custom phrases is 20.', 'poke+ <phrase>', ['poke+ sing %s'])
register_command_handler(handler_poke_del, 'poke-', ['fun','all','*','poke'], 20, 'Delete a custom phrase. Write the number of the phrase and the bot will delete it permanently. Use the "poke*" command to view the list. To delete all phrases, just specify "*" instead of a phrase number.', 'poke- <number>', ['poke- 5','poke- *'])
register_command_handler(handler_poke_list, 'poke*', ['fun','all','*','poke'], 20, 'Displays a list of all custom phrases and their numbers.', 'poke*', ['poke*'])
register_command_handler(handler_test, 'test', ['fun','info','all'], 0, 'Check the bot; if it is alive, it simply answers!', 'test', ['test'])
register_command_handler(handler_clean_conf, 'clean', ['fun','muc','all','*'], 15, 'Clean the current conference (by flooding it with empty messages).', 'clean', ['clean'])
register_command_handler(handler_afools_control, 'afools', ['fun','muc','all','*'], 30, 'Enables and disables the bot\'s jokes, which the bot sometimes substitutes for the standard response of a command (the command itself is always executed!).', 'afools <1|0>', ['afools 1','afools 0'])
#register_command_handler(invite_join)
register_command_handler(hnd_ivite_block, 'inviteblock', ['все','мод','суперадмин'], 40, 'Adds/removes a ban on the bot joining a specific conference room by invite. Without parameters, shows the list.', 'inviteblock <room>', ['inviteblock ug@conference.jabber.ru'])
#The command handlers listed below are not recommended
register_command_handler(handler_kick_ass, 'spam', ['fun','superadmin','muc','all','*'], 31, 'Spam a JID in the roster or a nick in the current conference with smileys ( :) ).\nThe target of the spam is determined by the first parameter <nick>.\nThe number of messages is determined by the second parameter <amount>.\nThe number of smileys per message is determined by the third parameter <amount>.\nWrite this command in private.', 'spam <nick> <amount> <amount>', ['spam [email protected] 50 10','spam guy 100 8'])
register_stage1_init(get_afools_state)
| gpl-2.0 | -7,725,319,307,172,654,000 | 35.870166 | 468 | 0.633551 | false |
Sightline-Networks/email_bar | email_bar.py | 1 | 1124 | #!/bin/python3
from argparse import ArgumentParser
from configparser import ConfigParser
from os.path import expanduser
from mailbox import MaildirMessage, Maildir
config = ConfigParser()
config_mailboxes = {}
config.read(expanduser('~/.config/email_bar.cfg'))
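# The config file read above is assumed to look roughly like this (the section
# names are arbitrary mailbox labels and the paths are only hypothetical):
#   [work]
#   path = /home/user/Maildir/work
#   [personal]
#   path = /home/user/Maildir/personal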
parser = ArgumentParser()
parser.add_argument('--only', help='only check specified mailbox', action='store')
parser.add_argument('--no-title', help='do not display the title', action='store_true')
args = parser.parse_args()
if args.only:
config_mailboxes[args.only] = 0
# Else read through the config and check all of the mailboxes
else:
for mailbox in config.sections():
config_mailboxes[mailbox] = 0
# Iter through and see what has not been read
for mailbox in config_mailboxes:
maildir = Maildir(config.get(mailbox, 'path'))
for mail in maildir:
if 'S' not in mail.get_flags():
config_mailboxes[mailbox] += 1
for mailbox in config_mailboxes.keys():
if args.no_title:
print(config_mailboxes[mailbox])
else:
print("%s: %s " % (mailbox, config_mailboxes[mailbox]))
| gpl-2.0 | -6,381,215,655,860,284,000 | 22.416667 | 87 | 0.689502 | false |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/kafka/protocol/admin.py | 1 | 7824 | from __future__ import absolute_import
from .api import Request, Response
from .types import Array, Boolean, Bytes, Int16, Int32, Schema, String
class ApiVersionResponse_v0(Response):
API_KEY = 18
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('api_versions', Array(
('api_key', Int16),
('min_version', Int16),
('max_version', Int16)))
)
class ApiVersionResponse_v1(Response):
API_KEY = 18
API_VERSION = 1
SCHEMA = Schema(
('error_code', Int16),
('api_versions', Array(
('api_key', Int16),
('min_version', Int16),
('max_version', Int16))),
('throttle_time_ms', Int32)
)
class ApiVersionRequest_v0(Request):
API_KEY = 18
API_VERSION = 0
RESPONSE_TYPE = ApiVersionResponse_v0
SCHEMA = Schema()
class ApiVersionRequest_v1(Request):
API_KEY = 18
API_VERSION = 1
RESPONSE_TYPE = ApiVersionResponse_v1
SCHEMA = ApiVersionRequest_v0.SCHEMA
ApiVersionRequest = [ApiVersionRequest_v0, ApiVersionRequest_v1]
ApiVersionResponse = [ApiVersionResponse_v0, ApiVersionResponse_v1]
class CreateTopicsResponse_v0(Response):
API_KEY = 19
API_VERSION = 0
SCHEMA = Schema(
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class CreateTopicsResponse_v1(Response):
API_KEY = 19
API_VERSION = 1
SCHEMA = Schema(
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16),
('error_message', String('utf-8'))))
)
class CreateTopicsResponse_v2(Response):
API_KEY = 19
API_VERSION = 2
SCHEMA = Schema(
('throttle_time_ms', Int32),
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16),
('error_message', String('utf-8'))))
)
class CreateTopicsRequest_v0(Request):
API_KEY = 19
API_VERSION = 0
RESPONSE_TYPE = CreateTopicsResponse_v0
SCHEMA = Schema(
('create_topic_requests', Array(
('topic', String('utf-8')),
('num_partitions', Int32),
('replication_factor', Int16),
('replica_assignment', Array(
('partition_id', Int32),
('replicas', Array(Int32)))),
('configs', Array(
('config_key', String('utf-8')),
('config_value', String('utf-8')))))),
('timeout', Int32)
)
class CreateTopicsRequest_v1(Request):
API_KEY = 19
API_VERSION = 1
RESPONSE_TYPE = CreateTopicsResponse_v1
SCHEMA = Schema(
('create_topic_requests', Array(
('topic', String('utf-8')),
('num_partitions', Int32),
('replication_factor', Int16),
('replica_assignment', Array(
('partition_id', Int32),
('replicas', Array(Int32)))),
('configs', Array(
('config_key', String('utf-8')),
('config_value', String('utf-8')))))),
('timeout', Int32),
('validate_only', Boolean)
)
class CreateTopicsRequest_v2(Request):
API_KEY = 19
API_VERSION = 2
RESPONSE_TYPE = CreateTopicsResponse_v2
SCHEMA = CreateTopicsRequest_v1.SCHEMA
CreateTopicsRequest = [
CreateTopicsRequest_v0, CreateTopicsRequest_v1, CreateTopicsRequest_v2
]
CreateTopicsResponse = [
CreateTopicsResponse_v0, CreateTopicsResponse_v1, CreateTopicsResponse_v2
]
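# Rough usage sketch (hypothetical values): Struct subclasses like these accept
# their schema fields as constructor arguments and serialize with .encode(),
# e.g.
#   request = CreateTopicsRequest_v0(
#       create_topic_requests=[('example-topic', 1, 1, [], [])],
#       timeout=1000)
#   payload = request.encode()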
class DeleteTopicsResponse_v0(Response):
API_KEY = 20
API_VERSION = 0
SCHEMA = Schema(
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class DeleteTopicsResponse_v1(Response):
API_KEY = 20
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class DeleteTopicsRequest_v0(Request):
API_KEY = 20
API_VERSION = 0
RESPONSE_TYPE = DeleteTopicsResponse_v0
SCHEMA = Schema(
('topics', Array(String('utf-8'))),
('timeout', Int32)
)
class DeleteTopicsRequest_v1(Request):
API_KEY = 20
API_VERSION = 1
RESPONSE_TYPE = DeleteTopicsResponse_v1
SCHEMA = DeleteTopicsRequest_v0.SCHEMA
DeleteTopicsRequest = [DeleteTopicsRequest_v0, DeleteTopicsRequest_v1]
DeleteTopicsResponse = [DeleteTopicsResponse_v0, DeleteTopicsResponse_v1]
class ListGroupsResponse_v0(Response):
API_KEY = 16
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsResponse_v1(Response):
API_KEY = 16
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsRequest_v0(Request):
API_KEY = 16
API_VERSION = 0
RESPONSE_TYPE = ListGroupsResponse_v0
SCHEMA = Schema()
class ListGroupsRequest_v1(Request):
API_KEY = 16
API_VERSION = 1
RESPONSE_TYPE = ListGroupsResponse_v1
SCHEMA = ListGroupsRequest_v0.SCHEMA
ListGroupsRequest = [ListGroupsRequest_v0, ListGroupsRequest_v1]
ListGroupsResponse = [ListGroupsResponse_v0, ListGroupsResponse_v1]
class DescribeGroupsResponse_v0(Response):
API_KEY = 15
API_VERSION = 0
SCHEMA = Schema(
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsResponse_v1(Response):
API_KEY = 15
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsRequest_v0(Request):
API_KEY = 15
API_VERSION = 0
RESPONSE_TYPE = DescribeGroupsResponse_v0
SCHEMA = Schema(
('groups', Array(String('utf-8')))
)
class DescribeGroupsRequest_v1(Request):
API_KEY = 15
API_VERSION = 1
RESPONSE_TYPE = DescribeGroupsResponse_v1
SCHEMA = DescribeGroupsRequest_v0.SCHEMA
DescribeGroupsRequest = [DescribeGroupsRequest_v0, DescribeGroupsRequest_v1]
DescribeGroupsResponse = [DescribeGroupsResponse_v0, DescribeGroupsResponse_v1]
class SaslHandShakeResponse_v0(Response):
API_KEY = 17
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('enabled_mechanisms', Array(String('utf-8')))
)
class SaslHandShakeRequest_v0(Request):
API_KEY = 17
API_VERSION = 0
RESPONSE_TYPE = SaslHandShakeResponse_v0
SCHEMA = Schema(
('mechanism', String('utf-8'))
)
SaslHandShakeRequest = [SaslHandShakeRequest_v0]
SaslHandShakeResponse = [SaslHandShakeResponse_v0]
| mit | -3,200,656,330,581,579,300 | 25.255034 | 79 | 0.576176 | false |
kylef/bluepaste | bluepaste/models.py | 1 | 2381 | import datetime
import json
from hashlib import sha1
import requests
import peewee
from rivr_peewee import Database
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import JsonLexer
from bluepaste.lexer import BlueprintLexer
from bluepaste.config import BLUEPRINT_PARSER_URL
database = Database()
EXPIRE_CHOICES = (
(600, 'In 10 minutes'),
(3600, 'In one hour'),
(3600*24, 'In one day'),
(3600*24*7, 'In one week'),
(3600*24*14, 'In two weeks'),
(3600*24*30, 'In one month'),
)
EXPIRE_DEFAULT = 3600*24*14
class User(database.Model):
email = peewee.CharField(unique=True)
class Blueprint(database.Model):
slug = peewee.CharField(max_length=40, unique=True)
expires = peewee.DateTimeField()
author = peewee.ForeignKeyField(User, related_name='blueprints', null=True)
def create_revision(self, message, content):
ast = requests.post(BLUEPRINT_PARSER_URL, data=content).json()['ast']
ast_json = json.dumps(ast)
created_at = datetime.datetime.now()
slug_content = '{}\n{}'.format(created_at.isoformat(), content)
slug = sha1(slug_content).hexdigest()
return Revision.create(blueprint=self, slug=slug, content=content, message=message, ast_json=ast_json)
@property
def last_revision(self):
return self.revisions[0]
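# A minimal usage sketch (the names and values here are hypothetical, not part
# of the original module):
#   blueprint = Blueprint.create(slug=some_sha1, expires=some_datetime)
#   revision = blueprint.create_revision(u'Initial paste', blueprint_source)
#   html = revision.highlighted_content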
class Revision(database.Model):
blueprint = peewee.ForeignKeyField(Blueprint, related_name='revisions')
slug = peewee.CharField(max_length=40, unique=True)
content = peewee.TextField()
created_at = peewee.DateTimeField(default=datetime.datetime.now)
ast_json = peewee.TextField()
message = peewee.TextField(null=True)
class Meta:
order_by = ('-created_at',)
indexes = (
(('blueprint', 'slug'), True),
)
def __str__(self):
return self.content
@property
def highlighted_content(self):
return highlight(self.content, BlueprintLexer(), HtmlFormatter())
@property
def ast(self):
if not hasattr(self, '_ast'):
self._ast = json.loads(self.ast_json)
return self._ast
@property
def highlighted_ast(self):
ast = json.dumps(self.ast, sort_keys=True, indent=2, separators=(',', ': '))
return highlight(ast, JsonLexer(), HtmlFormatter())
| mit | 5,149,579,032,192,973,000 | 27.686747 | 110 | 0.660227 | false |
ovnicraft/server-tools | auth_brute_force/tests/test_brute_force.py | 1 | 14413 | # -*- coding: utf-8 -*-
# Copyright 2017 Tecnativa - Jairo Llopis
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from threading import current_thread
from urllib import urlencode
from decorator import decorator
from mock import patch
from werkzeug.utils import redirect
from odoo import http
from odoo.tests.common import at_install, HttpCase, post_install
from odoo.tools import mute_logger
from ..models import res_authentication_attempt, res_users
GARBAGE_LOGGERS = (
"werkzeug",
res_authentication_attempt.__name__,
res_users.__name__,
)
# HACK https://github.com/odoo/odoo/pull/24833
def skip_unless_addons_installed(*addons):
"""Decorator to skip a test unless some addons are installed.
:param *str addons:
Addon names that should be installed.
:param reason:
Explain why you must skip this test.
"""
@decorator
def _wrapper(method, self, *args, **kwargs):
        missing = self.addons_installed(*addons)
        if missing:
            self.skipTest("Required addons not installed: %s" %
                          ",".join(sorted(missing)))
return method(self, *args, **kwargs)
return _wrapper
@at_install(False)
@post_install(True)
# Skip CSRF validation on tests
@patch(http.__name__ + ".WebRequest.validate_csrf", return_value=True)
# Skip specific browser forgery on redirections
@patch(http.__name__ + ".redirect_with_hash", side_effect=redirect)
# Faster tests without calls to geolocation API
@patch(res_authentication_attempt.__name__ + ".urlopen", return_value="")
class BruteForceCase(HttpCase):
def setUp(self):
super(BruteForceCase, self).setUp()
# Some tests could retain environ from last test and produce fake
# results without this patch
# HACK https://github.com/odoo/odoo/issues/24183
# TODO Remove in v12
try:
del current_thread().environ
except AttributeError:
pass
# Complex password to avoid conflicts with `password_security`
self.good_password = "Admin$%02584"
self.data_demo = {
"login": "demo",
"password": "Demo%&/(908409**",
}
with self.cursor() as cr:
env = self.env(cr)
env["ir.config_parameter"].set_param(
"auth_brute_force.max_by_ip_user", 3)
env["ir.config_parameter"].set_param(
"auth_brute_force.max_by_ip", 4)
# Clean attempts to be able to count in tests
env["res.authentication.attempt"].search([]).unlink()
# Make sure involved users have good passwords
env.user.password = self.good_password
env["res.users"].search([
("login", "=", self.data_demo["login"]),
]).password = self.data_demo["password"]
# HACK https://github.com/odoo/odoo/pull/24833
    def addons_installed(self, *addons):
        """Return the set of requested addons that are not installed."""
found = self.env["ir.module.module"].search([
("name", "in", addons),
("state", "not in", ["uninstalled", "uninstallable"]),
])
return set(addons) - set(found.mapped("name"))
@skip_unless_addons_installed("web")
@mute_logger(*GARBAGE_LOGGERS)
def test_web_login_existing(self, *args):
"""Remote is banned with real user on web login form."""
data1 = {
"login": "admin",
"password": "1234", # Wrong
}
# Make sure user is logged out
self.url_open("/web/session/logout", timeout=30)
# Fail 3 times
for n in range(3):
response = self.url_open("/web/login", bytes(urlencode(data1)), 30)
# If you fail, you get /web/login again
self.assertTrue(
response.geturl().endswith("/web/login"),
"Unexpected URL %s" % response.geturl(),
)
# Admin banned, demo not
with self.cursor() as cr:
env = self.env(cr)
self.assertFalse(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
data1["login"],
),
)
self.assertTrue(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
"demo",
),
)
# Now I know the password, but login is rejected too
data1["password"] = self.good_password
response = self.url_open("/web/login", bytes(urlencode(data1)), 30)
self.assertTrue(
response.geturl().endswith("/web/login"),
"Unexpected URL %s" % response.geturl(),
)
# IP has been banned, demo user cannot login
with self.cursor() as cr:
env = self.env(cr)
self.assertFalse(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
"demo",
),
)
# Attempts recorded
with self.cursor() as cr:
env = self.env(cr)
failed = env["res.authentication.attempt"].search([
("result", "=", "failed"),
("login", "=", data1["login"]),
("remote", "=", "127.0.0.1"),
])
self.assertEqual(len(failed), 3)
banned = env["res.authentication.attempt"].search([
("result", "=", "banned"),
("remote", "=", "127.0.0.1"),
])
self.assertEqual(len(banned), 1)
# Unban
banned.action_whitelist_add()
# Try good login, it should work now
response = self.url_open("/web/login", bytes(urlencode(data1)), 30)
self.assertTrue(response.geturl().endswith("/web"))
@skip_unless_addons_installed("web")
@mute_logger(*GARBAGE_LOGGERS)
def test_web_login_unexisting(self, *args):
"""Remote is banned with fake user on web login form."""
data1 = {
"login": "administrator", # Wrong
"password": self.good_password,
}
# Make sure user is logged out
self.url_open("/web/session/logout", timeout=30)
# Fail 3 times
for n in range(3):
response = self.url_open("/web/login", bytes(urlencode(data1)), 30)
# If you fail, you get /web/login again
self.assertTrue(
response.geturl().endswith("/web/login"),
"Unexpected URL %s" % response.geturl(),
)
# Admin banned, demo not
with self.cursor() as cr:
env = self.env(cr)
self.assertFalse(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
data1["login"],
),
)
self.assertTrue(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
self.data_demo["login"],
),
)
# Demo user can login
response = self.url_open(
"/web/login",
bytes(urlencode(self.data_demo)),
30,
)
# If you pass, you get /web
self.assertTrue(
response.geturl().endswith("/web"),
"Unexpected URL %s" % response.geturl(),
)
self.url_open("/web/session/logout", timeout=30)
# Attempts recorded
with self.cursor() as cr:
env = self.env(cr)
failed = env["res.authentication.attempt"].search([
("result", "=", "failed"),
("login", "=", data1["login"]),
("remote", "=", "127.0.0.1"),
])
self.assertEqual(len(failed), 3)
banned = env["res.authentication.attempt"].search([
("result", "=", "banned"),
("login", "=", data1["login"]),
("remote", "=", "127.0.0.1"),
])
self.assertEqual(len(banned), 0)
@mute_logger(*GARBAGE_LOGGERS)
def test_xmlrpc_login_existing(self, *args):
"""Remote is banned with real user on XML-RPC login."""
data1 = {
"login": "admin",
"password": "1234", # Wrong
}
# Fail 3 times
for n in range(3):
self.assertFalse(self.xmlrpc_common.authenticate(
self.env.cr.dbname, data1["login"], data1["password"], {}))
# Admin banned, demo not
with self.cursor() as cr:
env = self.env(cr)
self.assertFalse(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
data1["login"],
),
)
self.assertTrue(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
"demo",
),
)
# Now I know the password, but login is rejected too
data1["password"] = self.good_password
self.assertFalse(self.xmlrpc_common.authenticate(
self.env.cr.dbname, data1["login"], data1["password"], {}))
# IP has been banned, demo user cannot login
with self.cursor() as cr:
env = self.env(cr)
self.assertFalse(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
"demo",
),
)
# Attempts recorded
with self.cursor() as cr:
env = self.env(cr)
failed = env["res.authentication.attempt"].search([
("result", "=", "failed"),
("login", "=", data1["login"]),
("remote", "=", "127.0.0.1"),
])
self.assertEqual(len(failed), 3)
banned = env["res.authentication.attempt"].search([
("result", "=", "banned"),
("remote", "=", "127.0.0.1"),
])
self.assertEqual(len(banned), 1)
# Unban
banned.action_whitelist_add()
# Try good login, it should work now
self.assertTrue(self.xmlrpc_common.authenticate(
self.env.cr.dbname, data1["login"], data1["password"], {}))
@mute_logger(*GARBAGE_LOGGERS)
def test_xmlrpc_login_unexisting(self, *args):
"""Remote is banned with fake user on XML-RPC login."""
data1 = {
"login": "administrator", # Wrong
"password": self.good_password,
}
# Fail 3 times
for n in range(3):
self.assertFalse(self.xmlrpc_common.authenticate(
self.env.cr.dbname, data1["login"], data1["password"], {}))
# Admin banned, demo not
with self.cursor() as cr:
env = self.env(cr)
self.assertFalse(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
data1["login"],
),
)
self.assertTrue(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
self.data_demo["login"],
),
)
# Demo user can login
self.assertTrue(self.xmlrpc_common.authenticate(
self.env.cr.dbname,
self.data_demo["login"],
self.data_demo["password"],
{},
))
# Attempts recorded
with self.cursor() as cr:
env = self.env(cr)
failed = env["res.authentication.attempt"].search([
("result", "=", "failed"),
("login", "=", data1["login"]),
("remote", "=", "127.0.0.1"),
])
self.assertEqual(len(failed), 3)
banned = env["res.authentication.attempt"].search([
("result", "=", "banned"),
("login", "=", data1["login"]),
("remote", "=", "127.0.0.1"),
])
self.assertEqual(len(banned), 0)
@mute_logger(*GARBAGE_LOGGERS)
def test_orm_login_existing(self, *args):
"""No bans on ORM login with an existing user."""
data1 = {
"login": "admin",
"password": "1234", # Wrong
}
with self.cursor() as cr:
env = self.env(cr)
# Fail 3 times
for n in range(3):
self.assertFalse(
env["res.users"].authenticate(
cr.dbname, data1["login"], data1["password"], {}))
self.assertEqual(
env["res.authentication.attempt"].search(count=True, args=[]),
0,
)
self.assertTrue(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
data1["login"],
),
)
# Now I know the password, and login works
data1["password"] = self.good_password
self.assertTrue(
env["res.users"].authenticate(
cr.dbname, data1["login"], data1["password"], {}))
@mute_logger(*GARBAGE_LOGGERS)
def test_orm_login_unexisting(self, *args):
"""No bans on ORM login with an unexisting user."""
data1 = {
"login": "administrator", # Wrong
"password": self.good_password,
}
with self.cursor() as cr:
env = self.env(cr)
# Fail 3 times
for n in range(3):
self.assertFalse(
env["res.users"].authenticate(
cr.dbname, data1["login"], data1["password"], {}))
self.assertEqual(
env["res.authentication.attempt"].search(count=True, args=[]),
0,
)
self.assertTrue(
env["res.authentication.attempt"]._trusted(
"127.0.0.1",
data1["login"],
),
)
# Now I know the user, and login works
data1["login"] = "admin"
self.assertTrue(
env["res.users"].authenticate(
cr.dbname, data1["login"], data1["password"], {}))
| agpl-3.0 | -5,105,628,558,397,867,000 | 35.6743 | 79 | 0.497398 | false |
jacquerie/inspire-dojson | inspire_dojson/hep/rules/bd0xx.py | 1 | 12752 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""DoJSON rules for MARC fields in 0xx."""
from __future__ import absolute_import, division, print_function
import re
from collections import defaultdict
import pycountry
from dojson import utils
from idutils import is_arxiv_post_2007, is_doi, is_handle, normalize_doi
from inspire_schemas.api import load_schema
from inspire_schemas.utils import normalize_arxiv_category
from inspire_utils.helpers import force_list
from ..model import hep, hep2marc
from ...utils import force_single_element, normalize_isbn
RE_LANGUAGE = re.compile('\/| or | and |,|=|\s+')
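# For example, RE_LANGUAGE.split(u'English/French and German') yields
# [u'English', u'French', u'German'], which languages() further below maps to
# ISO 639-1 codes through pycountry.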
@hep.over('isbns', '^020..')
@utils.for_each_value
def isbns(self, key, value):
"""Populate the ``isbns`` key."""
def _get_medium(value):
def _normalize(medium):
schema = load_schema('hep')
valid_media = schema['properties']['isbns']['items']['properties']['medium']['enum']
medium = medium.lower().replace('-', '').replace(' ', '')
if medium in valid_media:
return medium
elif medium == 'ebook':
return 'online'
elif medium == 'paperback':
return 'softcover'
return ''
medium = force_single_element(value.get('b', ''))
normalized_medium = _normalize(medium)
return normalized_medium
def _get_isbn(value):
a_value = force_single_element(value.get('a', ''))
normalized_a_value = a_value.replace('.', '')
if normalized_a_value:
return normalize_isbn(normalized_a_value)
return {
'medium': _get_medium(value),
'value': _get_isbn(value),
}
@hep2marc.over('020', 'isbns')
@utils.for_each_value
def isbns2marc(self, key, value):
"""Populate the ``020`` MARC field."""
return {
'a': value.get('value'),
'b': value.get('medium'),
}
@hep.over('dois', '^0247.')
def dois(self, key, value):
"""Populate the ``dois`` key.
Also populates the ``persistent_identifiers`` key through side effects.
"""
def _get_first_non_curator_source(sources):
sources_without_curator = [el for el in sources if el.upper() != 'CURATOR']
return force_single_element(sources_without_curator)
def _get_material(value):
MATERIAL_MAP = {
'ebook': 'publication',
}
q_value = force_single_element(value.get('q', ''))
normalized_q_value = q_value.lower()
return MATERIAL_MAP.get(normalized_q_value, normalized_q_value)
def _is_doi(id_, type_):
return (not type_ or type_.upper() == 'DOI') and is_doi(id_)
def _is_handle(id_, type_):
return (not type_ or type_.upper() == 'HDL') and is_handle(id_)
dois = self.get('dois', [])
persistent_identifiers = self.get('persistent_identifiers', [])
values = force_list(value)
for value in values:
id_ = force_single_element(value.get('a', ''))
material = _get_material(value)
schema = force_single_element(value.get('2', ''))
sources = force_list(value.get('9'))
source = _get_first_non_curator_source(sources)
if _is_doi(id_, schema):
dois.append({
'material': material,
'source': source,
'value': normalize_doi(id_),
})
else:
schema = 'HDL' if _is_handle(id_, schema) else schema
persistent_identifiers.append({
'material': material,
'schema': schema,
'source': source,
'value': id_,
})
self['persistent_identifiers'] = persistent_identifiers
return dois
@hep2marc.over('0247', '^dois$')
@utils.for_each_value
def dois2marc(self, key, value):
"""Populate the ``0247`` MARC field."""
return {
'2': 'DOI',
'9': value.get('source'),
'a': value.get('value'),
'q': value.get('material'),
}
@hep2marc.over('0247', '^persistent_identifiers$')
@utils.for_each_value
def persistent_identifiers2marc(self, key, value):
"""Populate the ``0247`` MARC field."""
return {
'2': value.get('schema'),
'9': value.get('source'),
'a': value.get('value'),
'q': value.get('material'),
}
@hep.over('texkeys', '^035..')
def texkeys(self, key, value):
"""Populate the ``texkeys`` key.
Also populates the ``external_system_identifiers`` and ``_desy_bookkeeping`` keys through side effects.
"""
def _is_oai(id_, schema):
return id_.startswith('oai:')
def _is_desy(id_, schema):
return id_ and schema in ('DESY',)
def _is_texkey(id_, schema):
return id_ and schema in ('INSPIRETeX', 'SPIRESTeX')
texkeys = self.get('texkeys', [])
external_system_identifiers = self.get('external_system_identifiers', [])
_desy_bookkeeping = self.get('_desy_bookkeeping', [])
values = force_list(value)
for value in values:
ids = force_list(value.get('a', ''))
other_ids = force_list(value.get('z', ''))
schema = force_single_element(value.get('9', ''))
for id_ in ids:
id_ = id_.strip()
if not id_:
continue
if _is_texkey(id_, schema):
texkeys.insert(0, id_)
elif _is_oai(id_, schema):
continue # XXX: ignored.
elif _is_desy(id_, schema):
_desy_bookkeeping.append({'identifier': id_})
else:
external_system_identifiers.insert(0, {
'schema': schema,
'value': id_,
})
for id_ in other_ids:
id_ = id_.strip()
if not id_:
continue
if _is_texkey(id_, schema):
texkeys.append(id_)
elif _is_oai(id_, schema):
continue # XXX: ignored.
elif _is_desy(id_, schema):
_desy_bookkeeping.append({'identifier': id_})
else:
external_system_identifiers.append({
'schema': schema,
'value': id_,
})
self['external_system_identifiers'] = external_system_identifiers
self['_desy_bookkeeping'] = _desy_bookkeeping
return texkeys
@hep2marc.over('035', '^texkeys$')
def texkeys2marc(self, key, value):
"""Populate the ``035`` MARC field."""
result = []
values = force_list(value)
if values:
value = values[0]
result.append({
'9': 'INSPIRETeX',
'a': value,
})
for value in values[1:]:
result.append({
'9': 'INSPIRETeX',
'z': value,
})
return result
@hep2marc.over('035', '^external_system_identifiers$')
def external_system_identifiers2marc(self, key, value):
"""Populate the ``035`` MARC field.
Also populates the ``970`` MARC field through side effects and an extra
``id_dict`` dictionary that holds potentially duplicate IDs that are
post-processed in a filter.
"""
def _is_scheme_cernkey(id_, schema):
return schema == 'CERNKEY'
def _is_scheme_spires(id_, schema):
return schema == 'SPIRES'
result_035 = self.get('035', [])
id_dict = self.get('id_dict', defaultdict(list))
result_970 = self.get('970', [])
values = force_list(value)
for value in values:
id_ = value.get('value')
schema = value.get('schema')
if _is_scheme_spires(id_, schema):
result_970.append({
'a': id_,
})
elif _is_scheme_cernkey(id_, schema):
result_035.append({
'9': 'CERNKEY',
'z': id_,
})
else:
id_dict[schema].append(id_)
self['970'] = result_970
self['id_dict'] = id_dict
return result_035
@hep.over('arxiv_eprints', '^037..')
def arxiv_eprints(self, key, value):
"""Populate the ``arxiv_eprints`` key.
Also populates the ``report_numbers`` key through side effects.
"""
def _get_clean_arxiv_eprint(id_):
return id_.split(':')[-1]
def _is_arxiv_eprint(id_, source):
return source == 'arXiv'
def _is_hidden_report_number(other_id, source):
return other_id
def _get_clean_source(source):
if source == 'arXiv:reportnumber':
return 'arXiv'
return source
arxiv_eprints = self.get('arxiv_eprints', [])
report_numbers = self.get('report_numbers', [])
values = force_list(value)
for value in values:
id_ = force_single_element(value.get('a', ''))
other_id = force_single_element(value.get('z', ''))
categories = [normalize_arxiv_category(category) for category
in force_list(value.get('c'))]
source = force_single_element(value.get('9', ''))
if _is_arxiv_eprint(id_, source):
arxiv_eprints.append({
'categories': categories,
'value': _get_clean_arxiv_eprint(id_),
})
elif _is_hidden_report_number(other_id, source):
report_numbers.append({
'hidden': True,
'source': _get_clean_source(source),
'value': other_id,
})
else:
report_numbers.append({
'source': _get_clean_source(source),
'value': id_,
})
self['report_numbers'] = report_numbers
return arxiv_eprints
@hep2marc.over('037', '^arxiv_eprints$')
def arxiv_eprints2marc(self, key, values):
"""Populate the ``037`` MARC field.
Also populates the ``035`` and the ``65017`` MARC fields through side effects.
"""
result_037 = self.get('037', [])
result_035 = self.get('035', [])
result_65017 = self.get('65017', [])
for value in values:
arxiv_id = value.get('value')
arxiv_id = 'arXiv:' + arxiv_id if is_arxiv_post_2007(arxiv_id) else arxiv_id
result_037.append({
'9': 'arXiv',
'a': arxiv_id,
'c': force_single_element(value.get('categories')),
})
result_035.append({
'9': 'arXiv',
'a': 'oai:arXiv.org:' + value.get('value'),
})
categories = force_list(value.get('categories'))
for category in categories:
result_65017.append({
'2': 'arXiv',
'a': category,
})
self['65017'] = result_65017
self['035'] = result_035
return result_037
@hep2marc.over('037', '^report_numbers$')
@utils.for_each_value
def report_numbers2marc(self, key, value):
"""Populate the ``037`` MARC field."""
def _get_mangled_source(source):
if source == 'arXiv':
return 'arXiv:reportnumber'
return source
source = _get_mangled_source(value.get('source'))
if value.get('hidden'):
return {
'9': source,
'z': value.get('value'),
}
return {
'9': source,
'a': value.get('value'),
}
@hep.over('languages', '^041..')
def languages(self, key, value):
"""Populate the ``languages`` key."""
languages = self.get('languages', [])
values = force_list(value.get('a'))
for value in values:
for language in RE_LANGUAGE.split(value):
try:
name = language.strip().capitalize()
languages.append(pycountry.languages.get(name=name).alpha_2)
except KeyError:
pass
return languages
@hep2marc.over('041', '^languages$')
@utils.for_each_value
def languages2marc(self, key, value):
"""Populate the ``041`` MARC field."""
return {'a': pycountry.languages.get(alpha_2=value).name.lower()}
| gpl-3.0 | -6,248,574,016,785,603,000 | 28.587007 | 107 | 0.559912 | false |
scopenco/netblock-tools | netnull.py | 1 | 3572 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Author: Andrey Skopenko <[email protected]>
'''A tool to create ip route rules that blackhole networks by country code
(ex: RU, CN, etc.). For the script to work correctly, the GeoIP database and
the country code list need to be downloaded first.'''
import csv
import sys
import optparse
import os.path
ROUTE_BIN = 'ip route'
MAXMIND_DB = \
'http://www.maxmind.com/download/geoip/database/GeoIPCountryCSV.zip'
COUNTRY_DB = 'http://www.iso.org/iso/list-en1-semic-3.txt'
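# Example invocation (hypothetical; both data files must be downloaded first):
#   wget http://www.maxmind.com/download/geoip/database/GeoIPCountryCSV.zip && unzip GeoIPCountryCSV.zip
#   wget http://www.iso.org/iso/list-en1-semic-3.txt -O country_names_and_code_elements_txt
#   ./netnull.py RU CN > blackhole_routes.sh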
def main():
    p = optparse.OptionParser(description=__doc__,
                              prog="netblock",
                              version="0.1",
                              usage="%prog [--cc] [--geoipdb FILE] "
                              "[--countrydb FILE] [-r] country1 country2 ...")
p.add_option("--geoipdb",
help='Path to GeoIPCountryWhois.csv with GeoIP data',
default='GeoIPCountryWhois.csv')
p.add_option("--countrydb",
help='Path to country_names_and_code_elements_txt '
'with country codes',
default='country_names_and_code_elements_txt')
p.add_option("--cc",
action='store_true',
help='List of country codes')
p.add_option("--remove_nullroute", "-r",
help='Generate rules to remove subnets from ip route table',
action="store_true")
options, arguments = p.parse_args()
# show list of country codes
if options.cc:
if not os.path.isfile(options.countrydb):
print '%s not found! try command "wget %s"' % (
            options.countrydb, COUNTRY_DB)
sys.exit()
with open(options.countrydb) as f:
for line in f:
if line == "" or line.startswith("Country ") or \
";" not in line:
continue
c_name, c_code = line.strip().split(";")
c_name = ' '.join([part.capitalize() for part in
c_name.split(" ")])
print '%s\t%s' % (c_code, c_name)
return
# show help
if not arguments:
p.print_help()
sys.exit()
if not os.path.isfile(options.geoipdb):
print '%s not found! try ' \
'command "wget %s && unzip GeoIPCountryCSV.zip"' % (
options.geoipdb, MAXMIND_DB)
sys.exit()
# construct route rule tempate
base_rule = ROUTE_BIN
if options.remove_nullroute:
block_rule = base_rule + ' del blackhole %s'
else:
block_rule = base_rule + ' add blackhole %s'
# get country networks and show iptables rules
with open(options.geoipdb, 'rb') as f:
for i in csv.reader(f):
if i[4] in arguments:
network = int(i[2])
mask = int(i[3])
while (network <= mask):
x = 0
while True:
if network & (1 << x) == 0 and \
network + ((1 << (x + 1)) - 1) <= mask:
x += 1
continue
print block_rule % '%s/%s' % (get_net(network), 32 - x)
break
network += 1 << x
def get_net(network):
'''convert bin network to decimal'''
out = str(network & 255)
for x in range(3):
network = network >> 8
out = '%s.%s' % (str(network & 255), out)
return out
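# For instance, get_net(3232235520) returns '192.168.0.0', since
# 3232235520 == 192 * 2**24 + 168 * 2**16.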
if __name__ == "__main__":
main()
| bsd-3-clause | 179,938,823,875,172,540 | 34.019608 | 79 | 0.493001 | false |
trolldbois/ctypeslib | setup.py | 1 | 1432 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name="ctypeslib2",
version="2.3.2",
description="ctypeslib2 - FFI toolkit, relies on clang",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
author="Loic Jaquemet",
author_email="[email protected]",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries :: Python Modules",
],
url="https://github.com/trolldbois/ctypeslib",
download_url="https://github.com/trolldbois/ctypeslib/releases",
license="License :: OSI Approved :: MIT License",
packages=['ctypeslib',
'ctypeslib.codegen',
],
package_data={'ctypeslib': ['data/fundamental_type_name.tpl',
'data/headers.tpl',
'data/pointer_type.tpl',
'data/string_cast.tpl',
'data/structure_type.tpl',
]},
entry_points={
'console_scripts': [
'clang2py = ctypeslib.clang2py:main',
]},
test_suite="test.alltests",
install_requires=[
'clang>=11',
]
)
| mit | 6,116,087,930,440,521,000 | 33.926829 | 71 | 0.549581 | false |
ju1ius/clisnips | clisnips/tui/widgets/progress/process.py | 1 | 2218 | import multiprocessing
import os
import signal
from clisnips.tui.logging import logger
from .message_queue import MessageQueue
class Process(multiprocessing.Process):
def __init__(self, message_queue: MessageQueue, target, args=(), kwargs=None):
super().__init__(target=target, args=args, kwargs=kwargs or {})
self._stop_event = multiprocessing.Event()
self._message_queue = message_queue
def stop(self):
logger.debug('Stopping process %s', self.pid)
self._stop_event.set()
# allow garbage collection
if self._message_queue:
self._message_queue = None
self._target.message_queue = None
def kill(self):
self.stop()
if self.is_alive():
logger.debug('Killing process %s', self.pid)
try:
os.killpg(self.pid, signal.SIGKILL)
except OSError as err:
os.kill(self.pid, signal.SIGKILL)
def run(self):
logger.debug('Starting process %s', self.pid)
# pass the queue object to the function object
self._target.message_queue = self._message_queue
self._message_queue.start()
self._message_queue.progress(0.0)
try:
self._do_run_task()
except KeyboardInterrupt as e:
            logger.debug('Process %s caught KeyboardInterrupt', self.pid)
self._message_queue.cancel()
except Exception as err:
msg = ' '.join(err.args) if len(err.args) else str(err)
self._message_queue.error(msg)
finally:
self._message_queue.finish()
self._message_queue.close()
def _do_run_task(self):
for msg in self._target(*self._args, **self._kwargs):
if isinstance(msg, float):
self._message_queue.progress(msg)
elif isinstance(msg, str):
self._message_queue.message(msg)
if self._stop_event.is_set():
self._message_queue.cancel()
logger.debug('Cancelled process %s', self.pid)
break
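# The target given to Process is expected to be a generator that yields floats
# (progress values) and/or strings (status messages); a hypothetical example:
#   def copy_files(paths):
#       total = len(paths)
#       for index, path in enumerate(paths, start=1):
#           yield 'copying %s' % path
#           shutil.copy(path, '/tmp/backup')  # the actual work (shutil import assumed)
#           yield index / total
# BlockingProcess below instead runs a plain callable to completion.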
class BlockingProcess(Process):
def _do_run_task(self):
self._target(*self._args, **self._kwargs)
| gpl-3.0 | 8,363,412,266,626,529,000 | 32.606061 | 82 | 0.584761 | false |
sanyaade-mobiledev/renderer-service-upnp | test/rendererconsole.py | 1 | 5827 | # -*- coding: utf-8 -*-
# renderer-console
#
# Copyright (C) 2012 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Sébastien Bianti <[email protected]>
#
import dbus
import json
import xml.etree.ElementTree as ET
ROOT_OBJECT_PATH = '/com/intel/RendererServiceUPnP'
RENDERER_BUS = 'com.intel.renderer-service-upnp'
PROPS_IF_NAME = 'org.freedesktop.DBus.Properties'
INTROSPECTABLE_IF_NAME = 'org.freedesktop.DBus.Introspectable'
DEVICE_IF_NAME = 'com.intel.UPnP.RendererDevice'
PUSH_HOST_IF_NAME = 'com.intel.RendererServiceUPnP.PushHost'
MANAGER_INTERFACE = 'com.intel.RendererServiceUPnP.Manager'
MEDIAPLAYER2_IF_NAME = 'org.mpris.MediaPlayer2'
PLAYER_IF_NAME = 'org.mpris.MediaPlayer2.Player'
global bus_type
bus_type = dbus.SessionBus()
def print_json(props):
print json.dumps(props, indent=4, sort_keys=True)
def get_interface(path, if_name):
return dbus.Interface(bus_type.get_object(RENDERER_BUS, path), if_name)
class Renderer(object):
"Represent a renderer service"
def __init__(self, object_path):
self.__path = object_path
self.__propsIF = get_interface(object_path, PROPS_IF_NAME)
self.__playerIF = get_interface(object_path, PLAYER_IF_NAME)
self.__pushhostIF = get_interface(object_path, PUSH_HOST_IF_NAME)
def get_interfaces(self):
try:
introspectable_IF = get_interface(self.__path,
INTROSPECTABLE_IF_NAME)
        except:
            print(u"Failed to retrieve introspectable interface")
            return []
introspection = introspectable_IF.Introspect()
tree = ET.fromstring(introspection)
return [i.attrib['name'] for i in tree if i.tag == "interface"]
def interfaces(self):
for i in self.get_interfaces():
print i
def get_prop(self, prop_name, inner_if_name = ""):
return self.__propsIF.Get(inner_if_name, prop_name)
def get_props(self, inner_if_name = ""):
return self.__propsIF.GetAll(inner_if_name)
def print_props(self, inner_if_name = ""):
print_json(self.get_props(inner_if_name))
def set_prop(self, prop_name, if_name, val):
"""
Sets only the following properties :
Rate and Volume
"""
return self.__propsIF.Set(if_name, prop_name, val)
# Control methods
def play(self):
self.__playerIF.Play()
def pause(self):
self.__playerIF.Pause()
def play_pause(self):
self.__playerIF.PlayPause()
def next(self):
self.__playerIF.Next()
def open_uri(self, uri):
self.__playerIF.OpenUri(uri)
def previous(self):
self.__playerIF.Previous()
def seek(self, offset):
self.__playerIF.Seek(offset)
def goto_track(self, trackID):
self.__playerIF.GotoTrack(trackID)
def set_position(self, trackID, position):
self.__playerIF.setPosition(trackID, position)
def stop(self):
self.__playerIF.Stop()
# Push Host methods
def host_file(self, path):
return self.__pushhostIF.HostFile(path)
def remove_file(self, path):
self.__pushhostIF.RemoveFile(path)
class Manager(object):
"""
High level class for detecting Renderers and doing common operations
on RendererServiceUPnP
"""
def __init__(self):
self.__manager = get_interface(ROOT_OBJECT_PATH, MANAGER_INTERFACE)
self.__renderers = []
def update_renderers(self):
self.__renderers = self.__manager.GetServers()
def get_renderers(self):
self.update_renderers()
return self.__renderers
def renderers(self):
self.update_renderers()
for path in self.__renderers:
try:
renderer = Renderer(path)
renderer_name = renderer.get_prop("Identity")
print(u"%s : %s" % (path, renderer_name))
except:
print(u"Failed to retrieve Identity for interface %s" % path)
def get_version(self):
return self.__manager.GetVersion()
def version(self):
print self.get_version()
def release(self):
self.__manager.Release()
if __name__ == "__main__":
print("\n\t\t\tExample for using rendererconsole:")
print("\t\t\t¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n")
manager = Manager()
print("Version = %s" % manager.get_version())
print("¯¯¯¯¯¯¯")
print "\nRenderer's list:"
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯")
manager.renderers()
renderer_list = manager.get_renderers()
for name in renderer_list:
renderer = Renderer(name)
interface_list = renderer.get_interfaces()
print("\nInterfaces of %s:" % name)
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯" + "¯" * len(name))
for i in interface_list:
print i
if_name = DEVICE_IF_NAME
if (if_name in interface_list) :
print("\nProperties of %s on %s:" % (if_name, name))
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯" + (len(name) + len(if_name)) * "¯")
renderer.print_props(if_name)
| lgpl-2.1 | -1,308,506,136,399,219,500 | 28.420513 | 79 | 0.62402 | false |
fresskarma/tinyos-1.x | tools/python/pytos/util/RoutingMessages.py | 1 | 7725 | # "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# @author Kamin Whitehouse
#
"""\
RoutingMessages: This is a package of classes that have some functionality
commonly used by routing messages, eg. Rpc and RamSymbol.
This class is not intended to be used on its own.
"""
import sys, string, time, types
import pytos.util.nescDecls as nescDecls
import pytos.Comm as Comm
import pytos.tools.Drip as Drip
import pytos.tools.Drain as Drain
from copy import deepcopy
class RoutingMessage( nescDecls.TosMsg ) :
def __init__(self, parent, amType, *structArgs) :
#store the parent
self.parent = parent
#initialize the default call parameters to none (ie, use the parent's defaults)
for (callParam,default) in self.parent.defaultCallParams :
self.__dict__[callParam] = None
nescDecls.TosMsg.__init__(self, parent.app.enums.AM_RPCCOMMANDMSG, *structArgs)
def _assignParam(self, field, param, paramId) :
"""assign a call parameter to the correct field (checking types)"""
if type(field) == nescDecls.nescType and (
type(param) == int or type(param) == long or
type(param) == float or type(param) == str or
type(param) == unicode ) :
field.value = param
elif type(field) == type(param) :
field = param
else :
raise Exception("Illegal parameter type for param #%s. Requires type %s." % (
str(paramId), str(type(field))) )
def _send(self, address, *posArgs, **nameArgs) :
commArgs = ()
#posArgs and nameArgs now contain only field values.
#now assign them to the appropriate RoutingMessage fields.
#create a temporary RoutingMessage to hold the call-time parameters
thisCall = deepcopy(self)
for i in range(len(posArgs)) :
thisCall._assignParam(thisCall.value[thisCall.fields[i+1]["name"]], posArgs[i], i)
for key in nameArgs.keys() :
if not thisCall.value.has_key(key) :
raise Exception("parameter name %s non-existent" % key)
thisCall._assignParam(thisCall.value[key], nameArgs[key], key)
thisCall.parent.sendComm.send(address, thisCall, *commArgs)
def parseCallParams(self, nameArgs) :
callParams = self.getCallParams()
#parse any call-time call parameters
for param in nameArgs.keys() :
if callParams.has_key(param) :
callParams[param] = nameArgs[param]
del nameArgs[param]
return callParams
def getCallParams(self) :
"""Use the default call parameters from the parent module, but if I have the same
field with a non-None value, use it instead"""
callParams = self.parent.getCallParams()
for param in callParams.keys() :
if self.__dict__.has_key(param) and self.__getattribute__(param) != None :
callParams[param] = self.__getattribute__(param)
return callParams
def __repr__(self) :
"""full function name"""
return "%s object at %s:\n\n%s" % (self.__class__, hex(id(self)), str(self))
def register(self, listener, comm=()) :
self.parent.receiveComm.register(self, listener, *comm)
def unregister(self, listener, comm=()) :
self.parent.receiveComm.unregister(self, listener, *comm)
class Shortcut (object):
"""used to allow multiple levels of indirection w/routing messages using dots;
ie., to allow something.module.interface.RoutingMessage()"""
def __init__(self, parent, name):
self.parent = parent
self.name = name
def __getattr__(self, name) :
name = self.name + "." + name
if self.parent._messages.has_key(name) :
return self.parent._messages.get(name)
else :
for message in self.parent._messages.values() :
if message.nescType.find(name+".") == 0 :
return Shortcut(self.parent,name)
raise Exception("Cannot find %s. Check spelling." % name)
def __repr__(self):
return "%s object at %s:\n\n%s" % (self.__class__, hex(id(self)), str(self))
def __str__(self):
string = ""
funcs = ()
messageNames = self.parent._messages.keys()
messageNames.sort()
for message in messageNames :
if message.find(self.name) == 0 :
string += str(self.parent._messages[message])
string = string.replace(self.name + "." , "" )
return string
class RoutingMessages(object) :
def __init__(self, app) :
self.app = app
self._messages = {}
## In this constructor, we connect to the routing layer as best as
## we can. This may mean creating new drip/drain instances,
## reusing old ones, reusing old Comm objects, or not connecting
## at all, depending...
if app.motecom == None:
return
#connect to sendComm: use localComm if user requested or if drip not compiled in.
self.address=app.enums.TOS_BCAST_ADDR
if app.localCommOnly==True or "AM_DRIPMSG" not in app.enums._enums:
self.sendComm = Comm.getCommObject(app, app.motecom)
else :
self.sendComm = Drip.getDripObject(app, app.motecom, app.enums.AM_RPCCOMMANDMSG)[0]
#connect to receiveComm: always use Drain unless not compiled in
if "AM_DRAINMSG" not in app.enums._enums:
self.receiveComm = Comm.getCommObject(app, app.motecom)
self.returnAddress = app.enums.TOS_BCAST_ADDR
else :
treeID = 0xfffe #can we set this automatically?
self.receiveComm = Drain.getDrainObject(app, app.motecom, treeID)[0]
if app.localCommOnly == False :
self.receiveComm.maintainTree()
if app.tosbase==True: #can we discover this like deluge?
self.returnAddress = treeID
else :
self.returnAddress = app.enums.TOS_UART_ADDR
def initializeCallParams(self, callParams) :
for (callParam,defaultVal) in self.defaultCallParams :
if callParams.has_key(callParam) :
self.__dict__[callParam] = callParams[callParam]
elif not self.__dict__.has_key(callParam):
self.__dict__[callParam] = defaultVal
def getCallParams(self) :
callParams = {}
for (callParam,default) in self.defaultCallParams :
callParams[callParam] = self.__dict__[callParam]
return callParams
def __getattr__(self, name) :
for function in self._messages.values() :
if function.nescType.find(name + ".") == 0 :
return Shortcut(self,name)
raise AttributeError("No such attribute %s" % name)
def __repr__(self) :
return "%s object at %s:\n\n%s" % (self.__class__, hex(id(self)), str(self))
def __str__(self) :
""" Print all available RoutingMessages."""
string = ""
keys = self._messages.keys()
keys.sort()
for name in keys :
string += str( self._messages[name])
return string
| bsd-3-clause | 4,245,047,028,988,728,300 | 36.139423 | 95 | 0.667961 | false |
avian2/unidecode | unidecode/x1d4.py | 1 | 3863 | data = (
'A', # 0x00
'B', # 0x01
'C', # 0x02
'D', # 0x03
'E', # 0x04
'F', # 0x05
'G', # 0x06
'H', # 0x07
'I', # 0x08
'J', # 0x09
'K', # 0x0a
'L', # 0x0b
'M', # 0x0c
'N', # 0x0d
'O', # 0x0e
'P', # 0x0f
'Q', # 0x10
'R', # 0x11
'S', # 0x12
'T', # 0x13
'U', # 0x14
'V', # 0x15
'W', # 0x16
'X', # 0x17
'Y', # 0x18
'Z', # 0x19
'a', # 0x1a
'b', # 0x1b
'c', # 0x1c
'd', # 0x1d
'e', # 0x1e
'f', # 0x1f
'g', # 0x20
'h', # 0x21
'i', # 0x22
'j', # 0x23
'k', # 0x24
'l', # 0x25
'm', # 0x26
'n', # 0x27
'o', # 0x28
'p', # 0x29
'q', # 0x2a
'r', # 0x2b
's', # 0x2c
't', # 0x2d
'u', # 0x2e
'v', # 0x2f
'w', # 0x30
'x', # 0x31
'y', # 0x32
'z', # 0x33
'A', # 0x34
'B', # 0x35
'C', # 0x36
'D', # 0x37
'E', # 0x38
'F', # 0x39
'G', # 0x3a
'H', # 0x3b
'I', # 0x3c
'J', # 0x3d
'K', # 0x3e
'L', # 0x3f
'M', # 0x40
'N', # 0x41
'O', # 0x42
'P', # 0x43
'Q', # 0x44
'R', # 0x45
'S', # 0x46
'T', # 0x47
'U', # 0x48
'V', # 0x49
'W', # 0x4a
'X', # 0x4b
'Y', # 0x4c
'Z', # 0x4d
'a', # 0x4e
'b', # 0x4f
'c', # 0x50
'd', # 0x51
'e', # 0x52
'f', # 0x53
'g', # 0x54
None, # 0x55
'i', # 0x56
'j', # 0x57
'k', # 0x58
'l', # 0x59
'm', # 0x5a
'n', # 0x5b
'o', # 0x5c
'p', # 0x5d
'q', # 0x5e
'r', # 0x5f
's', # 0x60
't', # 0x61
'u', # 0x62
'v', # 0x63
'w', # 0x64
'x', # 0x65
'y', # 0x66
'z', # 0x67
'A', # 0x68
'B', # 0x69
'C', # 0x6a
'D', # 0x6b
'E', # 0x6c
'F', # 0x6d
'G', # 0x6e
'H', # 0x6f
'I', # 0x70
'J', # 0x71
'K', # 0x72
'L', # 0x73
'M', # 0x74
'N', # 0x75
'O', # 0x76
'P', # 0x77
'Q', # 0x78
'R', # 0x79
'S', # 0x7a
'T', # 0x7b
'U', # 0x7c
'V', # 0x7d
'W', # 0x7e
'X', # 0x7f
'Y', # 0x80
'Z', # 0x81
'a', # 0x82
'b', # 0x83
'c', # 0x84
'd', # 0x85
'e', # 0x86
'f', # 0x87
'g', # 0x88
'h', # 0x89
'i', # 0x8a
'j', # 0x8b
'k', # 0x8c
'l', # 0x8d
'm', # 0x8e
'n', # 0x8f
'o', # 0x90
'p', # 0x91
'q', # 0x92
'r', # 0x93
's', # 0x94
't', # 0x95
'u', # 0x96
'v', # 0x97
'w', # 0x98
'x', # 0x99
'y', # 0x9a
'z', # 0x9b
'A', # 0x9c
None, # 0x9d
'C', # 0x9e
'D', # 0x9f
None, # 0xa0
None, # 0xa1
'G', # 0xa2
None, # 0xa3
None, # 0xa4
'J', # 0xa5
'K', # 0xa6
None, # 0xa7
None, # 0xa8
'N', # 0xa9
'O', # 0xaa
'P', # 0xab
'Q', # 0xac
None, # 0xad
'S', # 0xae
'T', # 0xaf
'U', # 0xb0
'V', # 0xb1
'W', # 0xb2
'X', # 0xb3
'Y', # 0xb4
'Z', # 0xb5
'a', # 0xb6
'b', # 0xb7
'c', # 0xb8
'd', # 0xb9
None, # 0xba
'f', # 0xbb
None, # 0xbc
'h', # 0xbd
'i', # 0xbe
'j', # 0xbf
'k', # 0xc0
'l', # 0xc1
'm', # 0xc2
'n', # 0xc3
None, # 0xc4
'p', # 0xc5
'q', # 0xc6
'r', # 0xc7
's', # 0xc8
't', # 0xc9
'u', # 0xca
'v', # 0xcb
'w', # 0xcc
'x', # 0xcd
'y', # 0xce
'z', # 0xcf
'A', # 0xd0
'B', # 0xd1
'C', # 0xd2
'D', # 0xd3
'E', # 0xd4
'F', # 0xd5
'G', # 0xd6
'H', # 0xd7
'I', # 0xd8
'J', # 0xd9
'K', # 0xda
'L', # 0xdb
'M', # 0xdc
'N', # 0xdd
'O', # 0xde
'P', # 0xdf
'Q', # 0xe0
'R', # 0xe1
'S', # 0xe2
'T', # 0xe3
'U', # 0xe4
'V', # 0xe5
'W', # 0xe6
'X', # 0xe7
'Y', # 0xe8
'Z', # 0xe9
'a', # 0xea
'b', # 0xeb
'c', # 0xec
'd', # 0xed
'e', # 0xee
'f', # 0xef
'g', # 0xf0
'h', # 0xf1
'i', # 0xf2
'j', # 0xf3
'k', # 0xf4
'l', # 0xf5
'm', # 0xf6
'n', # 0xf7
'o', # 0xf8
'p', # 0xf9
'q', # 0xfa
'r', # 0xfb
's', # 0xfc
't', # 0xfd
'u', # 0xfe
'v', # 0xff
)
| gpl-2.0 | 9,157,324,283,306,575,000 | 13.972868 | 15 | 0.341703 | false |
newsages/nQTrucks | ntrain/puertoct/truck2/prep.py | 1 | 5615 | #!/usr/bin/python
import os
from PIL import Image
import uuid
import shutil
import sys
WIDTH=52
HEIGHT=13
COUNTRY='trucks2'
#WIDTH=52
#HEIGHT=13
#COUNTRY='eu'
#constants
OPENCV_DIR= '/usr/bin'
SAMPLE_CREATOR = OPENCV_DIR + '/opencv_createsamples'
BASE_DIR = './'
OUTPUT_DIR = BASE_DIR + "out/"
INPUT_NEGATIVE_DIR = BASE_DIR + 'neg/'
INPUT_POSITIVE_DIR = BASE_DIR + COUNTRY + '/'
OUTPUT_NEGATIVE_DIR = BASE_DIR + 'negative/'
OUTPUT_POSITIVE_DIR = BASE_DIR + 'positive/'
POSITIVE_INFO_FILE = OUTPUT_POSITIVE_DIR + 'positive.txt'
NEGATIVE_INFO_FILE = OUTPUT_NEGATIVE_DIR + 'negative.txt'
VEC_FILE = OUTPUT_POSITIVE_DIR + 'vecfile.vec'
vector_arg = '-vec %s' % (VEC_FILE)
width_height_arg = '-w %d -h %d' % (WIDTH, HEIGHT)
def print_usage():
print "Usage: prep.py [Operation]"
print " -- Operations --"
print " neg -- Prepares the negative samples list"
print " pos -- Copies all the raw positive files to a opencv vector"
print " showpos -- Shows the positive samples that were created"
print " train -- Outputs the command for the Cascade Training algorithm"
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
command=""
if command != "":
pass
elif len(sys.argv) != 2:
print_usage()
exit()
else:
command = sys.argv[1]
if command == "neg":
print "Neg"
# Get rid of any spaces
for neg_file in os.listdir(INPUT_NEGATIVE_DIR):
if " " in neg_file:
fileName, fileExtension = os.path.splitext(neg_file)
newfilename = str(uuid.uuid4()) + fileExtension
#print "renaming: " + files + " to "+ root_dir + "/" + str(uuid.uuid4()) + fileExtension
            os.rename(INPUT_NEGATIVE_DIR + neg_file, INPUT_NEGATIVE_DIR + newfilename)
f = open(NEGATIVE_INFO_FILE,'w')
## Write a list of all the negative files
for neg_file in os.listdir(INPUT_NEGATIVE_DIR):
if os.path.isdir(INPUT_NEGATIVE_DIR + neg_file):
continue
shutil.copy2(INPUT_NEGATIVE_DIR + neg_file, OUTPUT_NEGATIVE_DIR + neg_file )
f.write(neg_file + "\n")
f.close()
elif command == "pos":
print "Pos"
info_arg = '-info %s' % (POSITIVE_INFO_FILE)
# Copy all files in the raw directory and build an info file
## Remove all files in the output positive directory
for old_file in os.listdir(OUTPUT_POSITIVE_DIR):
os.unlink(OUTPUT_POSITIVE_DIR + old_file)
## First, prep the sample filenames (make sure they have no spaces)
for files in os.listdir(INPUT_POSITIVE_DIR):
if os.path.isdir(INPUT_POSITIVE_DIR + files):
continue
# Rename the file if it has a space in it
newfilename = files
if " " in files:
fileName, fileExtension = os.path.splitext(files)
newfilename = str(uuid.uuid4()) + fileExtension
#print "renaming: " + files + " to "+ root_dir + "/" + str(uuid.uuid4()) + fileExtension
os.rename(INPUT_POSITIVE_DIR + files, INPUT_POSITIVE_DIR + newfilename)
# Copy from the raw directory to the positive directory
shutil.copy2(INPUT_POSITIVE_DIR + newfilename, OUTPUT_POSITIVE_DIR + newfilename )
total_pics = 0
## Create the positive.txt input file
f = open(POSITIVE_INFO_FILE,'w')
for filename in os.listdir(OUTPUT_POSITIVE_DIR):
if os.path.isdir(OUTPUT_POSITIVE_DIR + filename):
continue
if filename.endswith(".txt"):
continue
try:
img = Image.open(OUTPUT_POSITIVE_DIR + filename)
# get the image's width and height in pixels
width, height = img.size
f.write(filename + " 1 0 0 " + str(width) + " " + str(height) + '\n')
total_pics = total_pics + 1
except IOError:
print "Exception reading image file: " + filename
f.close()
# Collapse the samples into a vector file
execStr = '%s/opencv_createsamples %s %s %s -num %d' % (OPENCV_DIR, vector_arg, width_height_arg, info_arg, total_pics )
print execStr
os.system(execStr)
#opencv_createsamples -info ./positive.txt -vec ../positive/vecfile.vec -w 120 -h 60 -bg ../negative/PentagonCityParkingGarage21.jpg -num 100
elif command == "showpos":
print "SHOW"
execStr = '%s/opencv_createsamples -vec %s -w %d -h %d' % (OPENCV_DIR, VEC_FILE, WIDTH, HEIGHT )
print execStr
os.system(execStr)
#opencv_createsamples -vec ../positive/vecfile.vec -w 120 -h 60
elif command == "train":
print "TRAIN"
data_arg = '-data %s/' % (OUTPUT_DIR)
bg_arg = '-bg %s' % (NEGATIVE_INFO_FILE)
try:
num_pos_samples = file_len(POSITIVE_INFO_FILE)
except:
num_pos_samples = -1
num_neg_samples = file_len(NEGATIVE_INFO_FILE)
execStr = '%s/opencv_traincascade %s %s %s %s -numPos %d -numNeg %d -maxFalseAlarmRate 0.45 -featureType LBP -numStages 13' % (OPENCV_DIR, data_arg, vector_arg, bg_arg, width_height_arg, num_pos_samples, num_neg_samples )
print "Execute the following command to start training:"
print execStr
#opencv_traincascade -data ./out/ -vec ./positive/vecfile.vec -bg ./negative/negative.txt -w 120 -h 60 -numPos 99 -numNeg 5 -featureType LBP -numStages 8
#opencv_traincascade -data ./out/ -vec ./positive/vecfile.vec -bg ./negative/negative.txt -w 120 -h 60 -numPos 99 -numNeg 5 -featureType LBP -numStages 20
elif command == "SDFLSDFSDFSDF":
root_dir = '/home/mhill/projects/anpr/AlprPlus/samples/svm/raw-pos'
outputfilename = "positive.txt"
else:
print_usage()
exit()
| gpl-3.0 | -3,035,221,647,513,750,500 | 28.708995 | 225 | 0.636331 | false |
marnnie/Cable-buenaventura | plugin.video.genesis/resources/lib/sources/mvsnap_mv_tv.py | 1 | 4444 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse,json
from resources.lib.libraries import client
from resources.lib import resolvers
class source:
def __init__(self):
self.base_link = 'http://mvsnap.com'
self.search_link = '/v1/api/search?query=%s'
def get_movie(self, imdb, title, year):
try:
query = self.search_link % imdb
query = urlparse.urljoin(self.base_link, query)
result = client.source(query)
result = json.loads(result)
result = result['movies'][0]['slug']
url = '/movies/%s' % result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
url = '%s (%s)' % (tvshowtitle, year)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
query = self.search_link % imdb
query = urlparse.urljoin(self.base_link, query)
result = client.source(query)
result = json.loads(result)
result = result['movies']
season = '%02d' % int(season)
episode = '%02d' % int(episode)
result = [(i['slug'], i['long_title']) for i in result]
result = [(i[0], re.compile('(\d*)$').findall(i[1])) for i in result]
result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
result = [i[0] for i in result if season == i[1]][0]
url = '/tv-shows/%s?S%sE%s' % (result, season, episode)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
query = urlparse.urlparse(url).query
try: query = '%02d' % int(re.compile('E(\d*)$').findall(query)[0])
except: query = ''
url = urlparse.urljoin(self.base_link, url)
result = client.source(url)
result = client.parseDOM(result, 'select', attrs = {'id': 'myDropdown'})[0]
result = zip(client.parseDOM(result, 'option', ret='value'), client.parseDOM(result, 'option'))
result = [i[0] for i in result if i[1].endswith(query) or query == ''][0]
direct = re.compile('(.+)[|](.+?)[,]').findall(result)
if len(direct) > 0:
quality = 'HD' if 'hd' in direct[0][0].lower() else 'SD'
sources.append({'source': 'GVideo', 'quality': quality, 'provider': 'MVsnap', 'url': direct[0][1]})
return sources
url = urlparse.urljoin(self.base_link, result)
url = client.source(url, output='geturl')
if not 'google' in url: raise Exception()
url = url.split('get_video_info')[0]
url = resolvers.request(url)
for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'MVsnap', 'url': i['url']})
return sources
except:
return sources
def resolve(self, url):
try:
if url.startswith('stack://'): return url
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
| gpl-2.0 | 2,883,124,037,299,441,000 | 31.437956 | 126 | 0.54928 | false |
metabrainz/botbot-web | botbot/apps/plugins/core/logger.py | 1 | 2002 | import re
from botbot.apps.logs.models import Log
from botbot_plugins.base import BasePlugin
import botbot_plugins.config as config
class Config(config.BaseConfig):
ignore_prefixes = config.Field(
default=["!-"],
required=False,
help_text="""
Specify a list of regular expressions which match
the start of messages to be ignored (excluded from the logs)
"""
)
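    # Illustrative example (an assumption, not part of the original plugin):
    # setting ignore_prefixes to ["!-", "\[off\]"] would exclude messages that
    # start with "!-" or "[off]"; matching is case-insensitive (see
    # should_ignore_text below).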
def should_ignore_text(text, ignore_prefixes):
return any(
(
prefix and
re.match(prefix, text, flags=re.IGNORECASE) is not None
)
for prefix in ignore_prefixes
)
class Plugin(BasePlugin):
"""
Logs all activity.
I keep extensive logs on all the activity in `{{ channel.name }}`.
You can read and search them at {{ SITE }}{{ channel.get_absolute_url }}.
"""
config_class = Config
def logit(self, line):
"""Log a message to the database"""
# If the channel does not start with "#" that means the message
# is part of a /query
if line._channel_name.startswith("#"):
ignore_prefixes = self.config['ignore_prefixes']
if ignore_prefixes:
if not isinstance(ignore_prefixes, list):
ignore_prefixes = [ignore_prefixes]
else:
ignore_prefixes = []
# Delete ACTION prefix created by /me
text = line.text
if text.startswith("ACTION "):
text = text[7:]
if not should_ignore_text(text, ignore_prefixes):
Log.objects.create(
channel_id=line._channel.pk,
timestamp=line._received,
nick=line.user,
text=line.full_text,
room=line._channel,
host=line._host,
command=line._command,
raw=line._raw)
logit.route_rule = ('firehose', ur'(.*)')
| mit | 801,682,193,017,813,800 | 28.880597 | 77 | 0.545455 | false |
evanmiltenburg/Dutch-corpora | overheid/scripts/make_xml_plain.py | 1 | 1350 | from bs4 import BeautifulSoup
import nltk.data
from nltk.tokenize import word_tokenize
import glob
import gzip
import sys
tokenizer = nltk.data.load('tokenizers/punkt/dutch.pickle')
def good_sentence(s):
if len(s) < 4 or s.count(',') > 4:
return False
else:
digits = filter(lambda x:x.isdigit(),s)
if len(digits) > (float(len(s))/2):
return False
else:
return True
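# In other words (an illustrative summary of the checks above): a sentence is
# kept only if it has at least 4 tokens, at most 4 comma tokens, and no more
# than half of its tokens are pure digits.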
def sentences_for_file(filename):
with open(filename) as f:
soup = BeautifulSoup(f)
pars = filter(lambda p: not p == None,
map(lambda x:x.get_text(), soup.find_all('al')))
sentences = [word_tokenize(sentence) for x in pars
for sentence in tokenizer.tokenize(x)]
return [' '.join(s).encode('utf-8') for s in filter(good_sentence, sentences)]
def main(ftype):
with gzip.open('../corpus/' + ftype + '_plain.txt.gz','w') as f:
for filename in glob.glob('../data/' + ftype + '/*/*.xml'):
f.write('\n'.join(sentences_for_file(filename)))
if __name__ == "__main__":
ftypes = {'kst', 'trb', 'stb', 'ag', 'ah', 'stcrt', 'kv', 'h', 'blg', 'nds'}
ftype = sys.argv[1]
if ftype in ftypes:
main(ftype)
else:
raise KeyError('No known folder of that type. (You entered: '+ftype + ')')
| apache-2.0 | 3,827,033,118,878,343,000 | 31.926829 | 86 | 0.568148 | false |
erikdejonge/newsrivr | daemons/oauth.py | 1 | 23551 | """
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import int
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import cgi
import urllib.request, urllib.parse, urllib.error
import time
import random
import urllib.parse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
"""Generic exception class."""
    def __init__(self, message='OAuth error occurred.'):
self.message = message
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def escape(s):
"""Escape a URL including any /."""
return urllib.parse.quote(s, safe='~')
def _utf8_str(s):
"""Convert unicode to utf-8."""
if isinstance(s, str):
return s.encode("utf-8")
else:
return str(s)
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class OAuthConsumer(object):
"""Consumer of OAuth authentication.
OAuthConsumer is a data type that represents the identity of the Consumer
via its shared secret with the Service Provider.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
class OAuthToken(object):
"""OAuthToken is a data type that represents an End User via either an access
or request token.
key -- the token
secret -- the token secret
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urllib.parse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urllib.parse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.parse.urlencode(data)
def from_string(s):
""" Returns a token from something like:
oauth_token_secret=xxx&oauth_token=xxx
"""
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
token = OAuthToken(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
class OAuthRequest(object):
"""OAuthRequest represents the request and can be serialized.
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
- oauth_verifier
... any additional parameters, as defined by the Service Provider.
"""
parameters = None # OAuth parameters.
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter(
'oauth_nonce')
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
parameters = {}
for k, v in self.parameters.items():
# Ignore oauth parameters.
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
auth_header = 'OAuth realm="%s"' % realm
# Add the oauth parameters.
if self.parameters:
for k, v in self.parameters.items():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
for k, v in self.parameters.items()])
def to_url(self):
"""Serialize as a URL for a GET request."""
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
params = self.parameters
try:
# Exclude the signature if it exists.
del params['oauth_signature']
except:
pass
# Escape key values before sorting.
key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
for k,v in list(params.items())]
# Sort lexicographically, first after key, then after value.
key_values.sort()
# Combine key value pairs into a string.
return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
def get_normalized_http_method(self):
"""Uppercases the http method."""
return self.http_method.upper()
def get_normalized_http_url(self):
"""Parses the URL and rebuilds it to be scheme://host/path."""
parts = urllib.parse.urlparse(self.http_url)
scheme, netloc, path = parts[:3]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
return '%s://%s%s' % (scheme, netloc, path)
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of build_signature."""
# Set the signature method.
self.set_parameter('oauth_signature_method',
signature_method.get_name())
# Set the signature.
self.set_parameter('oauth_signature',
self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
"""Calls the build signature method within the signature method."""
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urllib.parse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None,
callback=None, verifier=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.callback:
parameters['oauth_callback'] = token.callback
# 1.0a support for verifier.
if verifier:
parameters['oauth_verifier'] = verifier
elif callback:
# 1.0a support for callback in the request token request.
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.parse.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.items():
parameters[k] = urllib.parse.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
"""A worker to check the validity of a request against a data store."""
timestamp_threshold = 300 # In seconds, five minutes.
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, data_store):
self.data_store = data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
def fetch_request_token(self, oauth_request):
"""Processes a request_token request and returns the
request token on success.
"""
try:
# Get the request token for authorization.
token = self._get_token(oauth_request, 'request')
except OAuthError:
# No token required for the initial token request.
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
callback = self.get_callback(oauth_request)
except OAuthError:
callback = None # 1.0, no callback specified.
self._check_signature(oauth_request, consumer, None)
# Fetch a new token.
token = self.data_store.fetch_request_token(consumer, callback)
return token
def fetch_access_token(self, oauth_request):
"""Processes an access_token request and returns the
access token on success.
"""
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
verifier = self._get_verifier(oauth_request)
except OAuthError:
verifier = None
# Get the request token.
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token, verifier)
return new_token
def verify_request(self, oauth_request):
"""Verifies an api call and checks all the parameters."""
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# Get the access token.
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
def authorize_token(self, token, user):
"""Authorize a request token."""
return self.data_store.authorize_request_token(token, user)
def get_callback(self, oauth_request):
"""Get the callback URL."""
return oauth_request.get_parameter('oauth_callback')
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _get_version(self, oauth_request):
"""Verify the correct version request for this server."""
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
def _get_signature_method(self, oauth_request):
"""Figure out the signature with some defaults."""
try:
signature_method = oauth_request.get_parameter(
'oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(list(self.signature_methods.keys()))
raise OAuthError('Signature method %s not supported try one of the '
'following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
def _get_token(self, oauth_request, token_type='access'):
"""Try to find the token for the provided request token key."""
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _get_verifier(self, oauth_request):
return oauth_request.get_parameter('oauth_verifier')
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# Validate the signature.
valid_sig = signature_method.check_signature(oauth_request, consumer,
token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(
oauth_request, consumer, token)
raise OAuthError('Invalid signature. Expected signature base '
'string: %s' % base)
built = signature_method.build_signature(oauth_request, consumer, token)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = abs(now - timestamp)
if lapsed > self.timestamp_threshold:
raise OAuthError('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' %
(timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
"""Verify that the nonce is uniqueish."""
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
"""OAuthClient is a worker to attempt to execute a request."""
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def access_resource(self, oauth_request):
"""-> Some protected resource."""
raise NotImplementedError
class OAuthDataStore(object):
"""A database abstraction used to lookup consumers and tokens."""
def lookup_consumer(self, key):
"""-> OAuthConsumer."""
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
"""-> OAuthToken."""
raise NotImplementedError
def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_request_token(self, oauth_consumer, oauth_callback):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
"""-> OAuthToken."""
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
"""-> OAuthToken."""
raise NotImplementedError
class OAuthSignatureMethod(object):
"""A strategy class that implements a signature method."""
def get_name(self):
"""-> str."""
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
"""-> str key, str raw."""
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
"""-> str."""
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
"""Builds the base signature string."""
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
# HMAC object.
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # Deprecated
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
"""Concatenates the consumer key and secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def build_signature(self, oauth_request, consumer, token):
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
return key | gpl-2.0 | 6,105,287,884,939,560,000 | 34.416541 | 86 | 0.617893 | false |
datapythonista/pandas | pandas/core/arrays/sparse/accessor.py | 2 | 11479 | """Sparse accessor"""
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import find_common_type
from pandas.core.accessor import (
PandasDelegate,
delegate_names,
)
from pandas.core.arrays.sparse.array import SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype
class BaseAccessor:
_validation_msg = "Can only use the '.sparse' accessor with Sparse data."
def __init__(self, data=None):
self._parent = data
self._validate(data)
def _validate(self, data):
raise NotImplementedError
@delegate_names(
SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
)
class SparseAccessor(BaseAccessor, PandasDelegate):
"""
    Accessor for Series with sparse data, e.g. for conversion from/to other sparse matrix data types.
"""
def _validate(self, data):
if not isinstance(data.dtype, SparseDtype):
raise AttributeError(self._validation_msg)
def _delegate_property_get(self, name, *args, **kwargs):
return getattr(self._parent.array, name)
def _delegate_method(self, name, *args, **kwargs):
if name == "from_coo":
return self.from_coo(*args, **kwargs)
elif name == "to_coo":
return self.to_coo(*args, **kwargs)
else:
raise ValueError
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a Series with sparse values from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : Series
A Series with sparse values.
Examples
--------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(
... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 2.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> ss = pd.Series.sparse.from_coo(A)
>>> ss
0 2 1.0
3 2.0
1 0 3.0
dtype: Sparse[float64, nan]
"""
from pandas import Series
from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series
result = coo_to_sparse_series(A, dense_index=dense_index)
result = Series(result.array, index=result.index, copy=False)
return result
def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
>>> s.index = pd.MultiIndex.from_tuples(
... [
... (1, 2, "a", 0),
... (1, 2, "a", 1),
... (1, 1, "b", 0),
... (1, 1, "b", 1),
... (2, 1, "b", 0),
... (2, 1, "b", 1)
... ],
... names=["A", "B", "C", "D"],
... )
>>> s
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: float64
>>> ss = s.astype("Sparse")
>>> ss
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: Sparse[float64, nan]
>>> A, rows, columns = ss.sparse.to_coo(
... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 3.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo
A, rows, columns = sparse_series_to_coo(
self._parent, row_levels, column_levels, sort_labels=sort_labels
)
return A, rows, columns
def to_dense(self):
"""
Convert a Series from sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
Series:
A Series with the same values, stored as a dense array.
Examples
--------
>>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
>>> series
0 0
1 1
2 0
dtype: Sparse[int64, 0]
>>> series.sparse.to_dense()
0 0
1 1
2 0
dtype: int64
"""
from pandas import Series
return Series(
self._parent.array.to_dense(),
index=self._parent.index,
name=self._parent.name,
)
class SparseFrameAccessor(BaseAccessor, PandasDelegate):
"""
DataFrame accessor for sparse data.
.. versionadded:: 0.25.0
"""
def _validate(self, data):
dtypes = data.dtypes
if not all(isinstance(t, SparseDtype) for t in dtypes):
raise AttributeError(self._validation_msg)
@classmethod
def from_spmatrix(cls, data, index=None, columns=None):
"""
Create a new DataFrame from a scipy sparse matrix.
.. versionadded:: 0.25.0
Parameters
----------
data : scipy.sparse.spmatrix
Must be convertible to csc format.
index, columns : Index, optional
Row and column labels to use for the resulting DataFrame.
Defaults to a RangeIndex.
Returns
-------
DataFrame
Each column of the DataFrame is stored as a
:class:`arrays.SparseArray`.
Examples
--------
>>> import scipy.sparse
>>> mat = scipy.sparse.eye(3)
>>> pd.DataFrame.sparse.from_spmatrix(mat)
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas._libs.sparse import IntIndex
from pandas import DataFrame
data = data.tocsc()
index, columns = cls._prep_index(data, index, columns)
n_rows, n_columns = data.shape
# We need to make sure indices are sorted, as we create
# IntIndex with no input validation (i.e. check_integrity=False ).
# Indices may already be sorted in scipy in which case this adds
# a small overhead.
data.sort_indices()
indices = data.indices
indptr = data.indptr
array_data = data.data
dtype = SparseDtype(array_data.dtype, 0)
arrays = []
for i in range(n_columns):
sl = slice(indptr[i], indptr[i + 1])
idx = IntIndex(n_rows, indices[sl], check_integrity=False)
arr = SparseArray._simple_new(array_data[sl], idx, dtype)
arrays.append(arr)
return DataFrame._from_arrays(
arrays, columns=columns, index=index, verify_integrity=False
)
def to_dense(self):
"""
Convert a DataFrame with sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
DataFrame
A DataFrame with the same values stored as dense arrays.
Examples
--------
>>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
>>> df.sparse.to_dense()
A
0 0
1 1
2 0
"""
from pandas import DataFrame
data = {k: v.array.to_dense() for k, v in self._parent.items()}
return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.25.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. By numpy.find_common_type convention, mixing int64 and
        uint64 will result in a float64 dtype.
"""
import_optional_dependency("scipy")
from scipy.sparse import coo_matrix
dtype = find_common_type(self._parent.dtypes.to_list())
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, data = [], [], []
for col, name in enumerate(self._parent):
s = self._parent[name]
row = s.array.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
data.append(s.array.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
data = np.concatenate(data)
return coo_matrix((data, (rows, cols)), shape=self._parent.shape)
@property
def density(self) -> float:
"""
Ratio of non-sparse points to total (dense) data points.
"""
tmp = np.mean([column.array.density for _, column in self._parent.items()])
return tmp
@staticmethod
def _prep_index(data, index, columns):
from pandas.core.indexes.api import ensure_index
import pandas.core.indexes.base as ibase
N, K = data.shape
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
if len(columns) != K:
raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
if len(index) != N:
raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
return index, columns
| bsd-3-clause | -6,695,402,877,092,622,000 | 29.287599 | 86 | 0.531492 | false |
nkalodimas/invenio | modules/bibsched/lib/bibtask.py | 1 | 50380 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Bibliographic Task Class.
BibTask class.
A BibTask is an executable under CFG_BINDIR, whose name is stored in
bibtask_config.CFG_BIBTASK_VALID_TASKS.
A valid task must call the task_init function with the proper parameters.
Generic task-related parameters (user, sleeptime, runtime, task_id, task_name,
verbose)
go to the _TASK_PARAMS global dictionary accessible through task_get_task_param.
Options specific to the particular BibTask go to the _OPTIONS global dictionary
and are accessible via task_get_option/task_set_option.
In order to log something properly, just use write_message(s) with the desired
verbose level.
task_update_status and task_update_progress can be used to update the status
of the task (DONE, FAILED, DONE WITH ERRORS...) and its progress
(1 out of 100...) within the bibsched monitor.
It is possible to enqueue a BibTask via API call by means of
task_low_level_submission.
"""
__revision__ = "$Id$"
import getopt
import getpass
import marshal
import os
import pwd
import re
import signal
import sys
import time
import datetime
import traceback
import logging
import logging.handlers
import random
from socket import gethostname
from invenio.dbquery import run_sql, _db_login
from invenio.access_control_engine import acc_authorize_action
from invenio.config import CFG_PREFIX, CFG_BINDIR, CFG_LOGDIR, \
CFG_BIBSCHED_PROCESS_USER, CFG_TMPDIR, CFG_SITE_SUPPORT_EMAIL
from invenio.errorlib import register_exception
from invenio.access_control_config import CFG_EXTERNAL_AUTH_USING_SSO, \
CFG_EXTERNAL_AUTHENTICATION
from invenio.webuser import get_user_preferences, get_email
from invenio.bibtask_config import CFG_BIBTASK_VALID_TASKS, \
CFG_BIBTASK_DEFAULT_TASK_SETTINGS, CFG_BIBTASK_FIXEDTIMETASKS
from invenio.dateutils import parse_runtime_limit
from invenio.shellutils import escape_shell_arg
from invenio.mailutils import send_email
from invenio.bibsched import bibsched_set_host, \
bibsched_get_host
# Global _TASK_PARAMS dictionary.
_TASK_PARAMS = {
'version': '',
'task_stop_helper_fnc': None,
'task_name': os.path.basename(sys.argv[0]),
'task_specific_name': '',
'task_id': 0,
'user': '',
# If the task is not initialized (usually a developer debugging
# a single method), output all messages.
'verbose': 9,
'sleeptime': '',
'runtime': time.strftime("%Y-%m-%d %H:%M:%S"),
'priority': 0,
'runtime_limit': None,
'profile': [],
'post-process': [],
'sequence-id':None,
'stop_queue_on_error': False,
'fixed_time': False,
'email_logs_to': [],
}
# Global _OPTIONS dictionary.
_OPTIONS = {}
# Which tasks don't need to ask the user for authorization?
CFG_VALID_PROCESSES_NO_AUTH_NEEDED = ("bibupload", )
CFG_TASK_IS_NOT_A_DEAMON = ("bibupload", )
def fix_argv_paths(paths, argv=None):
"""Given the argv vector of cli parameters, and a list of path that
can be relative and may have been specified within argv,
it substitute all the occurencies of these paths in argv.
argv is changed in place and returned.
"""
if argv is None:
argv = sys.argv
for path in paths:
for count in xrange(len(argv)):
if path == argv[count]:
argv[count] = os.path.abspath(path)
return argv
def task_low_level_submission(name, user, *argv):
"""Let special lowlevel enqueuing of a task on the bibsche queue.
@param name: is the name of the bibtask. It must be a valid executable under
C{CFG_BINDIR}.
@type name: string
@param user: is a string that will appear as the "user" submitting the task.
        Since tasks are submitted via API it makes sense to set the
user to the name of the module/function that called
task_low_level_submission.
@type user: string
@param argv: are all the additional CLI parameters that would have been
passed on the CLI (one parameter per variable).
e.g.:
>>> task_low_level_submission('bibupload', 'admin', '-a', '/tmp/z.xml')
@type: strings
@return: the task identifier when the task is correctly enqueued.
@rtype: int
@note: use absolute paths in argv
"""
def get_priority(argv):
"""Try to get the priority by analysing the arguments."""
priority = 0
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 'P:', ['priority='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-P', '--priority'):
try:
priority = int(opt[1])
except ValueError:
pass
return priority
def get_special_name(argv):
"""Try to get the special name by analysing the arguments."""
special_name = ''
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 'N:', ['name='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-N', '--name'):
special_name = opt[1]
return special_name
def get_runtime(argv):
"""Try to get the runtime by analysing the arguments."""
runtime = time.strftime("%Y-%m-%d %H:%M:%S")
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 't:', ['runtime='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-t', '--runtime'):
try:
runtime = get_datetime(opt[1])
except ValueError:
pass
return runtime
def get_sleeptime(argv):
"""Try to get the runtime by analysing the arguments."""
sleeptime = ""
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 's:', ['sleeptime='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-s', '--sleeptime'):
try:
sleeptime = opt[1]
except ValueError:
pass
return sleeptime
def get_sequenceid(argv):
"""Try to get the sequenceid by analysing the arguments."""
sequenceid = None
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 'I:', ['sequence-id='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-I', '--sequence-id'):
try:
sequenceid = opt[1]
except ValueError:
pass
return sequenceid
task_id = None
try:
if not name in CFG_BIBTASK_VALID_TASKS:
raise StandardError('%s is not a valid task name' % name)
new_argv = []
for arg in argv:
if isinstance(arg, unicode):
arg = arg.encode('utf8')
new_argv.append(arg)
argv = new_argv
priority = get_priority(argv)
special_name = get_special_name(argv)
runtime = get_runtime(argv)
sleeptime = get_sleeptime(argv)
sequenceid = get_sequenceid(argv)
argv = tuple([os.path.join(CFG_BINDIR, name)] + list(argv))
if special_name:
name = '%s:%s' % (name, special_name)
verbose_argv = 'Will execute: %s' % ' '.join([escape_shell_arg(str(arg)) for arg in argv])
## submit task:
task_id = run_sql("""INSERT INTO schTASK (proc,user,
runtime,sleeptime,status,progress,arguments,priority,sequenceid)
VALUES (%s,%s,%s,%s,'WAITING',%s,%s,%s,%s)""",
(name, user, runtime, sleeptime, verbose_argv, marshal.dumps(argv), priority, sequenceid))
except Exception:
register_exception(alert_admin=True)
if task_id:
run_sql("""DELETE FROM schTASK WHERE id=%s""", (task_id, ))
raise
return task_id
def bibtask_allocate_sequenceid(curdir=None):
"""
    Returns an almost unique number to be used as a task sequence ID.
In WebSubmit functions, set C{curdir} to the curdir (!) to read
the shared sequence ID for all functions of this submission (reading
"access number").
@param curdir: in WebSubmit functions (ONLY) the value retrieved
from the curdir parameter of the function
@return: an integer for the sequence ID. 0 is returned if the
sequence ID could not be allocated
@rtype: int
"""
if curdir:
try:
fd = file(os.path.join(curdir, 'access'), "r")
access = fd.readline().strip()
fd.close()
return access.replace("_", "")[-9:]
except:
return 0
else:
return random.randrange(1, 4294967296)
def setup_loggers(task_id=None):
"""Sets up the logging system."""
logger = logging.getLogger()
for handler in logger.handlers:
## Let's clean the handlers in case some piece of code has already
## fired any write_message, i.e. any call to debug, info, etc.
## which triggered a call to logging.basicConfig()
logger.removeHandler(handler)
formatter = logging.Formatter('%(asctime)s --> %(message)s', '%Y-%m-%d %H:%M:%S')
if task_id is not None:
err_logger = logging.handlers.RotatingFileHandler(os.path.join(CFG_LOGDIR, 'bibsched_task_%d.err' % _TASK_PARAMS['task_id']), 'a', 1*1024*1024, 10)
log_logger = logging.handlers.RotatingFileHandler(os.path.join(CFG_LOGDIR, 'bibsched_task_%d.log' % _TASK_PARAMS['task_id']), 'a', 1*1024*1024, 10)
log_logger.setFormatter(formatter)
log_logger.setLevel(logging.DEBUG)
err_logger.setFormatter(formatter)
err_logger.setLevel(logging.WARNING)
logger.addHandler(err_logger)
logger.addHandler(log_logger)
stdout_logger = logging.StreamHandler(sys.stdout)
stdout_logger.setFormatter(formatter)
stdout_logger.setLevel(logging.DEBUG)
stderr_logger = logging.StreamHandler(sys.stderr)
stderr_logger.setFormatter(formatter)
stderr_logger.setLevel(logging.WARNING)
logger.addHandler(stderr_logger)
logger.addHandler(stdout_logger)
logger.setLevel(logging.INFO)
return logger
def task_init(
authorization_action="",
authorization_msg="",
description="",
help_specific_usage="",
version=__revision__,
specific_params=("", []),
task_stop_helper_fnc=None,
task_submit_elaborate_specific_parameter_fnc=None,
task_submit_check_options_fnc=None,
task_run_fnc=None):
""" Initialize a BibTask.
@param authorization_action: is the name of the authorization action
connected with this task;
@param authorization_msg: is the header printed when asking for an
authorization password;
@param description: is the generic description printed in the usage page;
@param help_specific_usage: is the specific parameter help
    @param task_stop_helper_fnc: is a function that will be called
whenever the task is stopped
@param task_submit_elaborate_specific_parameter_fnc: will be called passing
a key and a value, for parsing specific cli parameters. Must return True if
it has recognized the parameter. Must eventually update the options with
bibtask_set_option;
@param task_submit_check_options: must check the validity of options (via
    bibtask_get_option) once all the options were parsed;
@param task_run_fnc: will be called as the main core function. Must return
False in case of errors.
"""
global _TASK_PARAMS, _OPTIONS
_TASK_PARAMS = {
"version" : version,
"task_stop_helper_fnc" : task_stop_helper_fnc,
"task_name" : os.path.basename(sys.argv[0]),
"task_specific_name" : '',
"user" : '',
"verbose" : 1,
"sleeptime" : '',
"runtime" : time.strftime("%Y-%m-%d %H:%M:%S"),
"priority" : 0,
"runtime_limit" : None,
"profile" : [],
"post-process": [],
"sequence-id": None,
"stop_queue_on_error": False,
"fixed_time": False,
}
to_be_submitted = True
if len(sys.argv) == 2 and sys.argv[1].isdigit():
_TASK_PARAMS['task_id'] = int(sys.argv[1])
argv = _task_get_options(_TASK_PARAMS['task_id'], _TASK_PARAMS['task_name'])
to_be_submitted = False
else:
argv = sys.argv
setup_loggers(_TASK_PARAMS.get('task_id'))
task_name = os.path.basename(sys.argv[0])
if task_name not in CFG_BIBTASK_VALID_TASKS or os.path.realpath(os.path.join(CFG_BINDIR, task_name)) != os.path.realpath(sys.argv[0]):
raise OSError("%s is not in the allowed modules" % sys.argv[0])
from invenio.errorlib import wrap_warn
wrap_warn()
if type(argv) is dict:
# FIXME: REMOVE AFTER MAJOR RELEASE 1.0
        # This is needed for old tasks submitted before CLI parameters
        # were stored in the DB and the _OPTIONS dictionary was stored instead.
_OPTIONS = argv
else:
try:
_task_build_params(_TASK_PARAMS['task_name'], argv, description,
help_specific_usage, version, specific_params,
task_submit_elaborate_specific_parameter_fnc,
task_submit_check_options_fnc)
except (SystemExit, Exception), err:
if not to_be_submitted:
register_exception(alert_admin=True)
write_message("Error in parsing the parameters: %s." % err, sys.stderr)
write_message("Exiting.", sys.stderr)
task_update_status("ERROR")
raise
write_message('argv=%s' % (argv, ), verbose=9)
write_message('_OPTIONS=%s' % (_OPTIONS, ), verbose=9)
write_message('_TASK_PARAMS=%s' % (_TASK_PARAMS, ), verbose=9)
if to_be_submitted:
_task_submit(argv, authorization_action, authorization_msg)
else:
try:
try:
if task_get_task_param('profile'):
try:
from cStringIO import StringIO
import pstats
filename = os.path.join(CFG_TMPDIR, 'bibsched_task_%s.pyprof' % _TASK_PARAMS['task_id'])
existing_sorts = pstats.Stats.sort_arg_dict_default.keys()
required_sorts = []
profile_dump = []
for sort in task_get_task_param('profile'):
if sort not in existing_sorts:
sort = 'cumulative'
if sort not in required_sorts:
required_sorts.append(sort)
if sys.hexversion < 0x02050000:
import hotshot
import hotshot.stats
pr = hotshot.Profile(filename)
ret = pr.runcall(_task_run, task_run_fnc)
for sort_type in required_sorts:
tmp_out = sys.stdout
sys.stdout = StringIO()
hotshot.stats.load(filename).strip_dirs().sort_stats(sort_type).print_stats()
# pylint: disable=E1103
# This is a hack. sys.stdout is a StringIO in this case.
profile_dump.append(sys.stdout.getvalue())
# pylint: enable=E1103
sys.stdout = tmp_out
else:
import cProfile
pr = cProfile.Profile()
ret = pr.runcall(_task_run, task_run_fnc)
pr.dump_stats(filename)
for sort_type in required_sorts:
strstream = StringIO()
pstats.Stats(filename, stream=strstream).strip_dirs().sort_stats(sort_type).print_stats()
profile_dump.append(strstream.getvalue())
profile_dump = '\n'.join(profile_dump)
profile_dump += '\nYou can use profile=%s' % existing_sorts
open(os.path.join(CFG_LOGDIR, 'bibsched_task_%d.log' % _TASK_PARAMS['task_id']), 'a').write("%s" % profile_dump)
os.remove(filename)
except ImportError:
ret = _task_run(task_run_fnc)
write_message("ERROR: The Python Profiler is not installed!", stream=sys.stderr)
else:
ret = _task_run(task_run_fnc)
if not ret:
write_message("Error occurred. Exiting.", sys.stderr)
except Exception, e:
register_exception(alert_admin=True)
write_message("Unexpected error occurred: %s." % e, sys.stderr)
write_message("Traceback is:", sys.stderr)
write_messages(''.join(traceback.format_tb(sys.exc_info()[2])), sys.stderr)
write_message("Exiting.", sys.stderr)
task_update_status("ERROR")
finally:
_task_email_logs()
logging.shutdown()
def _task_build_params(
task_name,
argv,
description="",
help_specific_usage="",
version=__revision__,
specific_params=("", []),
task_submit_elaborate_specific_parameter_fnc=None,
task_submit_check_options_fnc=None):
""" Build the BibTask params.
    @param argv: a list of strings as in sys.argv
@param description: is the generic description printed in the usage page;
@param help_specific_usage: is the specific parameter help
@param task_submit_elaborate_specific_parameter_fnc: will be called passing
a key and a value, for parsing specific cli parameters. Must return True if
it has recognized the parameter. Must eventually update the options with
bibtask_set_option;
@param task_submit_check_options: must check the validity of options (via
    bibtask_get_option) once all the options were parsed;
"""
global _OPTIONS
_OPTIONS = {}
if task_name in CFG_BIBTASK_DEFAULT_TASK_SETTINGS:
_OPTIONS.update(CFG_BIBTASK_DEFAULT_TASK_SETTINGS[task_name])
# set user-defined options:
try:
(short_params, long_params) = specific_params
opts, args = getopt.gnu_getopt(argv[1:], "hVv:u:s:t:P:N:L:I:" +
short_params, [
"help",
"version",
"verbose=",
"user=",
"sleep=",
"runtime=",
"priority=",
"name=",
"limit=",
"profile=",
"post-process=",
"sequence-id=",
"stop-on-error",
"continue-on-error",
"fixed-time",
"email-logs-to="
] + long_params)
except getopt.GetoptError, err:
_usage(1, err, help_specific_usage=help_specific_usage, description=description)
try:
for opt in opts:
if opt[0] in ("-h", "--help"):
_usage(0, help_specific_usage=help_specific_usage, description=description)
elif opt[0] in ("-V", "--version"):
print _TASK_PARAMS["version"]
sys.exit(0)
elif opt[0] in ("-u", "--user"):
_TASK_PARAMS["user"] = opt[1]
elif opt[0] in ("-v", "--verbose"):
_TASK_PARAMS["verbose"] = int(opt[1])
elif opt[0] in ("-s", "--sleeptime"):
if task_name not in CFG_TASK_IS_NOT_A_DEAMON:
get_datetime(opt[1]) # see if it is a valid shift
_TASK_PARAMS["sleeptime"] = opt[1]
elif opt[0] in ("-t", "--runtime"):
_TASK_PARAMS["runtime"] = get_datetime(opt[1])
elif opt[0] in ("-P", "--priority"):
_TASK_PARAMS["priority"] = int(opt[1])
elif opt[0] in ("-N", "--name"):
_TASK_PARAMS["task_specific_name"] = opt[1]
elif opt[0] in ("-L", "--limit"):
_TASK_PARAMS["runtime_limit"] = parse_runtime_limit(opt[1])
elif opt[0] in ("--profile", ):
_TASK_PARAMS["profile"] += opt[1].split(',')
elif opt[0] in ("--post-process", ):
_TASK_PARAMS["post-process"] += [opt[1]]
elif opt[0] in ("-I","--sequence-id"):
_TASK_PARAMS["sequence-id"] = opt[1]
elif opt[0] in ("--stop-on-error", ):
_TASK_PARAMS["stop_queue_on_error"] = True
elif opt[0] in ("--continue-on-error", ):
_TASK_PARAMS["stop_queue_on_error"] = False
elif opt[0] in ("--fixed-time", ):
_TASK_PARAMS["fixed_time"] = True
elif opt[0] in ("--email-logs-to",):
_TASK_PARAMS["email_logs_to"] = opt[1].split(',')
elif not callable(task_submit_elaborate_specific_parameter_fnc) or \
not task_submit_elaborate_specific_parameter_fnc(opt[0],
opt[1], opts, args):
_usage(1, help_specific_usage=help_specific_usage, description=description)
except StandardError, e:
_usage(e, help_specific_usage=help_specific_usage, description=description)
if callable(task_submit_check_options_fnc):
if not task_submit_check_options_fnc():
_usage(1, help_specific_usage=help_specific_usage, description=description)
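# Illustrative sketch of how a concrete task wires its own options through the
# two callbacks documented above (every name below -- 'number', '_elaborate' --
# is made up for the example; only task_set_option, task_get_option and the
# parameter names of _task_build_params come from this module):
#
#   def _elaborate(key, value, opts, args):
#       if key in ('-n', '--number'):
#           task_set_option('number', int(value))
#           return True
#       return False
#
# The task's entry point would then pass specific_params=("n:", ["number="])
# and task_submit_elaborate_specific_parameter_fnc=_elaborate down to
# _task_build_params, and read the value back later with
# task_get_option('number').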
def task_set_option(key, value):
"""Set an value to key in the option dictionary of the task"""
global _OPTIONS
try:
_OPTIONS[key] = value
except NameError:
_OPTIONS = {key : value}
def task_get_option(key, default=None):
"""Returns the value corresponding to key in the option dictionary of the task"""
try:
return _OPTIONS.get(key, default)
except NameError:
return default
def task_has_option(key):
"""Map the has_key query to _OPTIONS"""
try:
return _OPTIONS.has_key(key)
except NameError:
return False
def task_get_task_param(key, default=None):
"""Returns the value corresponding to the particular task param"""
try:
return _TASK_PARAMS.get(key, default)
except NameError:
return default
def task_set_task_param(key, value):
"""Set the value corresponding to the particular task param"""
global _TASK_PARAMS
try:
_TASK_PARAMS[key] = value
except NameError:
_TASK_PARAMS = {key : value}
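# Quick sketch of the intended split between the two dictionaries managed by
# the helpers above (option and parameter names are illustrative only):
#
#   task_set_option('collection', 'Theses')           # task-owned CLI option
#   coll = task_get_option('collection', default=None)
#   verbosity = task_get_task_param('verbose', 1)      # framework parameter
#
# _OPTIONS holds options belonging to the concrete task, while _TASK_PARAMS
# holds the scheduling and bookkeeping values managed by BibTask itself.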
def task_update_progress(msg):
"""Updates progress information in the BibSched task table."""
write_message("Updating task progress to %s." % msg, verbose=9)
if "task_id" in _TASK_PARAMS:
return run_sql("UPDATE schTASK SET progress=%s where id=%s",
(msg, _TASK_PARAMS["task_id"]))
def task_update_status(val):
"""Updates status information in the BibSched task table."""
write_message("Updating task status to %s." % val, verbose=9)
if "task_id" in _TASK_PARAMS:
return run_sql("UPDATE schTASK SET status=%s where id=%s",
(val, _TASK_PARAMS["task_id"]))
def task_read_status():
"""Read status information in the BibSched task table."""
res = run_sql("SELECT status FROM schTASK where id=%s",
(_TASK_PARAMS['task_id'],), 1)
try:
out = res[0][0]
except:
out = 'UNKNOWN'
return out
def write_messages(msgs, stream=None, verbose=1):
"""Write many messages through write_message"""
if stream is None:
stream = sys.stdout
for msg in msgs.split('\n'):
write_message(msg, stream, verbose)
def write_message(msg, stream=None, verbose=1):
"""Write message and flush output stream (may be sys.stdout or sys.stderr).
Useful for debugging stuff.
    @note: msg could be a callable with no parameters. In this case it is
    called in order to obtain the string to be printed.
"""
if stream is None:
stream = sys.stdout
if msg and _TASK_PARAMS['verbose'] >= verbose:
if callable(msg):
msg = msg()
if stream == sys.stdout:
logging.info(msg)
elif stream == sys.stderr:
logging.error(msg)
else:
sys.stderr.write("Unknown stream %s. [must be sys.stdout or sys.stderr]\n" % stream)
else:
logging.debug(msg)
_RE_SHIFT = re.compile(r"([-\+]{0,1})([\d]+)([dhms])")
def get_datetime(var, format_string="%Y-%m-%d %H:%M:%S", now=None):
"""Returns a date string according to the format string.
It can handle normal date strings and shifts with respect
to now."""
date = now or datetime.datetime.now()
factors = {"d": 24 * 3600, "h": 3600, "m": 60, "s": 1}
m = _RE_SHIFT.match(var)
if m:
sign = m.groups()[0] == "-" and -1 or 1
factor = factors[m.groups()[2]]
value = float(m.groups()[1])
delta = sign * factor * value
while delta > 0 and date < datetime.datetime.now():
date = date + datetime.timedelta(seconds=delta)
date = date.strftime(format_string)
else:
date = time.strptime(var, format_string)
date = time.strftime(format_string, date)
return date
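# Illustrative inputs accepted by get_datetime(); outputs depend on the current
# time, so they are not shown:
#
#   get_datetime('2013-02-03 10:00:00')   # absolute date, reformatted
#   get_datetime('+2h')                   # shift: two hours ahead of now
#   get_datetime('30m')                   # shift: thirty minutes
#
# The shift syntax is [sign]<number><unit> with unit one of d/h/m/s, as matched
# by _RE_SHIFT above.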
def task_sleep_now_if_required(can_stop_too=False):
"""This function should be called during safe state of BibTask,
e.g. after flushing caches or outside of run_sql calls.
"""
status = task_read_status()
write_message('Entering task_sleep_now_if_required with status=%s' % status, verbose=9)
if status == 'ABOUT TO SLEEP':
write_message("sleeping...")
task_update_status("SLEEPING")
signal.signal(signal.SIGTSTP, _task_sig_dumb)
os.kill(os.getpid(), signal.SIGSTOP)
time.sleep(1)
if task_read_status() == 'NOW STOP':
if can_stop_too:
write_message("stopped")
task_update_status("STOPPED")
sys.exit(0)
else:
write_message("stopping as soon as possible...")
task_update_status('ABOUT TO STOP')
else:
write_message("... continuing...")
task_update_status("CONTINUING")
signal.signal(signal.SIGTSTP, _task_sig_sleep)
elif status == 'ABOUT TO STOP':
if can_stop_too:
write_message("stopped")
task_update_status("STOPPED")
sys.exit(0)
else:
## I am a capricious baby. At least I am going to sleep :-)
write_message("sleeping...")
task_update_status("SLEEPING")
signal.signal(signal.SIGTSTP, _task_sig_dumb)
os.kill(os.getpid(), signal.SIGSTOP)
time.sleep(1)
## Putting back the status to "ABOUT TO STOP"
write_message("... continuing...")
task_update_status("ABOUT TO STOP")
signal.signal(signal.SIGTSTP, _task_sig_sleep)
if can_stop_too:
runtime_limit = task_get_option("limit")
if runtime_limit is not None:
if not (runtime_limit[0] <= datetime.datetime.now() <= runtime_limit[1]):
write_message("stopped (outside runtime limit)")
task_update_status("STOPPED")
sys.exit(0)
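# Typical call site (sketch only; 'chunks', 'process' and 'num_done' are
# made-up names): a long-running task calls this helper between units of work,
# once any caches have been flushed, so that sleep/stop requests are honoured
# only at safe points:
#
#   for num_done, chunk in enumerate(chunks, 1):
#       process(chunk)
#       task_update_progress('Done %d chunks' % num_done)
#       task_sleep_now_if_required(can_stop_too=True)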
def authenticate(user, authorization_action, authorization_msg=""):
"""Authenticate the user against the user database.
Check for its password, if it exists.
Check for authorization_action access rights.
Return user name upon authorization success,
do system exit upon authorization failure.
"""
# With SSO it's impossible to check for pwd
if CFG_EXTERNAL_AUTH_USING_SSO or os.path.basename(sys.argv[0]) in CFG_VALID_PROCESSES_NO_AUTH_NEEDED:
return user
if authorization_msg:
print authorization_msg
print "=" * len(authorization_msg)
if user == "":
print >> sys.stdout, "\rUsername: ",
try:
user = sys.stdin.readline().lower().strip()
except EOFError:
sys.stderr.write("\n")
sys.exit(1)
except KeyboardInterrupt:
sys.stderr.write("\n")
sys.exit(1)
else:
print >> sys.stdout, "\rUsername:", user
## first check user:
# p_un passed may be an email or a nickname:
res = run_sql("select id from user where email=%s", (user,), 1) + \
run_sql("select id from user where nickname=%s", (user,), 1)
if not res:
print "Sorry, %s does not exist." % user
sys.exit(1)
else:
uid = res[0][0]
ok = False
login_method = get_user_preferences(uid)['login_method']
if not CFG_EXTERNAL_AUTHENTICATION[login_method]:
#Local authentication, let's see if we want passwords.
res = run_sql("select id from user where id=%s "
"and password=AES_ENCRYPT(email,'')",
(uid,), 1)
if res:
ok = True
if not ok:
try:
password_entered = getpass.getpass()
except EOFError:
sys.stderr.write("\n")
sys.exit(1)
except KeyboardInterrupt:
sys.stderr.write("\n")
sys.exit(1)
if not CFG_EXTERNAL_AUTHENTICATION[login_method]:
res = run_sql("select id from user where id=%s "
"and password=AES_ENCRYPT(email, %s)",
(uid, password_entered), 1)
if res:
ok = True
else:
if CFG_EXTERNAL_AUTHENTICATION[login_method].auth_user(get_email(uid), password_entered):
ok = True
if not ok:
print "Sorry, wrong credentials for %s." % user
sys.exit(1)
else:
## secondly check authorization for the authorization_action:
(auth_code, auth_message) = acc_authorize_action(uid, authorization_action)
if auth_code != 0:
print auth_message
sys.exit(1)
return user
def _task_submit(argv, authorization_action, authorization_msg):
"""Submits task to the BibSched task queue. This is what people will
be invoking via command line."""
## check as whom we want to submit?
check_running_process_user()
## sanity check: remove eventual "task" option:
## authenticate user:
_TASK_PARAMS['user'] = authenticate(_TASK_PARAMS["user"], authorization_action, authorization_msg)
## submit task:
if _TASK_PARAMS['task_specific_name']:
task_name = '%s:%s' % (_TASK_PARAMS['task_name'], _TASK_PARAMS['task_specific_name'])
else:
task_name = _TASK_PARAMS['task_name']
write_message("storing task options %s\n" % argv, verbose=9)
verbose_argv = 'Will execute: %s' % ' '.join([escape_shell_arg(str(arg)) for arg in argv])
_TASK_PARAMS['task_id'] = run_sql("""INSERT INTO schTASK (proc,user,
runtime,sleeptime,status,progress,arguments,priority,sequenceid)
VALUES (%s,%s,%s,%s,'WAITING',%s,%s,%s,%s)""",
(task_name, _TASK_PARAMS['user'], _TASK_PARAMS["runtime"],
_TASK_PARAMS["sleeptime"], verbose_argv, marshal.dumps(argv), _TASK_PARAMS['priority'], _TASK_PARAMS['sequence-id']))
## update task number:
write_message("Task #%d submitted." % _TASK_PARAMS['task_id'])
return _TASK_PARAMS['task_id']
def _task_get_options(task_id, task_name):
"""Returns options for the task 'id' read from the BibSched task
queue table."""
out = {}
res = run_sql("SELECT arguments FROM schTASK WHERE id=%s AND proc LIKE %s",
(task_id, task_name+'%'))
try:
out = marshal.loads(res[0][0])
except:
write_message("Error: %s task %d does not seem to exist." \
% (task_name, task_id), sys.stderr)
task_update_status('ERROR')
sys.exit(1)
write_message('Options retrieved: %s' % (out, ), verbose=9)
return out
def _task_email_logs():
"""
In case this was requested, emails the logs.
"""
email_logs_to = task_get_task_param('email_logs_to')
if not email_logs_to:
return
status = task_read_status()
task_name = task_get_task_param('task_name')
task_specific_name = task_get_task_param('task_specific_name')
if task_specific_name:
task_name += ':' + task_specific_name
runtime = task_get_task_param('runtime')
title = "Execution of %s: %s" % (task_name, status)
body = """
Attached you can find the stdout and stderr logs of the execution of
name: %s
id: %s
runtime: %s
options: %s
status: %s
""" % (task_name, _TASK_PARAMS['task_id'], runtime, _OPTIONS, status)
err_file = os.path.join(CFG_LOGDIR, 'bibsched_task_%d.err' % _TASK_PARAMS['task_id'])
log_file = os.path.join(CFG_LOGDIR, 'bibsched_task_%d.log' % _TASK_PARAMS['task_id'])
return send_email(CFG_SITE_SUPPORT_EMAIL, email_logs_to, title, body, attachments=[(log_file, 'text/plain'), (err_file, 'text/plain')])
def _task_run(task_run_fnc):
"""Runs the task by fetching arguments from the BibSched task queue.
This is what BibSched will be invoking via daemon call.
@param task_run_fnc: will be called as the main core function. Must return
False in case of errors.
Return True in case of success and False in case of failure."""
from invenio.bibtasklet import _TASKLETS
## We prepare the pid file inside /prefix/var/run/taskname_id.pid
check_running_process_user()
try:
pidfile_name = os.path.join(CFG_PREFIX, 'var', 'run',
'bibsched_task_%d.pid' % _TASK_PARAMS['task_id'])
pidfile = open(pidfile_name, 'w')
pidfile.write(str(os.getpid()))
pidfile.close()
except OSError:
register_exception(alert_admin=True)
task_update_status("ERROR")
return False
## check task status:
task_status = task_read_status()
if task_status not in ("WAITING", "SCHEDULED"):
write_message("Error: The task #%d is %s. I expected WAITING or SCHEDULED." %
(_TASK_PARAMS['task_id'], task_status), sys.stderr)
return False
time_now = datetime.datetime.now()
if _TASK_PARAMS['runtime_limit'] is not None and os.environ.get('BIBSCHED_MODE', 'manual') != 'manual':
if not _TASK_PARAMS['runtime_limit'][0][0] <= time_now <= _TASK_PARAMS['runtime_limit'][0][1]:
if time_now <= _TASK_PARAMS['runtime_limit'][0][0]:
new_runtime = _TASK_PARAMS['runtime_limit'][0][0].strftime("%Y-%m-%d %H:%M:%S")
else:
new_runtime = _TASK_PARAMS['runtime_limit'][1][0].strftime("%Y-%m-%d %H:%M:%S")
progress = run_sql("SELECT progress FROM schTASK WHERE id=%s", (_TASK_PARAMS['task_id'], ))
if progress:
progress = progress[0][0]
else:
progress = ''
g = re.match(r'Postponed (\d+) time\(s\)', progress)
if g:
postponed_times = int(g.group(1))
else:
postponed_times = 0
if _TASK_PARAMS['sequence-id']:
## Also postponing other dependent tasks.
run_sql("UPDATE schTASK SET runtime=%s, progress=%s WHERE sequenceid=%s AND status='WAITING'", (new_runtime, 'Postponed as task %s' % _TASK_PARAMS['task_id'], _TASK_PARAMS['sequence-id'])) # kwalitee: disable=sql
run_sql("UPDATE schTASK SET runtime=%s, status='WAITING', progress=%s, host='' WHERE id=%s", (new_runtime, 'Postponed %d time(s)' % (postponed_times + 1), _TASK_PARAMS['task_id'])) # kwalitee: disable=sql
write_message("Task #%d postponed because outside of runtime limit" % _TASK_PARAMS['task_id'])
return True
# Make sure the host field is updated
# It will not be updated properly when we run
# a task from the cli (without using the bibsched monitor)
host = bibsched_get_host(_TASK_PARAMS['task_id'])
if host and host != gethostname():
write_message("Error: The task #%d is bound to %s." %
(_TASK_PARAMS['task_id'], host), sys.stderr)
return False
else:
bibsched_set_host(_TASK_PARAMS['task_id'], gethostname())
## initialize signal handler:
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGTSTP, _task_sig_sleep)
signal.signal(signal.SIGTERM, _task_sig_stop)
signal.signal(signal.SIGQUIT, _task_sig_stop)
signal.signal(signal.SIGABRT, _task_sig_suicide)
signal.signal(signal.SIGINT, _task_sig_stop)
## we can run the task now:
write_message("Task #%d started." % _TASK_PARAMS['task_id'])
task_update_status("RUNNING")
## run the task:
_TASK_PARAMS['task_starting_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
sleeptime = _TASK_PARAMS['sleeptime']
try:
try:
if callable(task_run_fnc) and task_run_fnc():
task_update_status("DONE")
else:
task_update_status("DONE WITH ERRORS")
except SystemExit:
pass
except:
write_message(traceback.format_exc()[:-1])
register_exception(alert_admin=True)
if task_get_task_param('stop_queue_on_error'):
task_update_status("ERROR")
else:
task_update_status("CERROR")
finally:
task_status = task_read_status()
if sleeptime:
argv = _task_get_options(_TASK_PARAMS['task_id'], _TASK_PARAMS['task_name'])
verbose_argv = 'Will execute: %s' % ' '.join([escape_shell_arg(str(arg)) for arg in argv])
            # Here we check if the task can shift away or has to be run at
            # a fixed time
if task_get_task_param('fixed_time') or _TASK_PARAMS['task_name'] in CFG_BIBTASK_FIXEDTIMETASKS:
old_runtime = run_sql("SELECT runtime FROM schTASK WHERE id=%s", (_TASK_PARAMS['task_id'], ))[0][0]
else:
old_runtime = None
new_runtime = get_datetime(sleeptime, now=old_runtime)
## The task is a daemon. We resubmit it
if task_status == 'DONE':
## It has finished in a good way. We recycle the database row
run_sql("UPDATE schTASK SET runtime=%s, status='WAITING', progress=%s, host='' WHERE id=%s", (new_runtime, verbose_argv, _TASK_PARAMS['task_id']))
write_message("Task #%d finished and resubmitted." % _TASK_PARAMS['task_id'])
elif task_status == 'STOPPED':
run_sql("UPDATE schTASK SET status='WAITING', progress=%s, host='' WHERE id=%s", (verbose_argv, _TASK_PARAMS['task_id'], ))
write_message("Task #%d stopped and resubmitted." % _TASK_PARAMS['task_id'])
else:
## We keep the bad result and we resubmit with another id.
#res = run_sql('SELECT proc,user,sleeptime,arguments,priority FROM schTASK WHERE id=%s', (_TASK_PARAMS['task_id'], ))
#proc, user, sleeptime, arguments, priority = res[0]
#run_sql("""INSERT INTO schTASK (proc,user,
#runtime,sleeptime,status,arguments,priority)
#VALUES (%s,%s,%s,%s,'WAITING',%s, %s)""",
#(proc, user, new_runtime, sleeptime, arguments, priority))
write_message("Task #%d finished but not resubmitted. [%s]" % (_TASK_PARAMS['task_id'], task_status))
else:
## we are done:
write_message("Task #%d finished. [%s]" % (_TASK_PARAMS['task_id'], task_status))
## Removing the pid
os.remove(pidfile_name)
    # Let's call the post-process tasklets
if task_get_task_param("post-process"):
split = re.compile(r"(bst_.*)\[(.*)\]")
for tasklet in task_get_task_param("post-process"):
if not split.match(tasklet): # wrong syntax
_usage(1, "There is an error in the post processing option "
"for this task.")
aux_tasklet = split.match(tasklet)
_TASKLETS[aux_tasklet.group(1)](**eval("dict(%s)" % (aux_tasklet.group(2))))
return True
def _usage(exitcode=1, msg="", help_specific_usage="", description=""):
"""Prints usage info."""
if msg:
sys.stderr.write("Error: %s.\n" % msg)
sys.stderr.write("Usage: %s [options]\n" % sys.argv[0])
if help_specific_usage:
sys.stderr.write("Command options:\n")
sys.stderr.write(help_specific_usage)
sys.stderr.write(" Scheduling options:\n")
sys.stderr.write(" -u, --user=USER\tUser name under which to submit this"
" task.\n")
sys.stderr.write(" -t, --runtime=TIME\tTime to execute the task. [default=now]\n"
"\t\t\tExamples: +15s, 5m, 3h, 2002-10-27 13:57:26.\n")
sys.stderr.write(" -s, --sleeptime=SLEEP\tSleeping frequency after"
" which to repeat the task.\n"
"\t\t\tExamples: 30m, 2h, 1d. [default=no]\n")
sys.stderr.write(" --fixed-time\t\tAvoid drifting of execution time when using --sleeptime\n")
sys.stderr.write(" -I, --sequence-id=SEQUENCE-ID\tSequence Id of the current process\n")
sys.stderr.write(" -L --limit=LIMIT\tTime limit when it is"
" allowed to execute the task.\n"
"\t\t\tExamples: 22:00-03:00, Sunday 01:00-05:00.\n"
"\t\t\tSyntax: [Wee[kday]] [hh[:mm][-hh[:mm]]].\n")
sys.stderr.write(" -P, --priority=PRI\tTask priority (0=default, 1=higher, etc).\n")
sys.stderr.write(" -N, --name=NAME\tTask specific name (advanced option).\n\n")
sys.stderr.write(" General options:\n")
sys.stderr.write(" -h, --help\t\tPrint this help.\n")
sys.stderr.write(" -V, --version\t\tPrint version information.\n")
sys.stderr.write(" -v, --verbose=LEVEL\tVerbose level (0=min,"
" 1=default, 9=max).\n")
sys.stderr.write(" --profile=STATS\tPrint profile information. STATS is a comma-separated\n\t\t\tlist of desired output stats (calls, cumulative,\n\t\t\tfile, line, module, name, nfl, pcalls, stdname, time).\n")
sys.stderr.write(" --stop-on-error\tIn case of unrecoverable error stop the bibsched queue.\n")
sys.stderr.write(" --continue-on-error\tIn case of unrecoverable error don't stop the bibsched queue.\n")
sys.stderr.write(" --post-process=BIB_TASKLET_NAME[parameters]\tPostprocesses the specified\n\t\t\tbibtasklet with the given parameters between square\n\t\t\tbrackets.\n")
sys.stderr.write("\t\t\tExample:--post-process \"bst_send_email[fromaddr=\n\t\t\t'[email protected]', toaddr='[email protected]', subject='hello',\n\t\t\tcontent='help']\"\n")
sys.stderr.write(" --email-logs-to=EMAILS Sends an email with the results of the execution\n\t\t\tof the task, and attached the logs (EMAILS could be a comma-\n\t\t\tseparated lists of email addresses)\n")
if description:
sys.stderr.write(description)
sys.exit(exitcode)
def _task_sig_sleep(sig, frame):
"""Signal handler for the 'sleep' signal sent by BibSched."""
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
write_message("task_sig_sleep(), got signal %s frame %s"
% (sig, frame), verbose=9)
write_message("sleeping as soon as possible...")
_db_login(relogin=1)
task_update_status("ABOUT TO SLEEP")
def _task_sig_stop(sig, frame):
"""Signal handler for the 'stop' signal sent by BibSched."""
write_message("task_sig_stop(), got signal %s frame %s"
% (sig, frame), verbose=9)
write_message("stopping as soon as possible...")
_db_login(relogin=1) # To avoid concurrency with an interrupted run_sql call
task_update_status("ABOUT TO STOP")
def _task_sig_suicide(sig, frame):
"""Signal handler for the 'suicide' signal sent by BibSched."""
write_message("task_sig_suicide(), got signal %s frame %s"
% (sig, frame), verbose=9)
write_message("suiciding myself now...")
task_update_status("SUICIDING")
write_message("suicided")
_db_login(relogin=1)
task_update_status("SUICIDED")
sys.exit(1)
def _task_sig_dumb(sig, frame):
"""Dumb signal handler."""
pass
_RE_PSLINE = re.compile(r'^\s*(\w+)\s+(\w+)')
def guess_apache_process_user_from_ps():
"""Guess Apache process user by parsing the list of running processes."""
apache_users = []
try:
# Tested on Linux, Sun and MacOS X
for line in os.popen('ps -A -o user,comm').readlines():
g = _RE_PSLINE.match(line)
if g:
username = g.group(1)
process = os.path.basename(g.group(2))
            if process in ('apache', 'apache2', 'httpd'):
if username not in apache_users and username != 'root':
apache_users.append(username)
except Exception, e:
print >> sys.stderr, "WARNING: %s" % e
return tuple(apache_users)
def guess_apache_process_user():
"""
Return the possible name of the user running the Apache server process.
(Look at running OS processes or look at OS users defined in /etc/passwd.)
"""
apache_users = guess_apache_process_user_from_ps() + ('apache2', 'apache', 'www-data')
for username in apache_users:
try:
userline = pwd.getpwnam(username)
return userline[0]
except KeyError:
pass
print >> sys.stderr, "ERROR: Cannot detect Apache server process user. Please set the correct value in CFG_BIBSCHED_PROCESS_USER."
sys.exit(1)
def check_running_process_user():
"""
Check that the user running this program is the same as the user
configured in CFG_BIBSCHED_PROCESS_USER or as the user running the
Apache webserver process.
"""
running_as_user = pwd.getpwuid(os.getuid())[0]
if CFG_BIBSCHED_PROCESS_USER:
# We have the expected bibsched process user defined in config,
# so check against her, not against Apache.
if running_as_user != CFG_BIBSCHED_PROCESS_USER:
print >> sys.stderr, """ERROR: You must run "%(x_proc)s" as the user set up in your
CFG_BIBSCHED_PROCESS_USER (seems to be "%(x_user)s").
You may want to do "sudo -u %(x_user)s %(x_proc)s ..." to do so.
If you think this is not right, please set CFG_BIBSCHED_PROCESS_USER
appropriately and rerun "inveniocfg --update-config-py".""" % \
{'x_proc': os.path.basename(sys.argv[0]), 'x_user': CFG_BIBSCHED_PROCESS_USER}
sys.exit(1)
elif running_as_user != guess_apache_process_user(): # not defined in config, check against Apache
print >> sys.stderr, """ERROR: You must run "%(x_proc)s" as the same user that runs your Apache server
process (seems to be "%(x_user)s").
You may want to do "sudo -u %(x_user)s %(x_proc)s ..." to do so.
If you think this is not right, please set CFG_BIBSCHED_PROCESS_USER
appropriately and rerun "inveniocfg --update-config-py".""" % \
{'x_proc': os.path.basename(sys.argv[0]), 'x_user': guess_apache_process_user()}
sys.exit(1)
return
| gpl-2.0 | 5,392,646,837,147,061,000 | 41.622673 | 228 | 0.583426 | false |
alanmcruickshank/superset-dev | superset/connectors/druid/models.py | 1 | 45334 | # pylint: disable=invalid-unary-operand-type
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
import json
import logging
from multiprocessing import Pool
from dateutil.parser import parse as dparse
from flask import escape, Markup
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from flask_babel import lazy_gettext as _
from pydruid.client import PyDruid
from pydruid.utils.aggregators import count
from pydruid.utils.filters import Bound, Dimension, Filter
from pydruid.utils.having import Aggregation
from pydruid.utils.postaggregator import (
Const, Field, HyperUniqueCardinality, Postaggregator, Quantile, Quantiles,
)
import requests
from six import string_types
import sqlalchemy as sa
from sqlalchemy import (
Boolean, Column, DateTime, ForeignKey, Integer, or_, String, Text,
)
from sqlalchemy.orm import backref, relationship
from superset import conf, db, import_util, sm, utils
from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
from superset.models.helpers import AuditMixinNullable, QueryResult, set_perm
from superset.utils import (
DimSelector, DTTM_ALIAS, flasher, MetricPermException,
)
DRUID_TZ = conf.get('DRUID_TZ')
# Function wrapper because bound methods cannot
# be passed to processes
def _fetch_metadata_for(datasource):
return datasource.latest_metadata()
class JavascriptPostAggregator(Postaggregator):
def __init__(self, name, field_names, function):
self.post_aggregator = {
'type': 'javascript',
'fieldNames': field_names,
'name': name,
'function': function,
}
self.name = name
class CustomPostAggregator(Postaggregator):
"""A way to allow users to specify completely custom PostAggregators"""
def __init__(self, name, post_aggregator):
self.name = name
self.post_aggregator = post_aggregator
class DruidCluster(Model, AuditMixinNullable):
"""ORM object referencing the Druid clusters"""
__tablename__ = 'clusters'
type = 'druid'
id = Column(Integer, primary_key=True)
verbose_name = Column(String(250), unique=True)
# short unique name, used in permissions
cluster_name = Column(String(250), unique=True)
coordinator_host = Column(String(255))
coordinator_port = Column(Integer, default=8081)
coordinator_endpoint = Column(
String(255), default='druid/coordinator/v1/metadata')
broker_host = Column(String(255))
broker_port = Column(Integer, default=8082)
broker_endpoint = Column(String(255), default='druid/v2')
metadata_last_refreshed = Column(DateTime)
cache_timeout = Column(Integer)
def __repr__(self):
return self.verbose_name if self.verbose_name else self.cluster_name
def get_pydruid_client(self):
cli = PyDruid(
'http://{0}:{1}/'.format(self.broker_host, self.broker_port),
self.broker_endpoint)
return cli
def get_datasources(self):
endpoint = (
'http://{obj.coordinator_host}:{obj.coordinator_port}/'
'{obj.coordinator_endpoint}/datasources'
).format(obj=self)
return json.loads(requests.get(endpoint).text)
def get_druid_version(self):
endpoint = (
'http://{obj.coordinator_host}:{obj.coordinator_port}/status'
).format(obj=self)
return json.loads(requests.get(endpoint).text)['version']
def refresh_datasources(
self,
datasource_name=None,
merge_flag=True,
refreshAll=True):
"""Refresh metadata of all datasources in the cluster
If ``datasource_name`` is specified, only that datasource is updated
"""
self.druid_version = self.get_druid_version()
ds_list = self.get_datasources()
blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', [])
ds_refresh = []
if not datasource_name:
ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list))
elif datasource_name not in blacklist and datasource_name in ds_list:
ds_refresh.append(datasource_name)
else:
return
self.refresh_async(ds_refresh, merge_flag, refreshAll)
def refresh_async(self, datasource_names, merge_flag, refreshAll):
"""
        Fetches metadata for the specified datasources and
        merges it into the Superset database
"""
session = db.session
ds_list = (
session.query(DruidDatasource)
.filter(or_(DruidDatasource.datasource_name == name
for name in datasource_names))
)
ds_map = {ds.name: ds for ds in ds_list}
for ds_name in datasource_names:
datasource = ds_map.get(ds_name, None)
if not datasource:
datasource = DruidDatasource(datasource_name=ds_name)
with session.no_autoflush:
session.add(datasource)
flasher(
'Adding new datasource [{}]'.format(ds_name), 'success')
ds_map[ds_name] = datasource
elif refreshAll:
flasher(
'Refreshing datasource [{}]'.format(ds_name), 'info')
else:
del ds_map[ds_name]
continue
datasource.cluster = self
datasource.merge_flag = merge_flag
session.flush()
        # Prepare multiprocess execution
pool = Pool()
ds_refresh = list(ds_map.values())
metadata = pool.map(_fetch_metadata_for, ds_refresh)
pool.close()
pool.join()
for i in range(0, len(ds_refresh)):
datasource = ds_refresh[i]
cols = metadata[i]
col_objs_list = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_name == datasource.datasource_name)
.filter(or_(DruidColumn.column_name == col for col in cols))
)
col_objs = {col.column_name: col for col in col_objs_list}
for col in cols:
if col == '__time': # skip the time column
continue
col_obj = col_objs.get(col, None)
if not col_obj:
col_obj = DruidColumn(
datasource_name=datasource.datasource_name,
column_name=col)
with session.no_autoflush:
session.add(col_obj)
datatype = cols[col]['type']
if datatype == 'STRING':
col_obj.groupby = True
col_obj.filterable = True
if datatype == 'hyperUnique' or datatype == 'thetaSketch':
col_obj.count_distinct = True
# Allow sum/min/max for long or double
if datatype == 'LONG' or datatype == 'DOUBLE':
col_obj.sum = True
col_obj.min = True
col_obj.max = True
col_obj.type = datatype
col_obj.datasource = datasource
datasource.generate_metrics_for(col_objs_list)
session.commit()
@property
def perm(self):
return '[{obj.cluster_name}].(id:{obj.id})'.format(obj=self)
def get_perm(self):
return self.perm
@property
def name(self):
return self.verbose_name if self.verbose_name else self.cluster_name
@property
def unique_name(self):
return self.verbose_name if self.verbose_name else self.cluster_name
class DruidColumn(Model, BaseColumn):
"""ORM model for storing Druid datasource column metadata"""
__tablename__ = 'columns'
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
'DruidDatasource',
backref=backref('columns', cascade='all, delete-orphan'),
enable_typechecks=False)
dimension_spec_json = Column(Text)
export_fields = (
'datasource_name', 'column_name', 'is_active', 'type', 'groupby',
'count_distinct', 'sum', 'avg', 'max', 'min', 'filterable',
'description', 'dimension_spec_json',
)
def __repr__(self):
return self.column_name
@property
def expression(self):
return self.dimension_spec_json
@property
def dimension_spec(self):
if self.dimension_spec_json:
return json.loads(self.dimension_spec_json)
def get_metrics(self):
metrics = {}
metrics['count'] = DruidMetric(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
json=json.dumps({'type': 'count', 'name': 'count'}),
)
# Somehow we need to reassign this for UDAFs
if self.type in ('DOUBLE', 'FLOAT'):
corrected_type = 'DOUBLE'
else:
corrected_type = self.type
if self.sum and self.is_num:
mt = corrected_type.lower() + 'Sum'
name = 'sum__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='sum',
verbose_name='SUM({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.avg and self.is_num:
mt = corrected_type.lower() + 'Avg'
name = 'avg__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='avg',
verbose_name='AVG({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.min and self.is_num:
mt = corrected_type.lower() + 'Min'
name = 'min__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='min',
verbose_name='MIN({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.max and self.is_num:
mt = corrected_type.lower() + 'Max'
name = 'max__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='max',
verbose_name='MAX({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.count_distinct:
name = 'count_distinct__' + self.column_name
if self.type == 'hyperUnique' or self.type == 'thetaSketch':
metrics[name] = DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type=self.type,
json=json.dumps({
'type': self.type,
'name': name,
'fieldName': self.column_name,
}),
)
else:
metrics[name] = DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type='count_distinct',
json=json.dumps({
'type': 'cardinality',
'name': name,
'fieldNames': [self.column_name]}),
)
return metrics
def generate_metrics(self):
"""Generate metrics based on the column metadata"""
metrics = self.get_metrics()
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidCluster.cluster_name == self.datasource.cluster_name)
.filter(DruidMetric.datasource_name == self.datasource_name)
.filter(or_(
DruidMetric.metric_name == m for m in metrics
))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
metric.datasource_name = self.datasource_name
if not dbmetrics.get(metric.metric_name, None):
db.session.add(metric)
@classmethod
def import_obj(cls, i_column):
def lookup_obj(lookup_column):
return db.session.query(DruidColumn).filter(
DruidColumn.datasource_name == lookup_column.datasource_name,
DruidColumn.column_name == lookup_column.column_name).first()
return import_util.import_simple_obj(db.session, i_column, lookup_obj)
class DruidMetric(Model, BaseMetric):
"""ORM object referencing Druid metrics for a datasource"""
__tablename__ = 'metrics'
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
'DruidDatasource',
backref=backref('metrics', cascade='all, delete-orphan'),
enable_typechecks=False)
json = Column(Text)
export_fields = (
'metric_name', 'verbose_name', 'metric_type', 'datasource_name',
'json', 'description', 'is_restricted', 'd3format',
)
@property
def expression(self):
return self.json
@property
def json_obj(self):
try:
obj = json.loads(self.json)
except Exception:
obj = {}
return obj
@property
def perm(self):
return (
'{parent_name}.[{obj.metric_name}](id:{obj.id})'
).format(obj=self,
parent_name=self.datasource.full_name,
) if self.datasource else None
@classmethod
def import_obj(cls, i_metric):
def lookup_obj(lookup_metric):
return db.session.query(DruidMetric).filter(
DruidMetric.datasource_name == lookup_metric.datasource_name,
DruidMetric.metric_name == lookup_metric.metric_name).first()
return import_util.import_simple_obj(db.session, i_metric, lookup_obj)
class DruidDatasource(Model, BaseDatasource):
"""ORM object referencing Druid datasources (tables)"""
__tablename__ = 'datasources'
type = 'druid'
query_langtage = 'json'
cluster_class = DruidCluster
metric_class = DruidMetric
column_class = DruidColumn
baselink = 'druiddatasourcemodelview'
# Columns
datasource_name = Column(String(255), unique=True)
is_hidden = Column(Boolean, default=False)
fetch_values_from = Column(String(100))
cluster_name = Column(
String(250), ForeignKey('clusters.cluster_name'))
cluster = relationship(
'DruidCluster', backref='datasources', foreign_keys=[cluster_name])
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship(
sm.user_model,
backref=backref('datasources', cascade='all, delete-orphan'),
foreign_keys=[user_id])
export_fields = (
'datasource_name', 'is_hidden', 'description', 'default_endpoint',
'cluster_name', 'offset', 'cache_timeout', 'params',
)
@property
def database(self):
return self.cluster
@property
def connection(self):
return str(self.database)
@property
def num_cols(self):
return [c.column_name for c in self.columns if c.is_num]
@property
def name(self):
return self.datasource_name
@property
def schema(self):
ds_name = self.datasource_name or ''
name_pieces = ds_name.split('.')
if len(name_pieces) > 1:
return name_pieces[0]
else:
return None
@property
def schema_perm(self):
"""Returns schema permission if present, cluster one otherwise."""
return utils.get_schema_perm(self.cluster, self.schema)
def get_perm(self):
return (
'[{obj.cluster_name}].[{obj.datasource_name}]'
'(id:{obj.id})').format(obj=self)
@property
def link(self):
name = escape(self.datasource_name)
return Markup('<a href="{self.url}">{name}</a>').format(**locals())
@property
def full_name(self):
return utils.get_datasource_full_name(
self.cluster_name, self.datasource_name)
@property
def time_column_grains(self):
return {
'time_columns': [
'all', '5 seconds', '30 seconds', '1 minute',
'5 minutes', '1 hour', '6 hour', '1 day', '7 days',
'week', 'week_starting_sunday', 'week_ending_saturday',
'month',
],
'time_grains': ['now'],
}
def __repr__(self):
return self.datasource_name
@renders('datasource_name')
def datasource_link(self):
url = '/superset/explore/{obj.type}/{obj.id}/'.format(obj=self)
name = escape(self.datasource_name)
return Markup('<a href="{url}">{name}</a>'.format(**locals()))
def get_metric_obj(self, metric_name):
return [
m.json_obj for m in self.metrics
if m.metric_name == metric_name
][0]
@classmethod
def import_obj(cls, i_datasource, import_time=None):
"""Imports the datasource from the object to the database.
        Metrics, columns and the datasource will be overridden if they exist.
        This function can be used to import/export datasources between multiple
        superset instances. Audit metadata isn't copied over.
"""
def lookup_datasource(d):
return db.session.query(DruidDatasource).join(DruidCluster).filter(
DruidDatasource.datasource_name == d.datasource_name,
DruidCluster.cluster_name == d.cluster_name,
).first()
def lookup_cluster(d):
return db.session.query(DruidCluster).filter_by(
cluster_name=d.cluster_name).one()
return import_util.import_datasource(
db.session, i_datasource, lookup_cluster, lookup_datasource,
import_time)
@staticmethod
def version_higher(v1, v2):
"""is v1 higher than v2
>>> DruidDatasource.version_higher('0.8.2', '0.9.1')
False
>>> DruidDatasource.version_higher('0.8.2', '0.6.1')
True
>>> DruidDatasource.version_higher('0.8.2', '0.8.2')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9.BETA')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9')
False
"""
def int_or_0(v):
try:
v = int(v)
except (TypeError, ValueError):
v = 0
return v
v1nums = [int_or_0(n) for n in v1.split('.')]
v2nums = [int_or_0(n) for n in v2.split('.')]
v1nums = (v1nums + [0, 0, 0])[:3]
v2nums = (v2nums + [0, 0, 0])[:3]
return v1nums[0] > v2nums[0] or \
(v1nums[0] == v2nums[0] and v1nums[1] > v2nums[1]) or \
(v1nums[0] == v2nums[0] and v1nums[1] == v2nums[1] and v1nums[2] > v2nums[2])
def latest_metadata(self):
"""Returns segment metadata from the latest segment"""
logging.info('Syncing datasource [{}]'.format(self.datasource_name))
client = self.cluster.get_pydruid_client()
results = client.time_boundary(datasource=self.datasource_name)
if not results:
return
max_time = results[0]['result']['maxTime']
max_time = dparse(max_time)
# Query segmentMetadata for 7 days back. However, due to a bug,
# we need to set this interval to more than 1 day ago to exclude
# realtime segments, which triggered a bug (fixed in druid 0.8.2).
# https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
lbound = (max_time - timedelta(days=7)).isoformat()
if not self.version_higher(self.cluster.druid_version, '0.8.2'):
rbound = (max_time - timedelta(1)).isoformat()
else:
rbound = max_time.isoformat()
segment_metadata = None
try:
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=lbound + '/' + rbound,
merge=self.merge_flag,
analysisTypes=[])
except Exception as e:
logging.warning('Failed first attempt to get latest segment')
logging.exception(e)
if not segment_metadata:
# if no segments in the past 7 days, look at all segments
lbound = datetime(1901, 1, 1).isoformat()[:10]
if not self.version_higher(self.cluster.druid_version, '0.8.2'):
rbound = datetime.now().isoformat()
else:
rbound = datetime(2050, 1, 1).isoformat()[:10]
try:
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=lbound + '/' + rbound,
merge=self.merge_flag,
analysisTypes=[])
except Exception as e:
logging.warning('Failed 2nd attempt to get latest segment')
logging.exception(e)
if segment_metadata:
return segment_metadata[-1]['columns']
def generate_metrics(self):
self.generate_metrics_for(self.columns)
def generate_metrics_for(self, columns):
metrics = {}
for col in columns:
metrics.update(col.get_metrics())
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidCluster.cluster_name == self.cluster_name)
.filter(DruidMetric.datasource_name == self.datasource_name)
.filter(or_(DruidMetric.metric_name == m for m in metrics))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
metric.datasource_name = self.datasource_name
if not dbmetrics.get(metric.metric_name, None):
with db.session.no_autoflush:
db.session.add(metric)
@classmethod
def sync_to_db_from_config(
cls,
druid_config,
user,
cluster,
refresh=True):
"""Merges the ds config from druid_config into one stored in the db."""
session = db.session
datasource = (
session.query(cls)
.filter_by(datasource_name=druid_config['name'])
.first()
)
# Create a new datasource.
if not datasource:
datasource = cls(
datasource_name=druid_config['name'],
cluster=cluster,
owner=user,
changed_by_fk=user.id,
created_by_fk=user.id,
)
session.add(datasource)
elif not refresh:
return
dimensions = druid_config['dimensions']
col_objs = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_name == druid_config['name'])
.filter(or_(DruidColumn.column_name == dim for dim in dimensions))
)
col_objs = {col.column_name: col for col in col_objs}
for dim in dimensions:
col_obj = col_objs.get(dim, None)
if not col_obj:
col_obj = DruidColumn(
datasource_name=druid_config['name'],
column_name=dim,
groupby=True,
filterable=True,
# TODO: fetch type from Hive.
type='STRING',
datasource=datasource,
)
session.add(col_obj)
# Import Druid metrics
metric_objs = (
session.query(DruidMetric)
.filter(DruidMetric.datasource_name == druid_config['name'])
.filter(or_(DruidMetric.metric_name == spec['name']
for spec in druid_config['metrics_spec']))
)
metric_objs = {metric.metric_name: metric for metric in metric_objs}
for metric_spec in druid_config['metrics_spec']:
metric_name = metric_spec['name']
metric_type = metric_spec['type']
metric_json = json.dumps(metric_spec)
if metric_type == 'count':
metric_type = 'longSum'
metric_json = json.dumps({
'type': 'longSum',
'name': metric_name,
'fieldName': metric_name,
})
metric_obj = metric_objs.get(metric_name, None)
if not metric_obj:
metric_obj = DruidMetric(
metric_name=metric_name,
metric_type=metric_type,
verbose_name='%s(%s)' % (metric_type, metric_name),
datasource=datasource,
json=metric_json,
description=(
'Imported from the airolap config dir for %s' %
druid_config['name']),
)
session.add(metric_obj)
session.commit()
@staticmethod
def time_offset(granularity):
if granularity == 'week_ending_saturday':
return 6 * 24 * 3600 * 1000 # 6 days
return 0
# uses https://en.wikipedia.org/wiki/ISO_8601
# http://druid.io/docs/0.8.0/querying/granularities.html
# TODO: pass origin from the UI
@staticmethod
def granularity(period_name, timezone=None, origin=None):
if not period_name or period_name == 'all':
return 'all'
iso_8601_dict = {
'5 seconds': 'PT5S',
'30 seconds': 'PT30S',
'1 minute': 'PT1M',
'5 minutes': 'PT5M',
'1 hour': 'PT1H',
'6 hour': 'PT6H',
'one day': 'P1D',
'1 day': 'P1D',
'7 days': 'P7D',
'week': 'P1W',
'week_starting_sunday': 'P1W',
'week_ending_saturday': 'P1W',
'month': 'P1M',
}
granularity = {'type': 'period'}
if timezone:
granularity['timeZone'] = timezone
if origin:
dttm = utils.parse_human_datetime(origin)
granularity['origin'] = dttm.isoformat()
if period_name in iso_8601_dict:
granularity['period'] = iso_8601_dict[period_name]
if period_name in ('week_ending_saturday', 'week_starting_sunday'):
# use Sunday as start of the week
granularity['origin'] = '2016-01-03T00:00:00'
elif not isinstance(period_name, string_types):
granularity['type'] = 'duration'
granularity['duration'] = period_name
elif period_name.startswith('P'):
# identify if the string is the iso_8601 period
granularity['period'] = period_name
else:
granularity['type'] = 'duration'
granularity['duration'] = utils.parse_human_timedelta(
period_name).total_seconds() * 1000
return granularity
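    # Examples of what granularity() returns (illustrative; the duration figure
    # depends on utils.parse_human_timedelta):
    #   DruidDatasource.granularity('1 day', timezone='UTC')
    #     -> {'type': 'period', 'period': 'P1D', 'timeZone': 'UTC'}
    #   DruidDatasource.granularity('90 seconds')
    #     -> {'type': 'duration', 'duration': 90000.0}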
@staticmethod
def _metrics_and_post_aggs(metrics, metrics_dict):
all_metrics = []
post_aggs = {}
def recursive_get_fields(_conf):
_type = _conf.get('type')
_field = _conf.get('field')
_fields = _conf.get('fields')
field_names = []
if _type in ['fieldAccess', 'hyperUniqueCardinality',
'quantile', 'quantiles']:
field_names.append(_conf.get('fieldName', ''))
if _field:
field_names += recursive_get_fields(_field)
if _fields:
for _f in _fields:
field_names += recursive_get_fields(_f)
return list(set(field_names))
for metric_name in metrics:
metric = metrics_dict[metric_name]
if metric.metric_type != 'postagg':
all_metrics.append(metric_name)
else:
mconf = metric.json_obj
all_metrics += recursive_get_fields(mconf)
all_metrics += mconf.get('fieldNames', [])
if mconf.get('type') == 'javascript':
post_aggs[metric_name] = JavascriptPostAggregator(
name=mconf.get('name', ''),
field_names=mconf.get('fieldNames', []),
function=mconf.get('function', ''))
elif mconf.get('type') == 'quantile':
post_aggs[metric_name] = Quantile(
mconf.get('name', ''),
mconf.get('probability', ''),
)
elif mconf.get('type') == 'quantiles':
post_aggs[metric_name] = Quantiles(
mconf.get('name', ''),
mconf.get('probabilities', ''),
)
elif mconf.get('type') == 'fieldAccess':
post_aggs[metric_name] = Field(mconf.get('name'))
elif mconf.get('type') == 'constant':
post_aggs[metric_name] = Const(
mconf.get('value'),
output_name=mconf.get('name', ''),
)
elif mconf.get('type') == 'hyperUniqueCardinality':
post_aggs[metric_name] = HyperUniqueCardinality(
mconf.get('name'),
)
elif mconf.get('type') == 'arithmetic':
post_aggs[metric_name] = Postaggregator(
mconf.get('fn', '/'),
mconf.get('fields', []),
mconf.get('name', ''))
else:
post_aggs[metric_name] = CustomPostAggregator(
mconf.get('name', ''),
mconf)
return all_metrics, post_aggs
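    # Shape handled above, sketched with a made-up post-aggregated metric: a
    # DruidMetric whose metric_type is 'postagg' and whose JSON looks like
    #   {"type": "arithmetic", "name": "pct", "fn": "/",
    #    "fields": [{"type": "fieldAccess", "fieldName": "num"},
    #               {"type": "fieldAccess", "fieldName": "den"}]}
    # contributes 'num' and 'den' to all_metrics (via recursive_get_fields) and
    # is itself turned into an arithmetic Postaggregator in post_aggs.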
def values_for_column(self,
column_name,
limit=10000):
"""Retrieve some values for the given column"""
# TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid
if self.fetch_values_from:
from_dttm = utils.parse_human_datetime(self.fetch_values_from)
else:
from_dttm = datetime(1970, 1, 1)
qry = dict(
datasource=self.datasource_name,
granularity='all',
intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),
aggregations=dict(count=count('count')),
dimension=column_name,
metric='count',
threshold=limit,
)
client = self.cluster.get_pydruid_client()
client.topn(**qry)
df = client.export_pandas()
return [row[column_name] for row in df.to_records(index=False)]
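    # The query built above is a plain Druid topN over the column's full
    # history: e.g. values_for_column('country', limit=10) would ask the broker
    # for the ten most frequent 'country' values ranked by the ad-hoc 'count'
    # aggregation ('country' being an illustrative column name).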
def get_query_str(self, query_obj, phase=1, client=None):
return self.run_query(client=client, phase=phase, **query_obj)
def _add_filter_from_pre_query_data(self, df, dimensions, dim_filter):
ret = dim_filter
if df is not None and not df.empty:
new_filters = []
for unused, row in df.iterrows():
fields = []
for dim in dimensions:
f = Dimension(dim) == row[dim]
fields.append(f)
if len(fields) > 1:
term = Filter(type='and', fields=fields)
new_filters.append(term)
elif fields:
new_filters.append(fields[0])
if new_filters:
ff = Filter(type='or', fields=new_filters)
if not dim_filter:
ret = ff
else:
ret = Filter(type='and', fields=[ff, dim_filter])
return ret
def run_query( # noqa / druid
self,
groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=None,
timeseries_limit_metric=None,
row_limit=None,
inner_from_dttm=None, inner_to_dttm=None,
orderby=None,
extras=None, # noqa
select=None, # noqa
columns=None, phase=2, client=None, form_data=None,
order_desc=True):
"""Runs a query against Druid and returns a dataframe.
"""
# TODO refactor into using a TBD Query object
client = client or self.cluster.get_pydruid_client()
if not is_timeseries:
granularity = 'all'
inner_from_dttm = inner_from_dttm or from_dttm
inner_to_dttm = inner_to_dttm or to_dttm
# add tzinfo to native datetime with config
from_dttm = from_dttm.replace(tzinfo=DRUID_TZ)
to_dttm = to_dttm.replace(tzinfo=DRUID_TZ)
timezone = from_dttm.tzname()
query_str = ''
metrics_dict = {m.metric_name: m for m in self.metrics}
columns_dict = {c.column_name: c for c in self.columns}
all_metrics, post_aggs = self._metrics_and_post_aggs(
metrics,
metrics_dict)
aggregations = OrderedDict()
for m in self.metrics:
if m.metric_name in all_metrics:
aggregations[m.metric_name] = m.json_obj
rejected_metrics = [
m.metric_name for m in self.metrics
if m.is_restricted and
m.metric_name in aggregations.keys() and
not sm.has_access('metric_access', m.perm)
]
if rejected_metrics:
raise MetricPermException(
'Access to the metrics denied: ' + ', '.join(rejected_metrics),
)
# the dimensions list with dimensionSpecs expanded
dimensions = []
groupby = [gb for gb in groupby if gb in columns_dict]
for column_name in groupby:
col = columns_dict.get(column_name)
dim_spec = col.dimension_spec
if dim_spec:
dimensions.append(dim_spec)
else:
dimensions.append(column_name)
qry = dict(
datasource=self.datasource_name,
dimensions=dimensions,
aggregations=aggregations,
granularity=DruidDatasource.granularity(
granularity,
timezone=timezone,
origin=extras.get('druid_time_origin'),
),
post_aggregations=post_aggs,
intervals=from_dttm.isoformat() + '/' + to_dttm.isoformat(),
)
filters = DruidDatasource.get_filters(filter, self.num_cols)
if filters:
qry['filter'] = filters
having_filters = self.get_having_filters(extras.get('having_druid'))
if having_filters:
qry['having'] = having_filters
order_direction = 'descending' if order_desc else 'ascending'
if len(groupby) == 0 and not having_filters:
del qry['dimensions']
client.timeseries(**qry)
if (
not having_filters and
len(groupby) == 1 and
order_desc and
not isinstance(list(qry.get('dimensions'))[0], dict)
):
dim = list(qry.get('dimensions'))[0]
if timeseries_limit_metric:
order_by = timeseries_limit_metric
else:
order_by = list(qry['aggregations'].keys())[0]
            # Limit on the number of timeseries, doing a two-phase query
pre_qry = deepcopy(qry)
pre_qry['granularity'] = 'all'
pre_qry['threshold'] = min(row_limit,
timeseries_limit or row_limit)
pre_qry['metric'] = order_by
pre_qry['dimension'] = dim
del pre_qry['dimensions']
client.topn(**pre_qry)
query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
query_str += '\n'
if phase == 1:
return query_str
query_str += (
"// Phase 2 (built based on phase one's results)\n")
df = client.export_pandas()
qry['filter'] = self._add_filter_from_pre_query_data(
df,
qry['dimensions'], filters)
qry['threshold'] = timeseries_limit or 1000
if row_limit and granularity == 'all':
qry['threshold'] = row_limit
qry['dimension'] = list(qry.get('dimensions'))[0]
qry['dimension'] = dim
del qry['dimensions']
qry['metric'] = list(qry['aggregations'].keys())[0]
client.topn(**qry)
elif len(groupby) > 1 or having_filters or not order_desc:
# If grouping on multiple fields or using a having filter
# we have to force a groupby query
if timeseries_limit and is_timeseries:
order_by = metrics[0] if metrics else self.metrics[0]
if timeseries_limit_metric:
order_by = timeseries_limit_metric
                # Limit on the number of timeseries, doing a two-phase query
pre_qry = deepcopy(qry)
pre_qry['granularity'] = 'all'
pre_qry['limit_spec'] = {
'type': 'default',
'limit': min(timeseries_limit, row_limit),
'intervals': (
inner_from_dttm.isoformat() + '/' +
inner_to_dttm.isoformat()),
'columns': [{
'dimension': order_by,
'direction': order_direction,
}],
}
client.groupby(**pre_qry)
query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
query_str += '\n'
if phase == 1:
return query_str
query_str += (
"// Phase 2 (built based on phase one's results)\n")
df = client.export_pandas()
qry['filter'] = self._add_filter_from_pre_query_data(
df,
qry['dimensions'],
filters,
)
qry['limit_spec'] = None
if row_limit:
qry['limit_spec'] = {
'type': 'default',
'limit': row_limit,
'columns': [{
'dimension': (
metrics[0] if metrics else self.metrics[0]),
'direction': order_direction,
}],
}
client.groupby(**qry)
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
return query_str
def query(self, query_obj):
qry_start_dttm = datetime.now()
client = self.cluster.get_pydruid_client()
query_str = self.get_query_str(
client=client, query_obj=query_obj, phase=2)
df = client.export_pandas()
if df is None or df.size == 0:
raise Exception(_('No data was returned.'))
df.columns = [
DTTM_ALIAS if c == 'timestamp' else c for c in df.columns]
is_timeseries = query_obj['is_timeseries'] \
if 'is_timeseries' in query_obj else True
if (
not is_timeseries and
DTTM_ALIAS in df.columns):
del df[DTTM_ALIAS]
# Reordering columns
cols = []
if DTTM_ALIAS in df.columns:
cols += [DTTM_ALIAS]
cols += [col for col in query_obj['groupby'] if col in df.columns]
cols += [col for col in query_obj['metrics'] if col in df.columns]
df = df[cols]
time_offset = DruidDatasource.time_offset(query_obj['granularity'])
def increment_timestamp(ts):
dt = utils.parse_human_datetime(ts).replace(
tzinfo=DRUID_TZ)
return dt + timedelta(milliseconds=time_offset)
if DTTM_ALIAS in df.columns and time_offset:
df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(increment_timestamp)
return QueryResult(
df=df,
query=query_str,
duration=datetime.now() - qry_start_dttm)
@staticmethod
def get_filters(raw_filters, num_cols): # noqa
filters = None
for flt in raw_filters:
if not all(f in flt for f in ['col', 'op', 'val']):
continue
col = flt['col']
op = flt['op']
eq = flt['val']
cond = None
if op in ('in', 'not in'):
eq = [
types.replace('"', '').strip()
if isinstance(types, string_types)
else types
for types in eq]
elif not isinstance(flt['val'], string_types):
eq = eq[0] if eq and len(eq) > 0 else ''
is_numeric_col = col in num_cols
if is_numeric_col:
if op in ('in', 'not in'):
eq = [utils.string_to_num(v) for v in eq]
else:
eq = utils.string_to_num(eq)
if op == '==':
cond = Dimension(col) == eq
elif op == '!=':
cond = Dimension(col) != eq
elif op in ('in', 'not in'):
fields = []
# ignore the filter if it has no value
if not len(eq):
continue
elif len(eq) == 1:
cond = Dimension(col) == eq[0]
else:
for s in eq:
fields.append(Dimension(col) == s)
cond = Filter(type='or', fields=fields)
if op == 'not in':
cond = ~cond
elif op == 'regex':
cond = Filter(type='regex', pattern=eq, dimension=col)
elif op == '>=':
cond = Bound(col, eq, None, alphaNumeric=is_numeric_col)
elif op == '<=':
cond = Bound(col, None, eq, alphaNumeric=is_numeric_col)
elif op == '>':
cond = Bound(
col, eq, None,
lowerStrict=True, alphaNumeric=is_numeric_col,
)
elif op == '<':
cond = Bound(
col, None, eq,
upperStrict=True, alphaNumeric=is_numeric_col,
)
if filters:
filters = Filter(type='and', fields=[
cond,
filters,
])
else:
filters = cond
return filters
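    # Example input for get_filters (shape only; column names and values are
    # made up):
    #   raw_filters = [
    #       {'col': 'country', 'op': 'in', 'val': ['FR', 'DE']},
    #       {'col': 'age', 'op': '>=', 'val': '21'},
    #   ]
    # With 'age' listed in num_cols, the second entry becomes a numeric Bound
    # filter and the two conditions are combined with an AND filter.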
def _get_having_obj(self, col, op, eq):
cond = None
if op == '==':
if col in self.column_names:
cond = DimSelector(dimension=col, value=eq)
else:
cond = Aggregation(col) == eq
elif op == '>':
cond = Aggregation(col) > eq
elif op == '<':
cond = Aggregation(col) < eq
return cond
def get_having_filters(self, raw_filters):
filters = None
reversed_op_map = {
'!=': '==',
'>=': '<',
'<=': '>',
}
for flt in raw_filters:
if not all(f in flt for f in ['col', 'op', 'val']):
continue
col = flt['col']
op = flt['op']
eq = flt['val']
cond = None
if op in ['==', '>', '<']:
cond = self._get_having_obj(col, op, eq)
elif op in reversed_op_map:
cond = ~self._get_having_obj(col, reversed_op_map[op], eq)
if filters:
filters = filters & cond
else:
filters = cond
return filters
@classmethod
def query_datasources_by_name(
cls, session, database, datasource_name, schema=None):
return (
session.query(cls)
.filter_by(cluster_name=database.id)
.filter_by(datasource_name=datasource_name)
.all()
)
sa.event.listen(DruidDatasource, 'after_insert', set_perm)
sa.event.listen(DruidDatasource, 'after_update', set_perm)
| apache-2.0 | -5,881,308,706,200,281,000 | 35.677994 | 89 | 0.527088 | false |
shsdev/khclass | khclarifai/khclarifai_predict.py | 1 | 1302 | #!/usr/bin/env python
# coding=UTF-8
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from math import floor
from clarifai.rest import ClarifaiApp
from config.configuration import data_path, test_set_id, clarifai_api_key, clarifai_model_name
def floored_percentage(val, digits):
"""Format float value as percentage string"""
val *= 10 ** (digits + 2)
return '{1:.{0}f}%'.format(digits, floor(val) / 10 ** digits)
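# Added note (not in the original file): floored_percentage(0.98765, 2)
# returns '98.76%' -- the last retained digit is floored rather than rounded.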
def get_prediction_confidence(model, image_path):
"""Get the first value's float prediction value"""
print "Processing prediction for image: %s" % image_path
full_image_path = "%s/%s" % (data_path, image_path)
prediction_confidence = 0.0
result = model.predict_by_filename(full_image_path)
for o in result['outputs']:
concept_results = o['data']['concepts']
for concept_result in concept_results:
print concept_result['value']
prediction_confidence = float(concept_result['value'])
break
return prediction_confidence
if __name__ == '__main__':
app = ClarifaiApp(api_key=clarifai_api_key)
mdl = app.models.get(clarifai_model_name)
print floored_percentage(get_prediction_confidence(mdl, "images/test%s/%s" % (test_set_id, "EMK_1303.jpg")), 2)
| gpl-3.0 | -7,671,868,139,313,991,000 | 34.189189 | 115 | 0.667435 | false |
h2g2bob/ynmp-wikipedia-sync | chgparty_dot.py | 1 | 5461 | import csv
from collections import defaultdict
from collections import namedtuple
class Pty(object):
def __init__(self, ynmp, name, rank=3, color="white"):
self.ynmp = ynmp
self.name = name.replace('"', '').replace("'", "")
self.rank = rank
self.color = color
self.code = "".join(x for x in self.ynmp if x.isalpha())
def __hash__(self):
return hash(self.ynmp)
def __cmp__(self, other):
return cmp(self.ynmp, other)
parties = dict((x.ynmp, x) for x in (
Pty("Conservative Party", "Conservative", 0, "dodgerblue"),
Pty("Labour Party", "Labour", 0, "firebrick1"),
Pty("Liberal Democrats", "Lib Dem", 0, "orange"),
Pty("UK Independence Party (UKIP)", "UKIP", 1, "purple"),
Pty("Green Party", "Green", 1, "green"),
Pty("British National Party", "BNP"),
Pty("Christian Party \"Proclaiming Christ's Lordship\"", "Christian"),
Pty("English Democrats", "Eng Dem"),
Pty("Ulster Unionist Party", "UUP"),
Pty("Trade Unionist and Socialist Coalition", "TUSC"),
Pty("National Health Action Party", "NHA"),
))
party_others = Pty("Others", "Others")
def get_party(ynmp_name, args):
try:
party = parties[ynmp_name]
except KeyError:
if ynmp_name == "Independent":
party = Pty("Independent", "Independent", rank=0 if args.independent else 100)
else:
party = Pty(ynmp_name, ynmp_name)
if party.rank > 5 - args.hide_small:
party = party_others
return party
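# Added note (not in the original file): a party is folded into "Others" when
# its rank exceeds 5 - hide_small. With the defaults only Independent (rank 100
# unless --independent is passed) crosses that threshold; each -s/--hide-small
# lowers the threshold by one, merging progressively more parties.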
def format_name(name):
return name
def name_grouping_individual(l):
return [[x] for x in l]
def name_grouping_grouped(l):
return [l]
def print_digraph(by_parties, name_grouping, args):
print "digraph {"
for party in set(n for (n, _) in by_parties.keys()) | set(n for (_, n) in by_parties.keys()):
print "%s [label=\"%s\",style=filled,fillcolor=%s];" % (party.code, party.name, party.color if args.color else "white",)
for ((old, new), full_namelist) in by_parties.items():
for namelist in name_grouping(full_namelist):
print "%s -> %s [label=\"%s\", penwidth=%d, weight=%d, fontsize=10];" % (
old.code,
new.code,
"\\n".join(format_name(name) for name in namelist) if args.names else "",
len(namelist),
len(namelist))
print "}"
def main(args):
by_parties = defaultdict(list)
for _, name, old_name, new_name in csv.reader(open("chgparty.csv")):
old = get_party(old_name, args)
new = get_party(new_name, args)
by_parties[old, new].append(name)
if args.ignore_uup:
by_parties.pop(("Conservative and Unionist Party", "Ulster Unionist Party"), None) # pop with default avoids KeyError
if args.trim_parties:
by_parties = trim_parties(args, by_parties)
if not args.no_others:
by_parties = dict(((old, new), namelist) for ((old, new), namelist) in by_parties.items() if old != "Others" and new != "Others")
if not args.others_to_others:
by_parties = dict(((old, new), namelist) for ((old, new), namelist) in by_parties.items() if old != "Others" or new != "Others")
if args.trim:
by_parties = dict(((old, new), namelist) for ((old, new), namelist) in by_parties.items() if len(namelist) > args.trim or max((old.rank, new.rank)) < args.dont_trim_large)
print_digraph(by_parties, name_grouping_individual if args.single_line else name_grouping_grouped, args)
def trim_parties(args, by_parties):
counts = defaultdict(int)
for (old, new), namelist in by_parties.items():
counts[old] += len(namelist)
counts[new] += len(namelist)
to_trim = set(k for (k, v) in counts.items() if v <= args.trim_parties)
rtn = {}
for (old, new), namelist in by_parties.items():
if old in to_trim:
old = party_others
if new in to_trim:
new = party_others
rtn.setdefault((old, new), []).extend(namelist)
return rtn
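# Added note (not in the original file): trim_parties counts each party's total
# appearances on either side of a defection and collapses any party whose count
# is at or below the --trim-parties threshold into the combined "Others" node.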
if __name__=='__main__':
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--trim", action="count", default=0, help="Hide single defections (multiple times to hide less than N defections)")
parser.add_argument("-T", "--dont-trim-large", action="count", default=0, help="Do not hide single defections to/from large political parties")
parser.add_argument("-s", "--hide-small", action="count", default=0, help="Hide small parties (multiple times to hide more parties)")
parser.add_argument("-x", "--trim-parties", action="count", default=0, help="Trim parties with few defections")
parser.add_argument("-o", "--no-others", action="store_false", default=True, help="Hide the combined \"others\" for small parties")
parser.add_argument("-2", "--others-to-others", action="store_true", default=False, help="Show defections from \"others\" to itself")
parser.add_argument("-i", "--independent", action="store_true", default=False, help="Show independent and others as different")
parser.add_argument("-1", "--single-line", action="store_true", default=False, help="Show one line per candidate")
parser.add_argument("-c", "--no-color", action="store_false", dest="color", default=True, help="No color")
parser.add_argument("-n", "--no-names", action="store_false", dest="names", default=True, help="No names")
parser.add_argument("--no-ignore-uup", action="store_false", dest="ignore_uup", default=True, help="The UUP fielded a bunch of candidates jointly with the Conservative Party, using the name \"Conservative and Unionist Party\". The candidates were really UUP people, so this transition is boring.")
args = parser.parse_args()
if args.dont_trim_large and not args.trim:
raise ValueError("You can't use -T without -t")
main(args)
| agpl-3.0 | -6,079,931,843,688,282,000 | 38.572464 | 298 | 0.680461 | false |
underyx/TheMajorNews | main.py | 1 | 2711 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import config
import requests
from requests_oauthlib import OAuth1
from base64 import b64encode
def get_access_token():
token = config.twitter_app_key + ':' + config.twitter_app_secret
h = {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Authorization': b'Basic ' + b64encode(bytes(token, 'utf8'))}
print()
r = requests.post('https://api.twitter.com/oauth2/token',
data=b'grant_type=client_credentials', headers=h)
assert r.json()['token_type'] == 'bearer'
return r.json()['access_token']
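# Added note (not in the original file): this is Twitter's application-only
# OAuth2 flow -- the app key and secret are base64-encoded into a Basic auth
# header and exchanged for a bearer token used for read-only calls such as
# user_timeline below.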
def get_latest_tweet(token):
parameters = {'screen_name': 'TwoHeadlines',
'count': 1,
'trim_user': True}
headers = {'Authorization': 'Bearer ' + token}
r = requests.get('https://api.twitter.com/1.1/statuses/user_timeline.json',
params=parameters, headers=headers)
return r.json(encoding='utf8')[0]['text']
def do_translations(tweet, i=0):
i += 1
if i > config.run_limit:
return tweet
ko_parameters = {'q': tweet,
'format': 'text',
'target': 'ko',
'source': 'en',
'key': config.google_key}
ko_r = requests.get('https://www.googleapis.com/language/translate/v2',
params=ko_parameters)
ko_result = ko_r.json()['data']['translations'][0]['translatedText']
en_parameters = {'q': ko_result,
'format': 'text',
'target': 'en',
'source': 'ko',
'key': config.google_key}
en_r = requests.get('https://www.googleapis.com/language/translate/v2',
params=en_parameters)
en_result = en_r.json()['data']['translations'][0]['translatedText']
print('Translation #{} is: {}'.format(i, en_result))
return do_translations(en_result, i) if tweet != en_result else en_result
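# Added note (not in the original file): do_translations round-trips the text
# English -> Korean -> English and recurses until the English result stops
# changing or config.run_limit passes have been made, whichever comes first.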
def post_tweet(tweet):
if len(tweet) > 140:
tweet = tweet[:137] + "..."
auth = OAuth1(config.twitter_app_key, config.twitter_app_secret,
config.twitter_user_key, config.twitter_user_secret)
r = requests.post('https://api.twitter.com/1.1/statuses/update.json',
auth=auth, data={'status': tweet})
return r.json()
def main():
bearer_token = get_access_token()
latest_tweet = get_latest_tweet(bearer_token)
print('Latest Original is: ' + latest_tweet)
translation = do_translations(latest_tweet)
print('Translation is: ' + translation)
post_tweet(translation)
if __name__ == '__main__':
main()
| mit | 2,091,146,900,491,236,900 | 28.467391 | 79 | 0.569532 | false |
meerkat-code/meerkat_frontend | meerkat_frontend/views/homepage.py | 1 | 5469 | """
homepage.py
A Flask Blueprint module for the homepage.
"""
from flask import Blueprint, render_template, current_app, g
from flask import request, make_response, redirect, flash, abort
from flask_babel import gettext
from meerkat_frontend import app, auth
from meerkat_frontend import common as c
from meerkat_frontend.messages import messages
from meerkat_libs import hermes
import requests
import logging
import datetime
# Register the homepage blueprint.
homepage = Blueprint('homepage', __name__, url_prefix='/<language>')
homepage_route = app.config.get("HOMEPAGE_ROUTE", "")
@homepage.route('/' + homepage_route)
def index():
# Messages to be flashed to the user from the system admins
messages.flash()
return render_template(
'homepage/index.html',
content=g.config['HOMEPAGE_CONFIG'],
)
@homepage.route('/login')
def login():
# Enable url get args.
url = request.args.get('url', '/en/technical')
error = request.args.get('error', '')
# If a mesage is specified show it.
if error:
flash(error, "error")
# Return the login page.
return render_template(
'homepage/login.html',
content=g.config['HOMEPAGE_CONFIG'],
redirect=url
)
@homepage.route('/login_request', methods=['POST'])
def login_request():
"""
Make a login request to the authentication module.
    We can't do this directly from the browser because of the same-origin
    policy: browser scripts can't make cross-domain POST requests.
"""
url = current_app.config['INTERNAL_AUTH_ROOT'] + "/api/login"
r = requests.post(url, json=request.json)
return (r.text, r.status_code, r.headers.items())
@homepage.route('/logout')
def logout():
"""
    Logs a user out. This involves deleting the current JWT stored in a cookie
    and redirecting to the specified page. We delete the cookie by setting its
    expiration date to the current moment so that it expires immediately. Set
    the page to redirect to using url params, e.g. /logout?url=https://www.google.com
Get Args:
url (str) The url of the page to redirect to after logging out.
Returns:
A redirect response object that also sets the cookie's expiration time
to 0.
"""
url = request.args.get('url', '/')
response = make_response(redirect(url))
response.set_cookie(
current_app.config["JWT_COOKIE_NAME"],
value="",
expires=0
)
g.payload = {}
return response
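# Added note (not in the original file): e.g. GET /en/logout?url=/en/technical
# clears the JWT cookie and redirects to /en/technical; the /en prefix comes
# from the blueprint's <language> url_prefix.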
@homepage.route('/account_settings', methods=['GET', 'POST'])
@auth.authorise(*app.config['AUTH'].get('settings', [['BROKEN'], ['']]))
def account_settings():
"""
Shows the account settings page.
"""
if request.method == 'GET':
current_app.logger.warning("GET called")
return render_template(
'homepage/account_settings.html',
content=g.config['TECHNICAL_CONFIG'],
week=c.api('/epi_week')
)
elif request.method == 'POST':
url = current_app.config['INTERNAL_AUTH_ROOT'] + "/api/update_user"
r = requests.post(url, json=request.json)
return (r.text, r.status_code, r.headers.items())
@homepage.route('/fault', methods=['GET', 'POST'])
@auth.authorise(*app.config['AUTH'].get('fault-report', [['BROKEN'], ['']]))
def report_fault():
"""
Enables users to directly report faults to the developer. This page
displays a fault report form and generates a fault report email from the
data it posts to the server.
"""
# If a post request is made to the url, process the form's data.
if request.method == 'POST':
# Get the data from the POST request and initialise variables.
data = request.form
now = datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")
deployment = current_app.config['DEPLOYMENT']
# Create a simple string that displays the submitted data
details = "<b>"
for key, value in data.items():
details = ''.join([
details, key.capitalize(), ':</b> ', value, '<br/><br/><b>'
])
# Send an email
# TODO: Direct github issue creation if from a personal account.
try:
hermes('/email', 'PUT', data={
'email': '[email protected]',
'subject': gettext('Fault Report') + ' | {} | {}'.format(
deployment,
data['url']
),
'message': gettext('There was a fault reported at {} in the '
'{} deployment. Here are the details...'
'\n\n{}').format(now, deployment, details)
})
except Exception as e:
logging.warning("Error sending email through hermes...")
logging.warning(e)
flash(gettext(
'Could not notify developers. Please contact them directly.'
), 'error')
abort(502)
return render_template(
'homepage/fault_report_response.html',
content=g.config['TECHNICAL_CONFIG'],
details=details.replace('\n', '<br/>')
)
# If a get request is made to the url, display the form
elif request.method == 'GET':
url = request.args.get('url', '')
return render_template(
'homepage/fault_report_form.html',
content=g.config['TECHNICAL_CONFIG'],
url=url
)
| mit | 4,114,660,971,303,595,000 | 32.552147 | 79 | 0.602121 | false |
hunse/vrep-python | dvs-play.py | 1 | 1515 | """
Play DVS events in real time
TODO: deal with looping event times for recordings > 65 s
"""
import numpy as np
import matplotlib.pyplot as plt
import dvs
def close(a, b, atol=1e-8, rtol=1e-5):
return np.abs(a - b) < atol + rtol * b
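# Added note (not in the original file): close() mirrors the intent of
# np.isclose(a, b, rtol=rtol, atol=atol) but compares against rtol * b without
# taking abs(b), which is fine here because the event times are non-negative.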
def imshow(image, ax=None):
ax = plt.gca() if ax is None else ax
ax.imshow(image, vmin=-1, vmax=1, cmap='gray', interpolation=None)
def add_to_image(image, events):
for x, y, s, _ in events:
image[y, x] += 1 if s else -1
def as_image(events):
image = np.zeros((128, 128), dtype=float)
add_to_image(image, events)
return image
# filename = 'dvs.npz'
filename = 'dvs-ball-10ms.npz'
events = dvs.load(filename, dt_round=True)
udiffs = np.unique(np.diff(np.unique(events['t'])))
assert np.allclose(udiffs, 0.01)
plt.figure(1)
plt.clf()
times = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
for i in range(6):
plt.subplot(2, 3, i+1)
imshow(as_image(events[close(events['t'], times[i])]))
plt.title("t = %0.3f" % times[i])
# plt.figure(1)
# plt.clf()
# image = np.zeros((128, 128), dtype=float)
# plt_image = plt.imshow(image, vmin=-1, vmax=1, cmap='gray', interpolation=None)
# plt.gca().invert_yaxis()
# while t0 < t_max:
# time.sleep(0.001)
# t1 = time.time() - t_world
# new_events = events[(ts > t0) & (ts < t1)]
# dt = t1 - t0
# image *= np.exp(-dt / 0.01)
# for x, y, s, _ in new_events:
# image[y, x] += 1 if s else -1
# plt_image.set_data(image)
# plt.draw()
# t0 = t1
plt.show()
| gpl-2.0 | 5,992,263,968,722,449,000 | 21.279412 | 81 | 0.59538 | false |
jasonmaier/CircularEconomyBlog | db_repository/versions/013_migration.py | 1 | 1155 | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
tasks = Table('tasks', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('priority', INTEGER, nullable=False),
Column('user_id', INTEGER),
Column('task', VARCHAR(length=140)),
)
tasks = Table('tasks', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('description', String(length=140)),
Column('priority', Integer),
Column('user_id', Integer),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['tasks'].columns['task'].drop()
post_meta.tables['tasks'].columns['description'].create()
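# Added note (not in the original file): this autogenerated migration replaces
# the tasks.task column with tasks.description by dropping one column and
# creating the other, so any existing values in tasks.task are not carried over.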
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['tasks'].columns['task'].create()
post_meta.tables['tasks'].columns['description'].drop()
| bsd-3-clause | 1,869,276,016,205,127,000 | 30.216216 | 68 | 0.688312 | false |